Home | History | Annotate | Line # | Download | only in drm
drm_dp_mst_topology.c revision 1.10
      1 /*	$NetBSD: drm_dp_mst_topology.c,v 1.10 2021/12/19 09:45:01 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright  2014 Red Hat
      5  *
      6  * Permission to use, copy, modify, distribute, and sell this software and its
      7  * documentation for any purpose is hereby granted without fee, provided that
      8  * the above copyright notice appear in all copies and that both that copyright
      9  * notice and this permission notice appear in supporting documentation, and
     10  * that the name of the copyright holders not be used in advertising or
     11  * publicity pertaining to distribution of the software without specific,
     12  * written prior permission.  The copyright holders make no representations
     13  * about the suitability of this software for any purpose.  It is provided "as
     14  * is" without express or implied warranty.
     15  *
     16  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
     17  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
     18  * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
     19  * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
     20  * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
     21  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
     22  * OF THIS SOFTWARE.
     23  */
     24 
     25 #include <sys/cdefs.h>
     26 __KERNEL_RCSID(0, "$NetBSD: drm_dp_mst_topology.c,v 1.10 2021/12/19 09:45:01 riastradh Exp $");
     27 
     28 #include <linux/delay.h>
     29 #include <linux/errno.h>
     30 #include <linux/i2c.h>
     31 #include <linux/init.h>
     32 #include <linux/kernel.h>
     33 #include <linux/sched.h>
     34 #include <linux/seq_file.h>
     35 
     36 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
     37 #include <linux/stacktrace.h>
     38 #include <linux/sort.h>
     39 #include <linux/timekeeping.h>
     40 #include <linux/math64.h>
     41 #endif
     42 
     43 #include <drm/drm_atomic.h>
     44 #include <drm/drm_atomic_helper.h>
     45 #include <drm/drm_dp_mst_helper.h>
     46 #include <drm/drm_drv.h>
     47 #include <drm/drm_print.h>
     48 #include <drm/drm_probe_helper.h>
     49 
     50 #include "drm_crtc_helper_internal.h"
     51 #include "drm_dp_mst_topology_internal.h"
     52 
     53 #include <linux/nbsd-namespace.h>
     54 
     55 /**
     56  * DOC: dp mst helper
     57  *
     58  * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
     59  * protocol. The helpers contain a topology manager and bandwidth manager.
     60  * The helpers encapsulate the sending and received of sideband msgs.
     61  */
     62 struct drm_dp_pending_up_req {
     63 	struct drm_dp_sideband_msg_hdr hdr;
     64 	struct drm_dp_sideband_msg_req_body msg;
     65 	struct list_head next;
     66 };
     67 
     68 #if IS_ENABLED(CONFIG_DEBUG_FS)
     69 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
     70 				  char *buf);
     71 #endif
     72 
     73 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
     74 
     75 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
     76 				     int id,
     77 				     struct drm_dp_payload *payload);
     78 
     79 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
     80 				 struct drm_dp_mst_port *port,
     81 				 int offset, int size, u8 *bytes);
     82 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
     83 				  struct drm_dp_mst_port *port,
     84 				  int offset, int size, u8 *bytes);
     85 
     86 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
     87 				    struct drm_dp_mst_branch *mstb);
     88 
     89 static void
     90 drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
     91 				   struct drm_dp_mst_branch *mstb);
     92 
     93 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
     94 					   struct drm_dp_mst_branch *mstb,
     95 					   struct drm_dp_mst_port *port);
     96 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
     97 				 u8 *guid);
     98 
     99 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
    100 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
    101 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
    102 
    103 #define DBG_PREFIX "[dp_mst]"
    104 
    105 #define DP_STR(x) [DP_ ## x] = #x
    106 
    107 static const char *drm_dp_mst_req_type_str(u8 req_type)
    108 {
    109 	static const char * const req_type_str[] = {
    110 		DP_STR(GET_MSG_TRANSACTION_VERSION),
    111 		DP_STR(LINK_ADDRESS),
    112 		DP_STR(CONNECTION_STATUS_NOTIFY),
    113 		DP_STR(ENUM_PATH_RESOURCES),
    114 		DP_STR(ALLOCATE_PAYLOAD),
    115 		DP_STR(QUERY_PAYLOAD),
    116 		DP_STR(RESOURCE_STATUS_NOTIFY),
    117 		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
    118 		DP_STR(REMOTE_DPCD_READ),
    119 		DP_STR(REMOTE_DPCD_WRITE),
    120 		DP_STR(REMOTE_I2C_READ),
    121 		DP_STR(REMOTE_I2C_WRITE),
    122 		DP_STR(POWER_UP_PHY),
    123 		DP_STR(POWER_DOWN_PHY),
    124 		DP_STR(SINK_EVENT_NOTIFY),
    125 		DP_STR(QUERY_STREAM_ENC_STATUS),
    126 	};
    127 
    128 	if (req_type >= ARRAY_SIZE(req_type_str) ||
    129 	    !req_type_str[req_type])
    130 		return "unknown";
    131 
    132 	return req_type_str[req_type];
    133 }
    134 
    135 #undef DP_STR
    136 #define DP_STR(x) [DP_NAK_ ## x] = #x
    137 
    138 static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
    139 {
    140 	static const char * const nak_reason_str[] = {
    141 		DP_STR(WRITE_FAILURE),
    142 		DP_STR(INVALID_READ),
    143 		DP_STR(CRC_FAILURE),
    144 		DP_STR(BAD_PARAM),
    145 		DP_STR(DEFER),
    146 		DP_STR(LINK_FAILURE),
    147 		DP_STR(NO_RESOURCES),
    148 		DP_STR(DPCD_FAIL),
    149 		DP_STR(I2C_NAK),
    150 		DP_STR(ALLOCATE_FAIL),
    151 	};
    152 
    153 	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
    154 	    !nak_reason_str[nak_reason])
    155 		return "unknown";
    156 
    157 	return nak_reason_str[nak_reason];
    158 }
    159 
    160 #undef DP_STR
    161 #define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
    162 
    163 static const char *drm_dp_mst_sideband_tx_state_str(int state)
    164 {
    165 	static const char * const sideband_reason_str[] = {
    166 		DP_STR(QUEUED),
    167 		DP_STR(START_SEND),
    168 		DP_STR(SENT),
    169 		DP_STR(RX),
    170 		DP_STR(TIMEOUT),
    171 	};
    172 
    173 	if (state >= ARRAY_SIZE(sideband_reason_str) ||
    174 	    !sideband_reason_str[state])
    175 		return "unknown";
    176 
    177 	return sideband_reason_str[state];
    178 }
    179 
    180 static int
    181 drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
    182 {
    183 	int i;
    184 	u8 unpacked_rad[16];
    185 
    186 	for (i = 0; i < lct; i++) {
    187 		if (i % 2)
    188 			unpacked_rad[i] = rad[i / 2] >> 4;
    189 		else
    190 			unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
    191 	}
    192 
    193 	/* TODO: Eventually add something to printk so we can format the rad
    194 	 * like this: 1.2.3
    195 	 */
    196 	return snprintf(out, len, "%*phC", lct, unpacked_rad);
    197 }
    198 
    199 /* sideband msg handling */
    200 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
    201 {
    202 	u8 bitmask = 0x80;
    203 	u8 bitshift = 7;
    204 	u8 array_index = 0;
    205 	int number_of_bits = num_nibbles * 4;
    206 	u8 remainder = 0;
    207 
    208 	while (number_of_bits != 0) {
    209 		number_of_bits--;
    210 		remainder <<= 1;
    211 		remainder |= (data[array_index] & bitmask) >> bitshift;
    212 		bitmask >>= 1;
    213 		bitshift--;
    214 		if (bitmask == 0) {
    215 			bitmask = 0x80;
    216 			bitshift = 7;
    217 			array_index++;
    218 		}
    219 		if ((remainder & 0x10) == 0x10)
    220 			remainder ^= 0x13;
    221 	}
    222 
    223 	number_of_bits = 4;
    224 	while (number_of_bits != 0) {
    225 		number_of_bits--;
    226 		remainder <<= 1;
    227 		if ((remainder & 0x10) != 0)
    228 			remainder ^= 0x13;
    229 	}
    230 
    231 	return remainder;
    232 }
    233 
    234 static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
    235 {
    236 	u8 bitmask = 0x80;
    237 	u8 bitshift = 7;
    238 	u8 array_index = 0;
    239 	int number_of_bits = number_of_bytes * 8;
    240 	u16 remainder = 0;
    241 
    242 	while (number_of_bits != 0) {
    243 		number_of_bits--;
    244 		remainder <<= 1;
    245 		remainder |= (data[array_index] & bitmask) >> bitshift;
    246 		bitmask >>= 1;
    247 		bitshift--;
    248 		if (bitmask == 0) {
    249 			bitmask = 0x80;
    250 			bitshift = 7;
    251 			array_index++;
    252 		}
    253 		if ((remainder & 0x100) == 0x100)
    254 			remainder ^= 0xd5;
    255 	}
    256 
    257 	number_of_bits = 8;
    258 	while (number_of_bits != 0) {
    259 		number_of_bits--;
    260 		remainder <<= 1;
    261 		if ((remainder & 0x100) != 0)
    262 			remainder ^= 0xd5;
    263 	}
    264 
    265 	return remainder & 0xff;
    266 }
    267 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
    268 {
    269 	u8 size = 3;
    270 	size += (hdr->lct / 2);
    271 	return size;
    272 }
    273 
    274 static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
    275 					   u8 *buf, int *len)
    276 {
    277 	int idx = 0;
    278 	int i;
    279 	u8 crc4;
    280 	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
    281 	for (i = 0; i < (hdr->lct / 2); i++)
    282 		buf[idx++] = hdr->rad[i];
    283 	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
    284 		(hdr->msg_len & 0x3f);
    285 	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
    286 
    287 	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
    288 	buf[idx - 1] |= (crc4 & 0xf);
    289 
    290 	*len = idx;
    291 }
    292 
    293 static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
    294 					   u8 *buf, int buflen, u8 *hdrlen)
    295 {
    296 	u8 crc4;
    297 	u8 len;
    298 	int i;
    299 	u8 idx;
    300 	if (buf[0] == 0)
    301 		return false;
    302 	len = 3;
    303 	len += ((buf[0] & 0xf0) >> 4) / 2;
    304 	if (len > buflen)
    305 		return false;
    306 	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
    307 
    308 	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
    309 		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
    310 		return false;
    311 	}
    312 
    313 	hdr->lct = (buf[0] & 0xf0) >> 4;
    314 	hdr->lcr = (buf[0] & 0xf);
    315 	idx = 1;
    316 	for (i = 0; i < (hdr->lct / 2); i++)
    317 		hdr->rad[i] = buf[idx++];
    318 	hdr->broadcast = (buf[idx] >> 7) & 0x1;
    319 	hdr->path_msg = (buf[idx] >> 6) & 0x1;
    320 	hdr->msg_len = buf[idx] & 0x3f;
    321 	idx++;
    322 	hdr->somt = (buf[idx] >> 7) & 0x1;
    323 	hdr->eomt = (buf[idx] >> 6) & 0x1;
    324 	hdr->seqno = (buf[idx] >> 4) & 0x1;
    325 	idx++;
    326 	*hdrlen = idx;
    327 	return true;
    328 }
    329 
    330 static void
    331 drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
    332 			   struct drm_dp_sideband_msg_tx *raw)
    333 {
    334 	int idx = 0;
    335 	int i;
    336 	u8 *buf = raw->msg;
    337 	buf[idx++] = req->req_type & 0x7f;
    338 
    339 	switch (req->req_type) {
    340 	case DP_ENUM_PATH_RESOURCES:
    341 	case DP_POWER_DOWN_PHY:
    342 	case DP_POWER_UP_PHY:
    343 		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
    344 		idx++;
    345 		break;
    346 	case DP_ALLOCATE_PAYLOAD:
    347 		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
    348 			(req->u.allocate_payload.number_sdp_streams & 0xf);
    349 		idx++;
    350 		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
    351 		idx++;
    352 		buf[idx] = (req->u.allocate_payload.pbn >> 8);
    353 		idx++;
    354 		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
    355 		idx++;
    356 		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
    357 			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
    358 				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
    359 			idx++;
    360 		}
    361 		if (req->u.allocate_payload.number_sdp_streams & 1) {
    362 			i = req->u.allocate_payload.number_sdp_streams - 1;
    363 			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
    364 			idx++;
    365 		}
    366 		break;
    367 	case DP_QUERY_PAYLOAD:
    368 		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
    369 		idx++;
    370 		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
    371 		idx++;
    372 		break;
    373 	case DP_REMOTE_DPCD_READ:
    374 		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
    375 		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
    376 		idx++;
    377 		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
    378 		idx++;
    379 		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
    380 		idx++;
    381 		buf[idx] = (req->u.dpcd_read.num_bytes);
    382 		idx++;
    383 		break;
    384 
    385 	case DP_REMOTE_DPCD_WRITE:
    386 		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
    387 		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
    388 		idx++;
    389 		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
    390 		idx++;
    391 		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
    392 		idx++;
    393 		buf[idx] = (req->u.dpcd_write.num_bytes);
    394 		idx++;
    395 		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
    396 		idx += req->u.dpcd_write.num_bytes;
    397 		break;
    398 	case DP_REMOTE_I2C_READ:
    399 		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
    400 		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
    401 		idx++;
    402 		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
    403 			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
    404 			idx++;
    405 			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
    406 			idx++;
    407 			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
    408 			idx += req->u.i2c_read.transactions[i].num_bytes;
    409 
    410 			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
    411 			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
    412 			idx++;
    413 		}
    414 		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
    415 		idx++;
    416 		buf[idx] = (req->u.i2c_read.num_bytes_read);
    417 		idx++;
    418 		break;
    419 
    420 	case DP_REMOTE_I2C_WRITE:
    421 		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
    422 		idx++;
    423 		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
    424 		idx++;
    425 		buf[idx] = (req->u.i2c_write.num_bytes);
    426 		idx++;
    427 		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
    428 		idx += req->u.i2c_write.num_bytes;
    429 		break;
    430 	}
    431 	raw->cur_len = idx;
    432 }
    433 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
    434 
    435 /* Decode a sideband request we've encoded, mainly used for debugging */
    436 static int
    437 drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
    438 			   struct drm_dp_sideband_msg_req_body *req)
    439 {
    440 	const u8 *buf = raw->msg;
    441 	int i, idx = 0;
    442 
    443 	req->req_type = buf[idx++] & 0x7f;
    444 	switch (req->req_type) {
    445 	case DP_ENUM_PATH_RESOURCES:
    446 	case DP_POWER_DOWN_PHY:
    447 	case DP_POWER_UP_PHY:
    448 		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
    449 		break;
    450 	case DP_ALLOCATE_PAYLOAD:
    451 		{
    452 			struct drm_dp_allocate_payload *a =
    453 				&req->u.allocate_payload;
    454 
    455 			a->number_sdp_streams = buf[idx] & 0xf;
    456 			a->port_number = (buf[idx] >> 4) & 0xf;
    457 
    458 			WARN_ON(buf[++idx] & 0x80);
    459 			a->vcpi = buf[idx] & 0x7f;
    460 
    461 			a->pbn = buf[++idx] << 8;
    462 			a->pbn |= buf[++idx];
    463 
    464 			idx++;
    465 			for (i = 0; i < a->number_sdp_streams; i++) {
    466 				a->sdp_stream_sink[i] =
    467 					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
    468 			}
    469 		}
    470 		break;
    471 	case DP_QUERY_PAYLOAD:
    472 		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
    473 		WARN_ON(buf[++idx] & 0x80);
    474 		req->u.query_payload.vcpi = buf[idx] & 0x7f;
    475 		break;
    476 	case DP_REMOTE_DPCD_READ:
    477 		{
    478 			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
    479 
    480 			r->port_number = (buf[idx] >> 4) & 0xf;
    481 
    482 			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
    483 			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
    484 			r->dpcd_address |= buf[++idx] & 0xff;
    485 
    486 			r->num_bytes = buf[++idx];
    487 		}
    488 		break;
    489 	case DP_REMOTE_DPCD_WRITE:
    490 		{
    491 			struct drm_dp_remote_dpcd_write *w =
    492 				&req->u.dpcd_write;
    493 
    494 			w->port_number = (buf[idx] >> 4) & 0xf;
    495 
    496 			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
    497 			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
    498 			w->dpcd_address |= buf[++idx] & 0xff;
    499 
    500 			w->num_bytes = buf[++idx];
    501 
    502 			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
    503 					   GFP_KERNEL);
    504 			if (!w->bytes)
    505 				return -ENOMEM;
    506 		}
    507 		break;
    508 	case DP_REMOTE_I2C_READ:
    509 		{
    510 			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
    511 			struct drm_dp_remote_i2c_read_tx *tx;
    512 			bool failed = false;
    513 
    514 			r->num_transactions = buf[idx] & 0x3;
    515 			r->port_number = (buf[idx] >> 4) & 0xf;
    516 			for (i = 0; i < r->num_transactions; i++) {
    517 				tx = &r->transactions[i];
    518 
    519 				tx->i2c_dev_id = buf[++idx] & 0x7f;
    520 				tx->num_bytes = buf[++idx];
    521 				tx->bytes = kmemdup(&buf[++idx],
    522 						    tx->num_bytes,
    523 						    GFP_KERNEL);
    524 				if (!tx->bytes) {
    525 					failed = true;
    526 					break;
    527 				}
    528 				idx += tx->num_bytes;
    529 				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
    530 				tx->i2c_transaction_delay = buf[idx] & 0xf;
    531 			}
    532 
    533 			if (failed) {
    534 				for (i = 0; i < r->num_transactions; i++) {
    535 					tx = &r->transactions[i];
    536 					kfree(tx->bytes);
    537 				}
    538 				return -ENOMEM;
    539 			}
    540 
    541 			r->read_i2c_device_id = buf[++idx] & 0x7f;
    542 			r->num_bytes_read = buf[++idx];
    543 		}
    544 		break;
    545 	case DP_REMOTE_I2C_WRITE:
    546 		{
    547 			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
    548 
    549 			w->port_number = (buf[idx] >> 4) & 0xf;
    550 			w->write_i2c_device_id = buf[++idx] & 0x7f;
    551 			w->num_bytes = buf[++idx];
    552 			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
    553 					   GFP_KERNEL);
    554 			if (!w->bytes)
    555 				return -ENOMEM;
    556 		}
    557 		break;
    558 	}
    559 
    560 	return 0;
    561 }
    562 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
    563 
    564 static void
    565 drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
    566 				  int indent, struct drm_printer *printer)
    567 {
    568 	int i;
    569 
    570 #define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
    571 	if (req->req_type == DP_LINK_ADDRESS) {
    572 		/* No contents to print */
    573 		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
    574 		return;
    575 	}
    576 
    577 	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
    578 	indent++;
    579 
    580 	switch (req->req_type) {
    581 	case DP_ENUM_PATH_RESOURCES:
    582 	case DP_POWER_DOWN_PHY:
    583 	case DP_POWER_UP_PHY:
    584 		P("port=%d\n", req->u.port_num.port_number);
    585 		break;
    586 	case DP_ALLOCATE_PAYLOAD:
    587 		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
    588 		  req->u.allocate_payload.port_number,
    589 		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
    590 		  req->u.allocate_payload.number_sdp_streams,
    591 		  req->u.allocate_payload.number_sdp_streams,
    592 		  req->u.allocate_payload.sdp_stream_sink);
    593 		break;
    594 	case DP_QUERY_PAYLOAD:
    595 		P("port=%d vcpi=%d\n",
    596 		  req->u.query_payload.port_number,
    597 		  req->u.query_payload.vcpi);
    598 		break;
    599 	case DP_REMOTE_DPCD_READ:
    600 		P("port=%d dpcd_addr=%05x len=%d\n",
    601 		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
    602 		  req->u.dpcd_read.num_bytes);
    603 		break;
    604 	case DP_REMOTE_DPCD_WRITE:
    605 		P("port=%d addr=%05x len=%d: %*ph\n",
    606 		  req->u.dpcd_write.port_number,
    607 		  req->u.dpcd_write.dpcd_address,
    608 		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
    609 		  req->u.dpcd_write.bytes);
    610 		break;
    611 	case DP_REMOTE_I2C_READ:
    612 		P("port=%d num_tx=%d id=%d size=%d:\n",
    613 		  req->u.i2c_read.port_number,
    614 		  req->u.i2c_read.num_transactions,
    615 		  req->u.i2c_read.read_i2c_device_id,
    616 		  req->u.i2c_read.num_bytes_read);
    617 
    618 		indent++;
    619 		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
    620 			const struct drm_dp_remote_i2c_read_tx *rtx =
    621 				&req->u.i2c_read.transactions[i];
    622 
    623 			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
    624 			  i, rtx->i2c_dev_id, rtx->num_bytes,
    625 			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
    626 			  rtx->num_bytes, rtx->bytes);
    627 		}
    628 		break;
    629 	case DP_REMOTE_I2C_WRITE:
    630 		P("port=%d id=%d size=%d: %*ph\n",
    631 		  req->u.i2c_write.port_number,
    632 		  req->u.i2c_write.write_i2c_device_id,
    633 		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
    634 		  req->u.i2c_write.bytes);
    635 		break;
    636 	default:
    637 		P("???\n");
    638 		break;
    639 	}
    640 #undef P
    641 }
    642 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
    643 
    644 static inline void
    645 drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
    646 				const struct drm_dp_sideband_msg_tx *txmsg)
    647 {
    648 	struct drm_dp_sideband_msg_req_body req;
    649 	char buf[64];
    650 	int ret;
    651 	int i;
    652 
    653 	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
    654 			      sizeof(buf));
    655 	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
    656 		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
    657 		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
    658 		   txmsg->path_msg, buf);
    659 
    660 	ret = drm_dp_decode_sideband_req(txmsg, &req);
    661 	if (ret) {
    662 		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
    663 		return;
    664 	}
    665 	drm_dp_dump_sideband_msg_req_body(&req, 1, p);
    666 
    667 	switch (req.req_type) {
    668 	case DP_REMOTE_DPCD_WRITE:
    669 		kfree(req.u.dpcd_write.bytes);
    670 		break;
    671 	case DP_REMOTE_I2C_READ:
    672 		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
    673 			kfree(req.u.i2c_read.transactions[i].bytes);
    674 		break;
    675 	case DP_REMOTE_I2C_WRITE:
    676 		kfree(req.u.i2c_write.bytes);
    677 		break;
    678 	}
    679 }
    680 
    681 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
    682 {
    683 	u8 crc4;
    684 	crc4 = drm_dp_msg_data_crc4(msg, len);
    685 	msg[len] = crc4;
    686 }
    687 
    688 static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
    689 					 struct drm_dp_sideband_msg_tx *raw)
    690 {
    691 	int idx = 0;
    692 	u8 *buf = raw->msg;
    693 
    694 	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
    695 
    696 	raw->cur_len = idx;
    697 }
    698 
    699 /* this adds a chunk of msg to the builder to get the final msg */
    700 static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
    701 				      u8 *replybuf, u8 replybuflen, bool hdr)
    702 {
    703 	int ret;
    704 	u8 crc4 __unused;	/* XXX Mistake?  */
    705 
    706 	if (hdr) {
    707 		u8 hdrlen;
    708 		struct drm_dp_sideband_msg_hdr recv_hdr;
    709 		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
    710 		if (ret == false) {
    711 			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
    712 			return false;
    713 		}
    714 
    715 		/*
    716 		 * ignore out-of-order messages or messages that are part of a
    717 		 * failed transaction
    718 		 */
    719 		if (!recv_hdr.somt && !msg->have_somt)
    720 			return false;
    721 
    722 		/* get length contained in this portion */
    723 		msg->curchunk_len = recv_hdr.msg_len;
    724 		msg->curchunk_hdrlen = hdrlen;
    725 
    726 		/* we have already gotten an somt - don't bother parsing */
    727 		if (recv_hdr.somt && msg->have_somt)
    728 			return false;
    729 
    730 		if (recv_hdr.somt) {
    731 			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
    732 			msg->have_somt = true;
    733 		}
    734 		if (recv_hdr.eomt)
    735 			msg->have_eomt = true;
    736 
    737 		/* copy the bytes for the remainder of this header chunk */
    738 		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
    739 		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
    740 	} else {
    741 		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
    742 		msg->curchunk_idx += replybuflen;
    743 	}
    744 
    745 	if (msg->curchunk_idx >= msg->curchunk_len) {
    746 		/* do CRC */
    747 		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
    748 		/* copy chunk into bigger msg */
    749 		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
    750 		msg->curlen += msg->curchunk_len - 1;
    751 	}
    752 	return true;
    753 }
    754 
    755 static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
    756 					       struct drm_dp_sideband_msg_reply_body *repmsg)
    757 {
    758 	int idx = 1;
    759 	int i;
    760 	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
    761 	idx += 16;
    762 	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
    763 	idx++;
    764 	if (idx > raw->curlen)
    765 		goto fail_len;
    766 	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
    767 		if (raw->msg[idx] & 0x80)
    768 			repmsg->u.link_addr.ports[i].input_port = 1;
    769 
    770 		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
    771 		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
    772 
    773 		idx++;
    774 		if (idx > raw->curlen)
    775 			goto fail_len;
    776 		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
    777 		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
    778 		if (repmsg->u.link_addr.ports[i].input_port == 0)
    779 			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
    780 		idx++;
    781 		if (idx > raw->curlen)
    782 			goto fail_len;
    783 		if (repmsg->u.link_addr.ports[i].input_port == 0) {
    784 			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
    785 			idx++;
    786 			if (idx > raw->curlen)
    787 				goto fail_len;
    788 			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
    789 			idx += 16;
    790 			if (idx > raw->curlen)
    791 				goto fail_len;
    792 			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
    793 			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
    794 			idx++;
    795 
    796 		}
    797 		if (idx > raw->curlen)
    798 			goto fail_len;
    799 	}
    800 
    801 	return true;
    802 fail_len:
    803 	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
    804 	return false;
    805 }
    806 
    807 static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
    808 						   struct drm_dp_sideband_msg_reply_body *repmsg)
    809 {
    810 	int idx = 1;
    811 	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
    812 	idx++;
    813 	if (idx > raw->curlen)
    814 		goto fail_len;
    815 	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
    816 	idx++;
    817 	if (idx > raw->curlen)
    818 		goto fail_len;
    819 
    820 	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
    821 	return true;
    822 fail_len:
    823 	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
    824 	return false;
    825 }
    826 
    827 static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
    828 						      struct drm_dp_sideband_msg_reply_body *repmsg)
    829 {
    830 	int idx = 1;
    831 	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
    832 	idx++;
    833 	if (idx > raw->curlen)
    834 		goto fail_len;
    835 	return true;
    836 fail_len:
    837 	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
    838 	return false;
    839 }
    840 
    841 static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
    842 						      struct drm_dp_sideband_msg_reply_body *repmsg)
    843 {
    844 	int idx = 1;
    845 
    846 	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
    847 	idx++;
    848 	if (idx > raw->curlen)
    849 		goto fail_len;
    850 	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
    851 	idx++;
    852 	/* TODO check */
    853 	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
    854 	return true;
    855 fail_len:
    856 	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
    857 	return false;
    858 }
    859 
    860 static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
    861 							  struct drm_dp_sideband_msg_reply_body *repmsg)
    862 {
    863 	int idx = 1;
    864 	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
    865 	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
    866 	idx++;
    867 	if (idx > raw->curlen)
    868 		goto fail_len;
    869 	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
    870 	idx += 2;
    871 	if (idx > raw->curlen)
    872 		goto fail_len;
    873 	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
    874 	idx += 2;
    875 	if (idx > raw->curlen)
    876 		goto fail_len;
    877 	return true;
    878 fail_len:
    879 	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
    880 	return false;
    881 }
    882 
    883 static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
    884 							  struct drm_dp_sideband_msg_reply_body *repmsg)
    885 {
    886 	int idx = 1;
    887 	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
    888 	idx++;
    889 	if (idx > raw->curlen)
    890 		goto fail_len;
    891 	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
    892 	idx++;
    893 	if (idx > raw->curlen)
    894 		goto fail_len;
    895 	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
    896 	idx += 2;
    897 	if (idx > raw->curlen)
    898 		goto fail_len;
    899 	return true;
    900 fail_len:
    901 	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
    902 	return false;
    903 }
    904 
    905 static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
    906 						    struct drm_dp_sideband_msg_reply_body *repmsg)
    907 {
    908 	int idx = 1;
    909 	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
    910 	idx++;
    911 	if (idx > raw->curlen)
    912 		goto fail_len;
    913 	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
    914 	idx += 2;
    915 	if (idx > raw->curlen)
    916 		goto fail_len;
    917 	return true;
    918 fail_len:
    919 	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
    920 	return false;
    921 }
    922 
    923 static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
    924 						       struct drm_dp_sideband_msg_reply_body *repmsg)
    925 {
    926 	int idx = 1;
    927 
    928 	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
    929 	idx++;
    930 	if (idx > raw->curlen) {
    931 		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
    932 			      idx, raw->curlen);
    933 		return false;
    934 	}
    935 	return true;
    936 }
    937 
    938 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
    939 					struct drm_dp_sideband_msg_reply_body *msg)
    940 {
    941 	memset(msg, 0, sizeof(*msg));
    942 	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
    943 	msg->req_type = (raw->msg[0] & 0x7f);
    944 
    945 	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
    946 		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
    947 		msg->u.nak.reason = raw->msg[17];
    948 		msg->u.nak.nak_data = raw->msg[18];
    949 		return false;
    950 	}
    951 
    952 	switch (msg->req_type) {
    953 	case DP_LINK_ADDRESS:
    954 		return drm_dp_sideband_parse_link_address(raw, msg);
    955 	case DP_QUERY_PAYLOAD:
    956 		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
    957 	case DP_REMOTE_DPCD_READ:
    958 		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
    959 	case DP_REMOTE_DPCD_WRITE:
    960 		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
    961 	case DP_REMOTE_I2C_READ:
    962 		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
    963 	case DP_ENUM_PATH_RESOURCES:
    964 		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
    965 	case DP_ALLOCATE_PAYLOAD:
    966 		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
    967 	case DP_POWER_DOWN_PHY:
    968 	case DP_POWER_UP_PHY:
    969 		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
    970 	case DP_CLEAR_PAYLOAD_ID_TABLE:
    971 		return true; /* since there's nothing to parse */
    972 	default:
    973 		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
    974 			  drm_dp_mst_req_type_str(msg->req_type));
    975 		return false;
    976 	}
    977 }
    978 
    979 static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
    980 							   struct drm_dp_sideband_msg_req_body *msg)
    981 {
    982 	int idx = 1;
    983 
    984 	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
    985 	idx++;
    986 	if (idx > raw->curlen)
    987 		goto fail_len;
    988 
    989 	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
    990 	idx += 16;
    991 	if (idx > raw->curlen)
    992 		goto fail_len;
    993 
    994 	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
    995 	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
    996 	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
    997 	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
    998 	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
    999 	idx++;
   1000 	return true;
   1001 fail_len:
   1002 	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
   1003 	return false;
   1004 }
   1005 
   1006 static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
   1007 							   struct drm_dp_sideband_msg_req_body *msg)
   1008 {
   1009 	int idx = 1;
   1010 
   1011 	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
   1012 	idx++;
   1013 	if (idx > raw->curlen)
   1014 		goto fail_len;
   1015 
   1016 	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
   1017 	idx += 16;
   1018 	if (idx > raw->curlen)
   1019 		goto fail_len;
   1020 
   1021 	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
   1022 	idx++;
   1023 	return true;
   1024 fail_len:
   1025 	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
   1026 	return false;
   1027 }
   1028 
   1029 static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
   1030 				      struct drm_dp_sideband_msg_req_body *msg)
   1031 {
   1032 	memset(msg, 0, sizeof(*msg));
   1033 	msg->req_type = (raw->msg[0] & 0x7f);
   1034 
   1035 	switch (msg->req_type) {
   1036 	case DP_CONNECTION_STATUS_NOTIFY:
   1037 		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
   1038 	case DP_RESOURCE_STATUS_NOTIFY:
   1039 		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
   1040 	default:
   1041 		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
   1042 			  drm_dp_mst_req_type_str(msg->req_type));
   1043 		return false;
   1044 	}
   1045 }
   1046 
   1047 static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
   1048 {
   1049 	struct drm_dp_sideband_msg_req_body req;
   1050 
   1051 	req.req_type = DP_REMOTE_DPCD_WRITE;
   1052 	req.u.dpcd_write.port_number = port_num;
   1053 	req.u.dpcd_write.dpcd_address = offset;
   1054 	req.u.dpcd_write.num_bytes = num_bytes;
   1055 	req.u.dpcd_write.bytes = bytes;
   1056 	drm_dp_encode_sideband_req(&req, msg);
   1057 
   1058 	return 0;
   1059 }
   1060 
   1061 static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
   1062 {
   1063 	struct drm_dp_sideband_msg_req_body req;
   1064 
   1065 	req.req_type = DP_LINK_ADDRESS;
   1066 	drm_dp_encode_sideband_req(&req, msg);
   1067 	return 0;
   1068 }
   1069 
   1070 static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
   1071 {
   1072 	struct drm_dp_sideband_msg_req_body req;
   1073 
   1074 	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
   1075 	drm_dp_encode_sideband_req(&req, msg);
   1076 	return 0;
   1077 }
   1078 
   1079 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
   1080 {
   1081 	struct drm_dp_sideband_msg_req_body req;
   1082 
   1083 	req.req_type = DP_ENUM_PATH_RESOURCES;
   1084 	req.u.port_num.port_number = port_num;
   1085 	drm_dp_encode_sideband_req(&req, msg);
   1086 	msg->path_msg = true;
   1087 	return 0;
   1088 }
   1089 
   1090 static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
   1091 				  u8 vcpi, uint16_t pbn,
   1092 				  u8 number_sdp_streams,
   1093 				  u8 *sdp_stream_sink)
   1094 {
   1095 	struct drm_dp_sideband_msg_req_body req;
   1096 	memset(&req, 0, sizeof(req));
   1097 	req.req_type = DP_ALLOCATE_PAYLOAD;
   1098 	req.u.allocate_payload.port_number = port_num;
   1099 	req.u.allocate_payload.vcpi = vcpi;
   1100 	req.u.allocate_payload.pbn = pbn;
   1101 	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
   1102 	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
   1103 		   number_sdp_streams);
   1104 	drm_dp_encode_sideband_req(&req, msg);
   1105 	msg->path_msg = true;
   1106 	return 0;
   1107 }
   1108 
   1109 static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
   1110 				  int port_num, bool power_up)
   1111 {
   1112 	struct drm_dp_sideband_msg_req_body req;
   1113 
   1114 	if (power_up)
   1115 		req.req_type = DP_POWER_UP_PHY;
   1116 	else
   1117 		req.req_type = DP_POWER_DOWN_PHY;
   1118 
   1119 	req.u.port_num.port_number = port_num;
   1120 	drm_dp_encode_sideband_req(&req, msg);
   1121 	msg->path_msg = true;
   1122 	return 0;
   1123 }
   1124 
   1125 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
   1126 					struct drm_dp_vcpi *vcpi)
   1127 {
   1128 	int ret, vcpi_ret;
   1129 
   1130 	mutex_lock(&mgr->payload_lock);
   1131 	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
   1132 	if (ret > mgr->max_payloads) {
   1133 		ret = -EINVAL;
   1134 		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
   1135 		goto out_unlock;
   1136 	}
   1137 
   1138 	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
   1139 	if (vcpi_ret > mgr->max_payloads) {
   1140 		ret = -EINVAL;
   1141 		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
   1142 		goto out_unlock;
   1143 	}
   1144 
   1145 	set_bit(ret, &mgr->payload_mask);
   1146 	set_bit(vcpi_ret, &mgr->vcpi_mask);
   1147 	vcpi->vcpi = vcpi_ret + 1;
   1148 	mgr->proposed_vcpis[ret - 1] = vcpi;
   1149 out_unlock:
   1150 	mutex_unlock(&mgr->payload_lock);
   1151 	return ret;
   1152 }
   1153 
   1154 static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
   1155 				      int vcpi)
   1156 {
   1157 	int i;
   1158 	if (vcpi == 0)
   1159 		return;
   1160 
   1161 	mutex_lock(&mgr->payload_lock);
   1162 	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
   1163 	clear_bit(vcpi - 1, &mgr->vcpi_mask);
   1164 
   1165 	for (i = 0; i < mgr->max_payloads; i++) {
   1166 		if (mgr->proposed_vcpis[i] &&
   1167 		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
   1168 			mgr->proposed_vcpis[i] = NULL;
   1169 			clear_bit(i + 1, &mgr->payload_mask);
   1170 		}
   1171 	}
   1172 	mutex_unlock(&mgr->payload_lock);
   1173 }
   1174 
   1175 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
   1176 			      struct drm_dp_sideband_msg_tx *txmsg)
   1177 {
   1178 	unsigned int state;
   1179 
   1180 	/*
   1181 	 * All updates to txmsg->state are protected by mgr->qlock, and the two
   1182 	 * cases we check here are terminal states. For those the barriers
   1183 	 * provided by the wake_up/wait_event pair are enough.
   1184 	 */
   1185 	state = READ_ONCE(txmsg->state);
   1186 	return (state == DRM_DP_SIDEBAND_TX_RX ||
   1187 		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
   1188 }
   1189 
   1190 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
   1191 				    struct drm_dp_sideband_msg_tx *txmsg)
   1192 {
   1193 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
   1194 	int ret;
   1195 
   1196 #ifdef __NetBSD__
   1197 	mutex_lock(&mstb->mgr->qlock);
   1198 	DRM_TIMED_WAIT_UNTIL(ret, &mgr->tx_waitq, &mstb->mgr->qlock, 4*HZ,
   1199 	    check_txmsg_state(mgr, txmsg));
   1200 #else
   1201 	ret = wait_event_timeout(mgr->tx_waitq,
   1202 				 check_txmsg_state(mgr, txmsg),
   1203 				 (4 * HZ));
   1204 	mutex_lock(&mstb->mgr->qlock);
   1205 #endif
   1206 	if (ret > 0) {
   1207 		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
   1208 			ret = -EIO;
   1209 			goto out;
   1210 		}
   1211 	} else {
   1212 		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
   1213 
   1214 		/* dump some state */
   1215 		ret = -EIO;
   1216 
   1217 		/* remove from q */
   1218 		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
   1219 		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
   1220 			list_del(&txmsg->next);
   1221 		}
   1222 
   1223 		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
   1224 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
   1225 			mstb->tx_slots[txmsg->seqno] = NULL;
   1226 		}
   1227 		mgr->is_waiting_for_dwn_reply = false;
   1228 
   1229 	}
   1230 out:
   1231 	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
   1232 		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
   1233 
   1234 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
   1235 	}
   1236 	mutex_unlock(&mgr->qlock);
   1237 
   1238 	drm_dp_mst_kick_tx(mgr);
   1239 	return ret;
   1240 }
   1241 
   1242 static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
   1243 {
   1244 	struct drm_dp_mst_branch *mstb;
   1245 
   1246 	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
   1247 	if (!mstb)
   1248 		return NULL;
   1249 
   1250 	mstb->lct = lct;
   1251 	if (lct > 1)
   1252 		memcpy(mstb->rad, rad, lct / 2);
   1253 	INIT_LIST_HEAD(&mstb->ports);
   1254 	kref_init(&mstb->topology_kref);
   1255 	kref_init(&mstb->malloc_kref);
   1256 	return mstb;
   1257 }
   1258 
   1259 static void drm_dp_free_mst_branch_device(struct kref *kref)
   1260 {
   1261 	struct drm_dp_mst_branch *mstb =
   1262 		container_of(kref, struct drm_dp_mst_branch, malloc_kref);
   1263 
   1264 	if (mstb->port_parent)
   1265 		drm_dp_mst_put_port_malloc(mstb->port_parent);
   1266 
   1267 	kfree(mstb);
   1268 }
   1269 
   1270 /**
   1271  * DOC: Branch device and port refcounting
   1272  *
   1273  * Topology refcount overview
   1274  * ~~~~~~~~~~~~~~~~~~~~~~~~~~
   1275  *
   1276  * The refcounting schemes for &struct drm_dp_mst_branch and &struct
   1277  * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
   1278  * two different kinds of refcounts: topology refcounts, and malloc refcounts.
   1279  *
   1280  * Topology refcounts are not exposed to drivers, and are handled internally
   1281  * by the DP MST helpers. The helpers use them in order to prevent the
   1282  * in-memory topology state from being changed in the middle of critical
   1283  * operations like changing the internal state of payload allocations. This
   1284  * means each branch and port will be considered to be connected to the rest
   1285  * of the topology until its topology refcount reaches zero. Additionally,
   1286  * for ports this means that their associated &struct drm_connector will stay
   1287  * registered with userspace until the port's refcount reaches 0.
   1288  *
   1289  * Malloc refcount overview
   1290  * ~~~~~~~~~~~~~~~~~~~~~~~~
   1291  *
   1292  * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
   1293  * drm_dp_mst_branch allocated even after all of its topology references have
   1294  * been dropped, so that the driver or MST helpers can safely access each
   1295  * branch's last known state before it was disconnected from the topology.
   1296  * When the malloc refcount of a port or branch reaches 0, the memory
   1297  * allocation containing the &struct drm_dp_mst_branch or &struct
   1298  * drm_dp_mst_port respectively will be freed.
   1299  *
   1300  * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
   1301  * to drivers. As of writing this documentation, there are no drivers that
   1302  * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
   1303  * helpers. Exposing this API to drivers in a race-free manner would take more
   1304  * tweaking of the refcounting scheme, however patches are welcome provided
   1305  * there is a legitimate driver usecase for this.
   1306  *
   1307  * Refcount relationships in a topology
   1308  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   1309  *
   1310  * Let's take a look at why the relationship between topology and malloc
   1311  * refcounts is designed the way it is.
   1312  *
   1313  * .. kernel-figure:: dp-mst/topology-figure-1.dot
   1314  *
   1315  *    An example of topology and malloc refs in a DP MST topology with two
   1316  *    active payloads. Topology refcount increments are indicated by solid
   1317  *    lines, and malloc refcount increments are indicated by dashed lines.
   1318  *    Each starts from the branch which incremented the refcount, and ends at
   1319  *    the branch to which the refcount belongs to, i.e. the arrow points the
   1320  *    same way as the C pointers used to reference a structure.
   1321  *
   1322  * As you can see in the above figure, every branch increments the topology
   1323  * refcount of its children, and increments the malloc refcount of its
   1324  * parent. Additionally, every payload increments the malloc refcount of its
   1325  * assigned port by 1.
   1326  *
   1327  * So, what would happen if MSTB #3 from the above figure was unplugged from
   1328  * the system, but the driver hadn't yet removed payload #2 from port #3? The
   1329  * topology would start to look like the figure below.
   1330  *
   1331  * .. kernel-figure:: dp-mst/topology-figure-2.dot
   1332  *
   1333  *    Ports and branch devices which have been released from memory are
   1334  *    colored grey, and references which have been removed are colored red.
   1335  *
   1336  * Whenever a port or branch device's topology refcount reaches zero, it will
   1337  * decrement the topology refcounts of all its children, the malloc refcount
   1338  * of its parent, and finally its own malloc refcount. For MSTB #4 and port
   1339  * #4, this means they both have been disconnected from the topology and freed
   1340  * from memory. But, because payload #2 is still holding a reference to port
   1341  * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
   1342  * is still accessible from memory. This also means port #3 has not yet
   1343  * decremented the malloc refcount of MSTB #3, so its &struct
   1344  * drm_dp_mst_branch will also stay allocated in memory until port #3's
   1345  * malloc refcount reaches 0.
   1346  *
   1347  * This relationship is necessary because in order to release payload #2, we
   1348  * need to be able to figure out the last relative of port #3 that's still
   1349  * connected to the topology. In this case, we would travel up the topology as
   1350  * shown below.
   1351  *
   1352  * .. kernel-figure:: dp-mst/topology-figure-3.dot
   1353  *
   1354  * And finally, remove payload #2 by communicating with port #2 through
   1355  * sideband transactions.
   1356  */
   1357 
   1358 /**
   1359  * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
   1360  * device
   1361  * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
   1362  *
   1363  * Increments &drm_dp_mst_branch.malloc_kref. When
   1364  * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
   1365  * will be released and @mstb may no longer be used.
   1366  *
   1367  * See also: drm_dp_mst_put_mstb_malloc()
   1368  */
   1369 static void
   1370 drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
   1371 {
   1372 	kref_get(&mstb->malloc_kref);
   1373 	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
   1374 }
   1375 
   1376 /**
   1377  * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
   1378  * device
   1379  * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
   1380  *
   1381  * Decrements &drm_dp_mst_branch.malloc_kref. When
   1382  * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
   1383  * will be released and @mstb may no longer be used.
   1384  *
   1385  * See also: drm_dp_mst_get_mstb_malloc()
   1386  */
   1387 static void
   1388 drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
   1389 {
   1390 	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
   1391 	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
   1392 }
   1393 
   1394 static void drm_dp_free_mst_port(struct kref *kref)
   1395 {
   1396 	struct drm_dp_mst_port *port =
   1397 		container_of(kref, struct drm_dp_mst_port, malloc_kref);
   1398 
   1399 	drm_dp_mst_put_mstb_malloc(port->parent);
   1400 	kfree(port);
   1401 }
   1402 
   1403 /**
   1404  * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
   1405  * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
   1406  *
   1407  * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
   1408  * reaches 0, the memory allocation for @port will be released and @port may
   1409  * no longer be used.
   1410  *
   1411  * Because @port could potentially be freed at any time by the DP MST helpers
   1412  * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
   1413  * function, drivers that which to make use of &struct drm_dp_mst_port should
   1414  * ensure that they grab at least one main malloc reference to their MST ports
   1415  * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
   1416  * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
   1417  *
   1418  * See also: drm_dp_mst_put_port_malloc()
   1419  */
   1420 void
   1421 drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
   1422 {
   1423 	kref_get(&port->malloc_kref);
   1424 	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
   1425 }
   1426 EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
   1427 
   1428 /**
   1429  * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
   1430  * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
   1431  *
   1432  * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
   1433  * reaches 0, the memory allocation for @port will be released and @port may
   1434  * no longer be used.
   1435  *
   1436  * See also: drm_dp_mst_get_port_malloc()
   1437  */
   1438 void
   1439 drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
   1440 {
   1441 	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
   1442 	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
   1443 }
   1444 EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
   1445 
   1446 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
   1447 
   1448 #define STACK_DEPTH 8
   1449 
   1450 static noinline void
   1451 __topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
   1452 		    struct drm_dp_mst_topology_ref_history *history,
   1453 		    enum drm_dp_mst_topology_ref_type type)
   1454 {
   1455 	struct drm_dp_mst_topology_ref_entry *entry = NULL;
   1456 	depot_stack_handle_t backtrace;
   1457 	ulong stack_entries[STACK_DEPTH];
   1458 	uint n;
   1459 	int i;
   1460 
   1461 	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
   1462 	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
   1463 	if (!backtrace)
   1464 		return;
   1465 
   1466 	/* Try to find an existing entry for this backtrace */
   1467 	for (i = 0; i < history->len; i++) {
   1468 		if (history->entries[i].backtrace == backtrace) {
   1469 			entry = &history->entries[i];
   1470 			break;
   1471 		}
   1472 	}
   1473 
   1474 	/* Otherwise add one */
   1475 	if (!entry) {
   1476 		struct drm_dp_mst_topology_ref_entry *new;
   1477 		int new_len = history->len + 1;
   1478 
   1479 		new = krealloc(history->entries, sizeof(*new) * new_len,
   1480 			       GFP_KERNEL);
   1481 		if (!new)
   1482 			return;
   1483 
   1484 		entry = &new[history->len];
   1485 		history->len = new_len;
   1486 		history->entries = new;
   1487 
   1488 		entry->backtrace = backtrace;
   1489 		entry->type = type;
   1490 		entry->count = 0;
   1491 	}
   1492 	entry->count++;
   1493 	entry->ts_nsec = ktime_get_ns();
   1494 }
   1495 
   1496 static int
   1497 topology_ref_history_cmp(const void *a, const void *b)
   1498 {
   1499 	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
   1500 
   1501 	if (entry_a->ts_nsec > entry_b->ts_nsec)
   1502 		return 1;
   1503 	else if (entry_a->ts_nsec < entry_b->ts_nsec)
   1504 		return -1;
   1505 	else
   1506 		return 0;
   1507 }
   1508 
   1509 static inline const char *
   1510 topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
   1511 {
   1512 	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
   1513 		return "get";
   1514 	else
   1515 		return "put";
   1516 }
   1517 
   1518 static void
   1519 __dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
   1520 			    void *ptr, const char *type_str)
   1521 {
   1522 	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
   1523 	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
   1524 	int i;
   1525 
   1526 	if (!buf)
   1527 		return;
   1528 
   1529 	if (!history->len)
   1530 		goto out;
   1531 
   1532 	/* First, sort the list so that it goes from oldest to newest
   1533 	 * reference entry
   1534 	 */
   1535 	sort(history->entries, history->len, sizeof(*history->entries),
   1536 	     topology_ref_history_cmp, NULL);
   1537 
   1538 	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
   1539 		   type_str, ptr);
   1540 
   1541 	for (i = 0; i < history->len; i++) {
   1542 		const struct drm_dp_mst_topology_ref_entry *entry =
   1543 			&history->entries[i];
   1544 		ulong *entries;
   1545 		uint nr_entries;
   1546 		u64 ts_nsec = entry->ts_nsec;
   1547 		u32 rem_nsec = do_div(ts_nsec, 1000000000);
   1548 
   1549 		nr_entries = stack_depot_fetch(entry->backtrace, &entries);
   1550 		stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
   1551 
   1552 		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
   1553 			   entry->count,
   1554 			   topology_ref_type_to_str(entry->type),
   1555 			   ts_nsec, rem_nsec / 1000, buf);
   1556 	}
   1557 
   1558 	/* Now free the history, since this is the only time we expose it */
   1559 	kfree(history->entries);
   1560 out:
   1561 	kfree(buf);
   1562 }
   1563 
   1564 static __always_inline void
   1565 drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
   1566 {
   1567 	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
   1568 				    "MSTB");
   1569 }
   1570 
   1571 static __always_inline void
   1572 drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
   1573 {
   1574 	__dump_topology_ref_history(&port->topology_ref_history, port,
   1575 				    "Port");
   1576 }
   1577 
   1578 static __always_inline void
   1579 save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
   1580 		       enum drm_dp_mst_topology_ref_type type)
   1581 {
   1582 	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
   1583 }
   1584 
   1585 static __always_inline void
   1586 save_port_topology_ref(struct drm_dp_mst_port *port,
   1587 		       enum drm_dp_mst_topology_ref_type type)
   1588 {
   1589 	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
   1590 }
   1591 
   1592 static inline void
   1593 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
   1594 {
   1595 	mutex_lock(&mgr->topology_ref_history_lock);
   1596 }
   1597 
   1598 static inline void
   1599 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
   1600 {
   1601 	mutex_unlock(&mgr->topology_ref_history_lock);
   1602 }
   1603 #else
   1604 static inline void
   1605 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
   1606 static inline void
   1607 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
   1608 static inline void
   1609 drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
   1610 static inline void
   1611 drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
   1612 #define save_mstb_topology_ref(mstb, type)
   1613 #define save_port_topology_ref(port, type)
   1614 #endif
   1615 
   1616 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
   1617 {
   1618 	struct drm_dp_mst_branch *mstb =
   1619 		container_of(kref, struct drm_dp_mst_branch, topology_kref);
   1620 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
   1621 
   1622 	drm_dp_mst_dump_mstb_topology_history(mstb);
   1623 
   1624 	INIT_LIST_HEAD(&mstb->destroy_next);
   1625 
   1626 	/*
   1627 	 * This can get called under mgr->mutex, so we need to perform the
   1628 	 * actual destruction of the mstb in another worker
   1629 	 */
   1630 	mutex_lock(&mgr->delayed_destroy_lock);
   1631 	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
   1632 	mutex_unlock(&mgr->delayed_destroy_lock);
   1633 	schedule_work(&mgr->delayed_destroy_work);
   1634 }
   1635 
   1636 /**
   1637  * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
   1638  * branch device unless it's zero
   1639  * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
   1640  *
   1641  * Attempts to grab a topology reference to @mstb, if it hasn't yet been
   1642  * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
   1643  * reached 0). Holding a topology reference implies that a malloc reference
   1644  * will be held to @mstb as long as the user holds the topology reference.
   1645  *
   1646  * Care should be taken to ensure that the user has at least one malloc
   1647  * reference to @mstb. If you already have a topology reference to @mstb, you
   1648  * should use drm_dp_mst_topology_get_mstb() instead.
   1649  *
   1650  * See also:
   1651  * drm_dp_mst_topology_get_mstb()
   1652  * drm_dp_mst_topology_put_mstb()
   1653  *
   1654  * Returns:
   1655  * * 1: A topology reference was grabbed successfully
   1656  * * 0: @port is no longer in the topology, no reference was grabbed
   1657  */
   1658 static int __must_check
   1659 drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
   1660 {
   1661 	int ret;
   1662 
   1663 	topology_ref_history_lock(mstb->mgr);
   1664 	ret = kref_get_unless_zero(&mstb->topology_kref);
   1665 	if (ret) {
   1666 		DRM_DEBUG("mstb %p (%d)\n",
   1667 			  mstb, kref_read(&mstb->topology_kref));
   1668 		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
   1669 	}
   1670 
   1671 	topology_ref_history_unlock(mstb->mgr);
   1672 
   1673 	return ret;
   1674 }
   1675 
   1676 /**
   1677  * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
   1678  * branch device
   1679  * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
   1680  *
   1681  * Increments &drm_dp_mst_branch.topology_refcount without checking whether or
   1682  * not it's already reached 0. This is only valid to use in scenarios where
   1683  * you are already guaranteed to have at least one active topology reference
   1684  * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
   1685  *
   1686  * See also:
   1687  * drm_dp_mst_topology_try_get_mstb()
   1688  * drm_dp_mst_topology_put_mstb()
   1689  */
   1690 static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
   1691 {
   1692 	topology_ref_history_lock(mstb->mgr);
   1693 
   1694 	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
   1695 	WARN_ON(kref_read(&mstb->topology_kref) == 0);
   1696 	kref_get(&mstb->topology_kref);
   1697 	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
   1698 
   1699 	topology_ref_history_unlock(mstb->mgr);
   1700 }
   1701 
   1702 /**
   1703  * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
   1704  * device
   1705  * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
   1706  *
   1707  * Releases a topology reference from @mstb by decrementing
   1708  * &drm_dp_mst_branch.topology_kref.
   1709  *
   1710  * See also:
   1711  * drm_dp_mst_topology_try_get_mstb()
   1712  * drm_dp_mst_topology_get_mstb()
   1713  */
   1714 static void
   1715 drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
   1716 {
   1717 	topology_ref_history_lock(mstb->mgr);
   1718 
   1719 	DRM_DEBUG("mstb %p (%d)\n",
   1720 		  mstb, kref_read(&mstb->topology_kref) - 1);
   1721 	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
   1722 
   1723 	topology_ref_history_unlock(mstb->mgr);
   1724 	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
   1725 }
   1726 
   1727 static void drm_dp_destroy_port(struct kref *kref)
   1728 {
   1729 	struct drm_dp_mst_port *port =
   1730 		container_of(kref, struct drm_dp_mst_port, topology_kref);
   1731 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   1732 
   1733 	drm_dp_mst_dump_port_topology_history(port);
   1734 
   1735 	/* There's nothing that needs locking to destroy an input port yet */
   1736 	if (port->input) {
   1737 		drm_dp_mst_put_port_malloc(port);
   1738 		return;
   1739 	}
   1740 
   1741 	kfree(port->cached_edid);
   1742 
   1743 	/*
   1744 	 * we can't destroy the connector here, as we might be holding the
   1745 	 * mode_config.mutex from an EDID retrieval
   1746 	 */
   1747 	mutex_lock(&mgr->delayed_destroy_lock);
   1748 	list_add(&port->next, &mgr->destroy_port_list);
   1749 	mutex_unlock(&mgr->delayed_destroy_lock);
   1750 	schedule_work(&mgr->delayed_destroy_work);
   1751 }
   1752 
   1753 /**
   1754  * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
   1755  * port unless it's zero
   1756  * @port: &struct drm_dp_mst_port to increment the topology refcount of
   1757  *
   1758  * Attempts to grab a topology reference to @port, if it hasn't yet been
   1759  * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
   1760  * 0). Holding a topology reference implies that a malloc reference will be
   1761  * held to @port as long as the user holds the topology reference.
   1762  *
   1763  * Care should be taken to ensure that the user has at least one malloc
   1764  * reference to @port. If you already have a topology reference to @port, you
   1765  * should use drm_dp_mst_topology_get_port() instead.
   1766  *
   1767  * See also:
   1768  * drm_dp_mst_topology_get_port()
   1769  * drm_dp_mst_topology_put_port()
   1770  *
   1771  * Returns:
   1772  * * 1: A topology reference was grabbed successfully
   1773  * * 0: @port is no longer in the topology, no reference was grabbed
   1774  */
   1775 static int __must_check
   1776 drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
   1777 {
   1778 	int ret;
   1779 
   1780 	topology_ref_history_lock(port->mgr);
   1781 	ret = kref_get_unless_zero(&port->topology_kref);
   1782 	if (ret) {
   1783 		DRM_DEBUG("port %p (%d)\n",
   1784 			  port, kref_read(&port->topology_kref));
   1785 		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
   1786 	}
   1787 
   1788 	topology_ref_history_unlock(port->mgr);
   1789 	return ret;
   1790 }
   1791 
   1792 /**
   1793  * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
   1794  * @port: The &struct drm_dp_mst_port to increment the topology refcount of
   1795  *
   1796  * Increments &drm_dp_mst_port.topology_refcount without checking whether or
   1797  * not it's already reached 0. This is only valid to use in scenarios where
   1798  * you are already guaranteed to have at least one active topology reference
   1799  * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
   1800  *
   1801  * See also:
   1802  * drm_dp_mst_topology_try_get_port()
   1803  * drm_dp_mst_topology_put_port()
   1804  */
   1805 static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
   1806 {
   1807 	topology_ref_history_lock(port->mgr);
   1808 
   1809 	WARN_ON(kref_read(&port->topology_kref) == 0);
   1810 	kref_get(&port->topology_kref);
   1811 	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
   1812 	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
   1813 
   1814 	topology_ref_history_unlock(port->mgr);
   1815 }
   1816 
   1817 /**
   1818  * drm_dp_mst_topology_put_port() - release a topology reference to a port
   1819  * @port: The &struct drm_dp_mst_port to release the topology reference from
   1820  *
   1821  * Releases a topology reference from @port by decrementing
   1822  * &drm_dp_mst_port.topology_kref.
   1823  *
   1824  * See also:
   1825  * drm_dp_mst_topology_try_get_port()
   1826  * drm_dp_mst_topology_get_port()
   1827  */
   1828 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
   1829 {
   1830 	topology_ref_history_lock(port->mgr);
   1831 
   1832 	DRM_DEBUG("port %p (%d)\n",
   1833 		  port, kref_read(&port->topology_kref) - 1);
   1834 	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
   1835 
   1836 	topology_ref_history_unlock(port->mgr);
   1837 	kref_put(&port->topology_kref, drm_dp_destroy_port);
   1838 }
   1839 
   1840 static struct drm_dp_mst_branch *
   1841 drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
   1842 					      struct drm_dp_mst_branch *to_find)
   1843 {
   1844 	struct drm_dp_mst_port *port;
   1845 	struct drm_dp_mst_branch *rmstb;
   1846 
   1847 	if (to_find == mstb)
   1848 		return mstb;
   1849 
   1850 	list_for_each_entry(port, &mstb->ports, next) {
   1851 		if (port->mstb) {
   1852 			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
   1853 			    port->mstb, to_find);
   1854 			if (rmstb)
   1855 				return rmstb;
   1856 		}
   1857 	}
   1858 	return NULL;
   1859 }
   1860 
   1861 static struct drm_dp_mst_branch *
   1862 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
   1863 				       struct drm_dp_mst_branch *mstb)
   1864 {
   1865 	struct drm_dp_mst_branch *rmstb = NULL;
   1866 
   1867 	mutex_lock(&mgr->lock);
   1868 	if (mgr->mst_primary) {
   1869 		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
   1870 		    mgr->mst_primary, mstb);
   1871 
   1872 		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
   1873 			rmstb = NULL;
   1874 	}
   1875 	mutex_unlock(&mgr->lock);
   1876 	return rmstb;
   1877 }
   1878 
   1879 static struct drm_dp_mst_port *
   1880 drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
   1881 					      struct drm_dp_mst_port *to_find)
   1882 {
   1883 	struct drm_dp_mst_port *port, *mport;
   1884 
   1885 	list_for_each_entry(port, &mstb->ports, next) {
   1886 		if (port == to_find)
   1887 			return port;
   1888 
   1889 		if (port->mstb) {
   1890 			mport = drm_dp_mst_topology_get_port_validated_locked(
   1891 			    port->mstb, to_find);
   1892 			if (mport)
   1893 				return mport;
   1894 		}
   1895 	}
   1896 	return NULL;
   1897 }
   1898 
   1899 static struct drm_dp_mst_port *
   1900 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
   1901 				       struct drm_dp_mst_port *port)
   1902 {
   1903 	struct drm_dp_mst_port *rport = NULL;
   1904 
   1905 	mutex_lock(&mgr->lock);
   1906 	if (mgr->mst_primary) {
   1907 		rport = drm_dp_mst_topology_get_port_validated_locked(
   1908 		    mgr->mst_primary, port);
   1909 
   1910 		if (rport && !drm_dp_mst_topology_try_get_port(rport))
   1911 			rport = NULL;
   1912 	}
   1913 	mutex_unlock(&mgr->lock);
   1914 	return rport;
   1915 }
   1916 
   1917 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
   1918 {
   1919 	struct drm_dp_mst_port *port;
   1920 	int ret;
   1921 
   1922 	list_for_each_entry(port, &mstb->ports, next) {
   1923 		if (port->port_num == port_num) {
   1924 			ret = drm_dp_mst_topology_try_get_port(port);
   1925 			return ret ? port : NULL;
   1926 		}
   1927 	}
   1928 
   1929 	return NULL;
   1930 }
   1931 
   1932 /*
   1933  * calculate a new RAD for this MST branch device
   1934  * if parent has an LCT of 2 then it has 1 nibble of RAD,
   1935  * if parent has an LCT of 3 then it has 2 nibbles of RAD,
   1936  */
   1937 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
   1938 				 u8 *rad)
   1939 {
   1940 	int parent_lct = port->parent->lct;
   1941 	int shift = 4;
   1942 	int idx = (parent_lct - 1) / 2;
   1943 	if (parent_lct > 1) {
   1944 		memcpy(rad, port->parent->rad, idx + 1);
   1945 		shift = (parent_lct % 2) ? 4 : 0;
   1946 	} else
   1947 		rad[0] = 0;
   1948 
   1949 	rad[idx] |= port->port_num << shift;
   1950 	return parent_lct + 1;
   1951 }
   1952 
   1953 static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
   1954 {
   1955 	switch (pdt) {
   1956 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
   1957 	case DP_PEER_DEVICE_SST_SINK:
   1958 		return true;
   1959 	case DP_PEER_DEVICE_MST_BRANCHING:
   1960 		/* For sst branch device */
   1961 		if (!mcs)
   1962 			return true;
   1963 
   1964 		return false;
   1965 	}
   1966 	return true;
   1967 }
   1968 
   1969 static int
   1970 drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
   1971 		    bool new_mcs)
   1972 {
   1973 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   1974 	struct drm_dp_mst_branch *mstb;
   1975 	u8 rad[8], lct;
   1976 	int ret = 0;
   1977 
   1978 	if (port->pdt == new_pdt && port->mcs == new_mcs)
   1979 		return 0;
   1980 
   1981 	/* Teardown the old pdt, if there is one */
   1982 	if (port->pdt != DP_PEER_DEVICE_NONE) {
   1983 		if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
   1984 			/*
   1985 			 * If the new PDT would also have an i2c bus,
   1986 			 * don't bother with reregistering it
   1987 			 */
   1988 			if (new_pdt != DP_PEER_DEVICE_NONE &&
   1989 			    drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
   1990 				port->pdt = new_pdt;
   1991 				port->mcs = new_mcs;
   1992 				return 0;
   1993 			}
   1994 
   1995 			/* remove i2c over sideband */
   1996 			drm_dp_mst_unregister_i2c_bus(&port->aux);
   1997 		} else {
   1998 			mutex_lock(&mgr->lock);
   1999 			drm_dp_mst_topology_put_mstb(port->mstb);
   2000 			port->mstb = NULL;
   2001 			mutex_unlock(&mgr->lock);
   2002 		}
   2003 	}
   2004 
   2005 	port->pdt = new_pdt;
   2006 	port->mcs = new_mcs;
   2007 
   2008 	if (port->pdt != DP_PEER_DEVICE_NONE) {
   2009 		if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
   2010 			/* add i2c over sideband */
   2011 			ret = drm_dp_mst_register_i2c_bus(&port->aux);
   2012 		} else {
   2013 			lct = drm_dp_calculate_rad(port, rad);
   2014 			mstb = drm_dp_add_mst_branch_device(lct, rad);
   2015 			if (!mstb) {
   2016 				ret = -ENOMEM;
   2017 				DRM_ERROR("Failed to create MSTB for port %p",
   2018 					  port);
   2019 				goto out;
   2020 			}
   2021 
   2022 			mutex_lock(&mgr->lock);
   2023 			port->mstb = mstb;
   2024 			mstb->mgr = port->mgr;
   2025 			mstb->port_parent = port;
   2026 
   2027 			/*
   2028 			 * Make sure this port's memory allocation stays
   2029 			 * around until its child MSTB releases it
   2030 			 */
   2031 			drm_dp_mst_get_port_malloc(port);
   2032 			mutex_unlock(&mgr->lock);
   2033 
   2034 			/* And make sure we send a link address for this */
   2035 			ret = 1;
   2036 		}
   2037 	}
   2038 
   2039 out:
   2040 	if (ret < 0)
   2041 		port->pdt = DP_PEER_DEVICE_NONE;
   2042 	return ret;
   2043 }
   2044 
   2045 /**
   2046  * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
   2047  * @aux: Fake sideband AUX CH
   2048  * @offset: address of the (first) register to read
   2049  * @buffer: buffer to store the register values
   2050  * @size: number of bytes in @buffer
   2051  *
   2052  * Performs the same functionality for remote devices via
   2053  * sideband messaging as drm_dp_dpcd_read() does for local
   2054  * devices via actual AUX CH.
   2055  *
   2056  * Return: Number of bytes read, or negative error code on failure.
   2057  */
   2058 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
   2059 			     unsigned int offset, void *buffer, size_t size)
   2060 {
   2061 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
   2062 						    aux);
   2063 
   2064 	return drm_dp_send_dpcd_read(port->mgr, port,
   2065 				     offset, size, buffer);
   2066 }
   2067 
   2068 /**
   2069  * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
   2070  * @aux: Fake sideband AUX CH
   2071  * @offset: address of the (first) register to write
   2072  * @buffer: buffer containing the values to write
   2073  * @size: number of bytes in @buffer
   2074  *
   2075  * Performs the same functionality for remote devices via
   2076  * sideband messaging as drm_dp_dpcd_write() does for local
   2077  * devices via actual AUX CH.
   2078  *
   2079  * Return: 0 on success, negative error code on failure.
   2080  */
   2081 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
   2082 			      unsigned int offset, void *buffer, size_t size)
   2083 {
   2084 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
   2085 						    aux);
   2086 
   2087 	return drm_dp_send_dpcd_write(port->mgr, port,
   2088 				      offset, size, buffer);
   2089 }
   2090 
   2091 static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
   2092 {
   2093 	int ret __unused;
   2094 
   2095 	memcpy(mstb->guid, guid, 16);
   2096 
   2097 	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
   2098 		if (mstb->port_parent) {
   2099 			ret = drm_dp_send_dpcd_write(
   2100 					mstb->mgr,
   2101 					mstb->port_parent,
   2102 					DP_GUID,
   2103 					16,
   2104 					mstb->guid);
   2105 		} else {
   2106 
   2107 			ret = drm_dp_dpcd_write(
   2108 					mstb->mgr->aux,
   2109 					DP_GUID,
   2110 					mstb->guid,
   2111 					16);
   2112 		}
   2113 	}
   2114 }
   2115 
   2116 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
   2117 				int pnum,
   2118 				char *proppath,
   2119 				size_t proppath_size)
   2120 {
   2121 	int i;
   2122 	char temp[8];
   2123 	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
   2124 	for (i = 0; i < (mstb->lct - 1); i++) {
   2125 		int shift = (i % 2) ? 0 : 4;
   2126 		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
   2127 		snprintf(temp, sizeof(temp), "-%d", port_num);
   2128 		strlcat(proppath, temp, proppath_size);
   2129 	}
   2130 	snprintf(temp, sizeof(temp), "-%d", pnum);
   2131 	strlcat(proppath, temp, proppath_size);
   2132 }
   2133 
   2134 /**
   2135  * drm_dp_mst_connector_late_register() - Late MST connector registration
   2136  * @connector: The MST connector
   2137  * @port: The MST port for this connector
   2138  *
   2139  * Helper to register the remote aux device for this MST port. Drivers should
   2140  * call this from their mst connector's late_register hook to enable MST aux
   2141  * devices.
   2142  *
   2143  * Return: 0 on success, negative error code on failure.
   2144  */
   2145 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
   2146 				       struct drm_dp_mst_port *port)
   2147 {
   2148 	DRM_DEBUG_KMS("registering %s remote bus for %s\n",
   2149 		      port->aux.name, device_xname(connector->dev->dev));
   2150 
   2151 	port->aux.dev = connector->kdev;
   2152 	return drm_dp_aux_register_devnode(&port->aux);
   2153 }
   2154 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
   2155 
   2156 /**
   2157  * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
   2158  * @connector: The MST connector
   2159  * @port: The MST port for this connector
   2160  *
   2161  * Helper to unregister the remote aux device for this MST port, registered by
   2162  * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
   2163  * connector's early_unregister hook.
   2164  */
   2165 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
   2166 					   struct drm_dp_mst_port *port)
   2167 {
   2168 	DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
   2169 		      port->aux.name, device_xname(connector->dev->dev));
   2170 	drm_dp_aux_unregister_devnode(&port->aux);
   2171 }
   2172 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
   2173 
   2174 static void
   2175 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
   2176 			      struct drm_dp_mst_port *port)
   2177 {
   2178 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   2179 	char proppath[255];
   2180 	int ret;
   2181 
   2182 	build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
   2183 	port->connector = mgr->cbs->add_connector(mgr, port, proppath);
   2184 	if (!port->connector) {
   2185 		ret = -ENOMEM;
   2186 		goto error;
   2187 	}
   2188 
   2189 	if (port->pdt != DP_PEER_DEVICE_NONE &&
   2190 	    drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
   2191 		port->cached_edid = drm_get_edid(port->connector,
   2192 						 &port->aux.ddc);
   2193 		drm_connector_set_tile_property(port->connector);
   2194 	}
   2195 
   2196 	mgr->cbs->register_connector(port->connector);
   2197 	return;
   2198 
   2199 error:
   2200 	DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
   2201 }
   2202 
   2203 /*
   2204  * Drop a topology reference, and unlink the port from the in-memory topology
   2205  * layout
   2206  */
   2207 static void
   2208 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
   2209 				struct drm_dp_mst_port *port)
   2210 {
   2211 	mutex_lock(&mgr->lock);
   2212 	port->parent->num_ports--;
   2213 	list_del(&port->next);
   2214 	mutex_unlock(&mgr->lock);
   2215 	drm_dp_mst_topology_put_port(port);
   2216 }
   2217 
   2218 static struct drm_dp_mst_port *
   2219 drm_dp_mst_add_port(struct drm_device *dev,
   2220 		    struct drm_dp_mst_topology_mgr *mgr,
   2221 		    struct drm_dp_mst_branch *mstb, u8 port_number)
   2222 {
   2223 	struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
   2224 
   2225 	if (!port)
   2226 		return NULL;
   2227 
   2228 	kref_init(&port->topology_kref);
   2229 	kref_init(&port->malloc_kref);
   2230 	port->parent = mstb;
   2231 	port->port_num = port_number;
   2232 	port->mgr = mgr;
   2233 	port->aux.name = "DPMST";
   2234 	port->aux.dev = dev->dev;
   2235 	port->aux.is_remote = true;
   2236 
   2237 	/* initialize the MST downstream port's AUX crc work queue */
   2238 	drm_dp_remote_aux_init(&port->aux);
   2239 
   2240 	/*
   2241 	 * Make sure the memory allocation for our parent branch stays
   2242 	 * around until our own memory allocation is released
   2243 	 */
   2244 	drm_dp_mst_get_mstb_malloc(mstb);
   2245 
   2246 	return port;
   2247 }
   2248 
   2249 static int
   2250 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
   2251 				    struct drm_device *dev,
   2252 				    struct drm_dp_link_addr_reply_port *port_msg)
   2253 {
   2254 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
   2255 	struct drm_dp_mst_port *port;
   2256 	int old_ddps = 0, ret;
   2257 	u8 new_pdt = DP_PEER_DEVICE_NONE;
   2258 	bool new_mcs = 0;
   2259 	bool created = false, send_link_addr = false, changed = false;
   2260 
   2261 	port = drm_dp_get_port(mstb, port_msg->port_number);
   2262 	if (!port) {
   2263 		port = drm_dp_mst_add_port(dev, mgr, mstb,
   2264 					   port_msg->port_number);
   2265 		if (!port)
   2266 			return -ENOMEM;
   2267 		created = true;
   2268 		changed = true;
   2269 	} else if (!port->input && port_msg->input_port && port->connector) {
   2270 		/* Since port->connector can't be changed here, we create a
   2271 		 * new port if input_port changes from 0 to 1
   2272 		 */
   2273 		drm_dp_mst_topology_unlink_port(mgr, port);
   2274 		drm_dp_mst_topology_put_port(port);
   2275 		port = drm_dp_mst_add_port(dev, mgr, mstb,
   2276 					   port_msg->port_number);
   2277 		if (!port)
   2278 			return -ENOMEM;
   2279 		changed = true;
   2280 		created = true;
   2281 	} else if (port->input && !port_msg->input_port) {
   2282 		changed = true;
   2283 	} else if (port->connector) {
   2284 		/* We're updating a port that's exposed to userspace, so do it
   2285 		 * under lock
   2286 		 */
   2287 		drm_modeset_lock(&mgr->base.lock, NULL);
   2288 
   2289 		old_ddps = port->ddps;
   2290 		changed = port->ddps != port_msg->ddps ||
   2291 			(port->ddps &&
   2292 			 (port->ldps != port_msg->legacy_device_plug_status ||
   2293 			  port->dpcd_rev != port_msg->dpcd_revision ||
   2294 			  port->mcs != port_msg->mcs ||
   2295 			  port->pdt != port_msg->peer_device_type ||
   2296 			  port->num_sdp_stream_sinks !=
   2297 			  port_msg->num_sdp_stream_sinks));
   2298 	}
   2299 
   2300 	port->input = port_msg->input_port;
   2301 	if (!port->input)
   2302 		new_pdt = port_msg->peer_device_type;
   2303 	new_mcs = port_msg->mcs;
   2304 	port->ddps = port_msg->ddps;
   2305 	port->ldps = port_msg->legacy_device_plug_status;
   2306 	port->dpcd_rev = port_msg->dpcd_revision;
   2307 	port->num_sdp_streams = port_msg->num_sdp_streams;
   2308 	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
   2309 
   2310 	/* manage mstb port lists with mgr lock - take a reference
   2311 	   for this list */
   2312 	if (created) {
   2313 		mutex_lock(&mgr->lock);
   2314 		drm_dp_mst_topology_get_port(port);
   2315 		list_add(&port->next, &mstb->ports);
   2316 		mstb->num_ports++;
   2317 		mutex_unlock(&mgr->lock);
   2318 	}
   2319 
   2320 	if (old_ddps != port->ddps) {
   2321 		if (port->ddps) {
   2322 			if (!port->input) {
   2323 				drm_dp_send_enum_path_resources(mgr, mstb,
   2324 								port);
   2325 			}
   2326 		} else {
   2327 			port->available_pbn = 0;
   2328 		}
   2329 	}
   2330 
   2331 	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
   2332 	if (ret == 1) {
   2333 		send_link_addr = true;
   2334 	} else if (ret < 0) {
   2335 		DRM_ERROR("Failed to change PDT on port %p: %d\n",
   2336 			  port, ret);
   2337 		goto fail;
   2338 	}
   2339 
   2340 	/*
   2341 	 * If this port wasn't just created, then we're reprobing because
   2342 	 * we're coming out of suspend. In this case, always resend the link
   2343 	 * address if there's an MSTB on this port
   2344 	 */
   2345 	if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
   2346 	    port->mcs)
   2347 		send_link_addr = true;
   2348 
   2349 	if (port->connector)
   2350 		drm_modeset_unlock(&mgr->base.lock);
   2351 	else if (!port->input)
   2352 		drm_dp_mst_port_add_connector(mstb, port);
   2353 
   2354 	if (send_link_addr && port->mstb) {
   2355 		ret = drm_dp_send_link_address(mgr, port->mstb);
   2356 		if (ret == 1) /* MSTB below us changed */
   2357 			changed = true;
   2358 		else if (ret < 0)
   2359 			goto fail_put;
   2360 	}
   2361 
   2362 	/* put reference to this port */
   2363 	drm_dp_mst_topology_put_port(port);
   2364 	return changed;
   2365 
   2366 fail:
   2367 	drm_dp_mst_topology_unlink_port(mgr, port);
   2368 	if (port->connector)
   2369 		drm_modeset_unlock(&mgr->base.lock);
   2370 fail_put:
   2371 	drm_dp_mst_topology_put_port(port);
   2372 	return ret;
   2373 }
   2374 
   2375 static void
   2376 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
   2377 			    struct drm_dp_connection_status_notify *conn_stat)
   2378 {
   2379 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
   2380 	struct drm_dp_mst_port *port;
   2381 	int old_ddps, old_input, ret, i;
   2382 	u8 new_pdt;
   2383 	bool new_mcs;
   2384 	bool dowork = false, create_connector = false;
   2385 
   2386 	port = drm_dp_get_port(mstb, conn_stat->port_number);
   2387 	if (!port)
   2388 		return;
   2389 
   2390 	if (port->connector) {
   2391 		if (!port->input && conn_stat->input_port) {
   2392 			/*
   2393 			 * We can't remove a connector from an already exposed
   2394 			 * port, so just throw the port out and make sure we
   2395 			 * reprobe the link address of it's parent MSTB
   2396 			 */
   2397 			drm_dp_mst_topology_unlink_port(mgr, port);
   2398 			mstb->link_address_sent = false;
   2399 			dowork = true;
   2400 			goto out;
   2401 		}
   2402 
   2403 		/* Locking is only needed if the port's exposed to userspace */
   2404 		drm_modeset_lock(&mgr->base.lock, NULL);
   2405 	} else if (port->input && !conn_stat->input_port) {
   2406 		create_connector = true;
   2407 		/* Reprobe link address so we get num_sdp_streams */
   2408 		mstb->link_address_sent = false;
   2409 		dowork = true;
   2410 	}
   2411 
   2412 	old_ddps = port->ddps;
   2413 	old_input = port->input;
   2414 	port->input = conn_stat->input_port;
   2415 	port->ldps = conn_stat->legacy_device_plug_status;
   2416 	port->ddps = conn_stat->displayport_device_plug_status;
   2417 
   2418 	if (old_ddps != port->ddps) {
   2419 		if (port->ddps) {
   2420 			dowork = true;
   2421 		} else {
   2422 			port->available_pbn = 0;
   2423 		}
   2424 	}
   2425 
   2426 	new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
   2427 	new_mcs = conn_stat->message_capability_status;
   2428 	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
   2429 	if (ret == 1) {
   2430 		dowork = true;
   2431 	} else if (ret < 0) {
   2432 		DRM_ERROR("Failed to change PDT for port %p: %d\n",
   2433 			  port, ret);
   2434 		dowork = false;
   2435 	}
   2436 
   2437 	if (!old_input && old_ddps != port->ddps && !port->ddps) {
   2438 		for (i = 0; i < mgr->max_payloads; i++) {
   2439 			struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
   2440 			struct drm_dp_mst_port *port_validated;
   2441 
   2442 			if (!vcpi)
   2443 				continue;
   2444 
   2445 			port_validated =
   2446 				container_of(vcpi, struct drm_dp_mst_port, vcpi);
   2447 			port_validated =
   2448 				drm_dp_mst_topology_get_port_validated(mgr, port_validated);
   2449 			if (!port_validated) {
   2450 				mutex_lock(&mgr->payload_lock);
   2451 				vcpi->num_slots = 0;
   2452 				mutex_unlock(&mgr->payload_lock);
   2453 			} else {
   2454 				drm_dp_mst_topology_put_port(port_validated);
   2455 			}
   2456 		}
   2457 	}
   2458 
   2459 	if (port->connector)
   2460 		drm_modeset_unlock(&mgr->base.lock);
   2461 	else if (create_connector)
   2462 		drm_dp_mst_port_add_connector(mstb, port);
   2463 
   2464 out:
   2465 	drm_dp_mst_topology_put_port(port);
   2466 	if (dowork)
   2467 		queue_work(system_long_wq, &mstb->mgr->work);
   2468 }
   2469 
   2470 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
   2471 							       u8 lct, u8 *rad)
   2472 {
   2473 	struct drm_dp_mst_branch *mstb;
   2474 	struct drm_dp_mst_port *port;
   2475 	int i, ret;
   2476 	/* find the port by iterating down */
   2477 
   2478 	mutex_lock(&mgr->lock);
   2479 	mstb = mgr->mst_primary;
   2480 
   2481 	if (!mstb)
   2482 		goto out;
   2483 
   2484 	for (i = 0; i < lct - 1; i++) {
   2485 		int shift = (i % 2) ? 0 : 4;
   2486 		int port_num = (rad[i / 2] >> shift) & 0xf;
   2487 
   2488 		list_for_each_entry(port, &mstb->ports, next) {
   2489 			if (port->port_num == port_num) {
   2490 				mstb = port->mstb;
   2491 				if (!mstb) {
   2492 					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
   2493 					goto out;
   2494 				}
   2495 
   2496 				break;
   2497 			}
   2498 		}
   2499 	}
   2500 	ret = drm_dp_mst_topology_try_get_mstb(mstb);
   2501 	if (!ret)
   2502 		mstb = NULL;
   2503 out:
   2504 	mutex_unlock(&mgr->lock);
   2505 	return mstb;
   2506 }
   2507 
   2508 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
   2509 	struct drm_dp_mst_branch *mstb,
   2510 	const uint8_t *guid)
   2511 {
   2512 	struct drm_dp_mst_branch *found_mstb;
   2513 	struct drm_dp_mst_port *port;
   2514 
   2515 	if (memcmp(mstb->guid, guid, 16) == 0)
   2516 		return mstb;
   2517 
   2518 
   2519 	list_for_each_entry(port, &mstb->ports, next) {
   2520 		if (!port->mstb)
   2521 			continue;
   2522 
   2523 		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
   2524 
   2525 		if (found_mstb)
   2526 			return found_mstb;
   2527 	}
   2528 
   2529 	return NULL;
   2530 }
   2531 
   2532 static struct drm_dp_mst_branch *
   2533 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
   2534 				     const uint8_t *guid)
   2535 {
   2536 	struct drm_dp_mst_branch *mstb;
   2537 	int ret;
   2538 
   2539 	/* find the port by iterating down */
   2540 	mutex_lock(&mgr->lock);
   2541 
   2542 	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
   2543 	if (mstb) {
   2544 		ret = drm_dp_mst_topology_try_get_mstb(mstb);
   2545 		if (!ret)
   2546 			mstb = NULL;
   2547 	}
   2548 
   2549 	mutex_unlock(&mgr->lock);
   2550 	return mstb;
   2551 }
   2552 
   2553 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
   2554 					       struct drm_dp_mst_branch *mstb)
   2555 {
   2556 	struct drm_dp_mst_port *port;
   2557 	int ret;
   2558 	bool changed = false;
   2559 
   2560 	if (!mstb->link_address_sent) {
   2561 		ret = drm_dp_send_link_address(mgr, mstb);
   2562 		if (ret == 1)
   2563 			changed = true;
   2564 		else if (ret < 0)
   2565 			return ret;
   2566 	}
   2567 
   2568 	list_for_each_entry(port, &mstb->ports, next) {
   2569 		struct drm_dp_mst_branch *mstb_child = NULL;
   2570 
   2571 		if (port->input || !port->ddps)
   2572 			continue;
   2573 
   2574 		if (!port->available_pbn) {
   2575 			drm_modeset_lock(&mgr->base.lock, NULL);
   2576 			drm_dp_send_enum_path_resources(mgr, mstb, port);
   2577 			drm_modeset_unlock(&mgr->base.lock);
   2578 			changed = true;
   2579 		}
   2580 
   2581 		if (port->mstb)
   2582 			mstb_child = drm_dp_mst_topology_get_mstb_validated(
   2583 			    mgr, port->mstb);
   2584 
   2585 		if (mstb_child) {
   2586 			ret = drm_dp_check_and_send_link_address(mgr,
   2587 								 mstb_child);
   2588 			drm_dp_mst_topology_put_mstb(mstb_child);
   2589 			if (ret == 1)
   2590 				changed = true;
   2591 			else if (ret < 0)
   2592 				return ret;
   2593 		}
   2594 	}
   2595 
   2596 	return changed;
   2597 }
   2598 
   2599 static void drm_dp_mst_link_probe_work(struct work_struct *work)
   2600 {
   2601 	struct drm_dp_mst_topology_mgr *mgr =
   2602 		container_of(work, struct drm_dp_mst_topology_mgr, work);
   2603 	struct drm_device *dev = mgr->dev;
   2604 	struct drm_dp_mst_branch *mstb;
   2605 	int ret;
   2606 	bool clear_payload_id_table;
   2607 
   2608 	mutex_lock(&mgr->probe_lock);
   2609 
   2610 	mutex_lock(&mgr->lock);
   2611 	clear_payload_id_table = !mgr->payload_id_table_cleared;
   2612 	mgr->payload_id_table_cleared = true;
   2613 
   2614 	mstb = mgr->mst_primary;
   2615 	if (mstb) {
   2616 		ret = drm_dp_mst_topology_try_get_mstb(mstb);
   2617 		if (!ret)
   2618 			mstb = NULL;
   2619 	}
   2620 	mutex_unlock(&mgr->lock);
   2621 	if (!mstb) {
   2622 		mutex_unlock(&mgr->probe_lock);
   2623 		return;
   2624 	}
   2625 
   2626 	/*
   2627 	 * Certain branch devices seem to incorrectly report an available_pbn
   2628 	 * of 0 on downstream sinks, even after clearing the
   2629 	 * DP_PAYLOAD_ALLOCATE_* registers in
   2630 	 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
   2631 	 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
   2632 	 * things work again.
   2633 	 */
   2634 	if (clear_payload_id_table) {
   2635 		DRM_DEBUG_KMS("Clearing payload ID table\n");
   2636 		drm_dp_send_clear_payload_id_table(mgr, mstb);
   2637 	}
   2638 
   2639 	ret = drm_dp_check_and_send_link_address(mgr, mstb);
   2640 	drm_dp_mst_topology_put_mstb(mstb);
   2641 
   2642 	mutex_unlock(&mgr->probe_lock);
   2643 	if (ret)
   2644 		drm_kms_helper_hotplug_event(dev);
   2645 }
   2646 
   2647 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
   2648 				 u8 *guid)
   2649 {
   2650 	u64 salt;
   2651 
   2652 	if (memchr_inv(guid, 0, 16))
   2653 		return true;
   2654 
   2655 	salt = get_jiffies_64();
   2656 
   2657 	memcpy(&guid[0], &salt, sizeof(u64));
   2658 	memcpy(&guid[8], &salt, sizeof(u64));
   2659 
   2660 	return false;
   2661 }
   2662 
   2663 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
   2664 {
   2665 	struct drm_dp_sideband_msg_req_body req;
   2666 
   2667 	req.req_type = DP_REMOTE_DPCD_READ;
   2668 	req.u.dpcd_read.port_number = port_num;
   2669 	req.u.dpcd_read.dpcd_address = offset;
   2670 	req.u.dpcd_read.num_bytes = num_bytes;
   2671 	drm_dp_encode_sideband_req(&req, msg);
   2672 
   2673 	return 0;
   2674 }
   2675 
   2676 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
   2677 				    bool up, u8 *msg, int len)
   2678 {
   2679 	int ret;
   2680 	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
   2681 	int tosend, total, offset;
   2682 	int retries = 0;
   2683 
   2684 retry:
   2685 	total = len;
   2686 	offset = 0;
   2687 	do {
   2688 		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
   2689 
   2690 		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
   2691 					&msg[offset],
   2692 					tosend);
   2693 		if (ret != tosend) {
   2694 			if (ret == -EIO && retries < 5) {
   2695 				retries++;
   2696 				goto retry;
   2697 			}
   2698 			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
   2699 
   2700 			return -EIO;
   2701 		}
   2702 		offset += tosend;
   2703 		total -= tosend;
   2704 	} while (total > 0);
   2705 	return 0;
   2706 }
   2707 
   2708 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
   2709 				  struct drm_dp_sideband_msg_tx *txmsg)
   2710 {
   2711 	struct drm_dp_mst_branch *mstb = txmsg->dst;
   2712 	u8 req_type;
   2713 
   2714 	/* both msg slots are full */
   2715 	if (txmsg->seqno == -1) {
   2716 		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
   2717 			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
   2718 			return -EAGAIN;
   2719 		}
   2720 		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
   2721 			txmsg->seqno = mstb->last_seqno;
   2722 			mstb->last_seqno ^= 1;
   2723 		} else if (mstb->tx_slots[0] == NULL)
   2724 			txmsg->seqno = 0;
   2725 		else
   2726 			txmsg->seqno = 1;
   2727 		mstb->tx_slots[txmsg->seqno] = txmsg;
   2728 	}
   2729 
   2730 	req_type = txmsg->msg[0] & 0x7f;
   2731 	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
   2732 		req_type == DP_RESOURCE_STATUS_NOTIFY)
   2733 		hdr->broadcast = 1;
   2734 	else
   2735 		hdr->broadcast = 0;
   2736 	hdr->path_msg = txmsg->path_msg;
   2737 	hdr->lct = mstb->lct;
   2738 	hdr->lcr = mstb->lct - 1;
   2739 	if (mstb->lct > 1)
   2740 		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
   2741 	hdr->seqno = txmsg->seqno;
   2742 	return 0;
   2743 }
   2744 /*
   2745  * process a single block of the next message in the sideband queue
   2746  */
   2747 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
   2748 				   struct drm_dp_sideband_msg_tx *txmsg,
   2749 				   bool up)
   2750 {
   2751 	u8 chunk[48];
   2752 	struct drm_dp_sideband_msg_hdr hdr;
   2753 	int len, space, idx, tosend;
   2754 	int ret;
   2755 
   2756 	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
   2757 
   2758 	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
   2759 		txmsg->seqno = -1;
   2760 		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
   2761 	}
   2762 
   2763 	/* make hdr from dst mst - for replies use seqno
   2764 	   otherwise assign one */
   2765 	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
   2766 	if (ret < 0)
   2767 		return ret;
   2768 
   2769 	/* amount left to send in this message */
   2770 	len = txmsg->cur_len - txmsg->cur_offset;
   2771 
   2772 	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
   2773 	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
   2774 
   2775 	tosend = min(len, space);
   2776 	if (len == txmsg->cur_len)
   2777 		hdr.somt = 1;
   2778 	if (space >= len)
   2779 		hdr.eomt = 1;
   2780 
   2781 
   2782 	hdr.msg_len = tosend + 1;
   2783 	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
   2784 	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
   2785 	/* add crc at end */
   2786 	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
   2787 	idx += tosend + 1;
   2788 
   2789 	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
   2790 	if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
   2791 		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
   2792 
   2793 		drm_printf(&p, "sideband msg failed to send\n");
   2794 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
   2795 		return ret;
   2796 	}
   2797 
   2798 	txmsg->cur_offset += tosend;
   2799 	if (txmsg->cur_offset == txmsg->cur_len) {
   2800 		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
   2801 		return 1;
   2802 	}
   2803 	return 0;
   2804 }
   2805 
   2806 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
   2807 {
   2808 	struct drm_dp_sideband_msg_tx *txmsg;
   2809 	int ret;
   2810 
   2811 	WARN_ON(!mutex_is_locked(&mgr->qlock));
   2812 
   2813 	/* construct a chunk from the first msg in the tx_msg queue */
   2814 	if (list_empty(&mgr->tx_msg_downq))
   2815 		return;
   2816 
   2817 	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
   2818 	ret = process_single_tx_qlock(mgr, txmsg, false);
   2819 	if (ret == 1) {
   2820 		/* txmsg is sent it should be in the slots now */
   2821 		mgr->is_waiting_for_dwn_reply = true;
   2822 		list_del(&txmsg->next);
   2823 	} else if (ret) {
   2824 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
   2825 		mgr->is_waiting_for_dwn_reply = false;
   2826 		list_del(&txmsg->next);
   2827 		if (txmsg->seqno != -1)
   2828 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
   2829 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
   2830 #ifdef __NetBSD__
   2831 		DRM_WAKEUP_ALL(&mgr->tx_waitq, &mgr->qlock);
   2832 #else
   2833 		wake_up_all(&mgr->tx_waitq);
   2834 #endif
   2835 	}
   2836 }
   2837 
   2838 /* called holding qlock */
   2839 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
   2840 				       struct drm_dp_sideband_msg_tx *txmsg)
   2841 {
   2842 	int ret;
   2843 
   2844 	/* construct a chunk from the first msg in the tx_msg queue */
   2845 	ret = process_single_tx_qlock(mgr, txmsg, true);
   2846 
   2847 	if (ret != 1)
   2848 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
   2849 
   2850 	if (txmsg->seqno != -1) {
   2851 		WARN_ON((unsigned int)txmsg->seqno >
   2852 			ARRAY_SIZE(txmsg->dst->tx_slots));
   2853 		txmsg->dst->tx_slots[txmsg->seqno] = NULL;
   2854 	}
   2855 }
   2856 
   2857 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
   2858 				 struct drm_dp_sideband_msg_tx *txmsg)
   2859 {
   2860 	mutex_lock(&mgr->qlock);
   2861 	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
   2862 
   2863 	if (drm_debug_enabled(DRM_UT_DP)) {
   2864 		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
   2865 
   2866 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
   2867 	}
   2868 
   2869 	if (list_is_singular(&mgr->tx_msg_downq) &&
   2870 	    !mgr->is_waiting_for_dwn_reply)
   2871 		process_single_down_tx_qlock(mgr);
   2872 	mutex_unlock(&mgr->qlock);
   2873 }
   2874 
   2875 static void
   2876 drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
   2877 {
   2878 	struct drm_dp_link_addr_reply_port *port_reply;
   2879 	int i;
   2880 
   2881 	for (i = 0; i < reply->nports; i++) {
   2882 		port_reply = &reply->ports[i];
   2883 		DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
   2884 			      i,
   2885 			      port_reply->input_port,
   2886 			      port_reply->peer_device_type,
   2887 			      port_reply->port_number,
   2888 			      port_reply->dpcd_revision,
   2889 			      port_reply->mcs,
   2890 			      port_reply->ddps,
   2891 			      port_reply->legacy_device_plug_status,
   2892 			      port_reply->num_sdp_streams,
   2893 			      port_reply->num_sdp_stream_sinks);
   2894 	}
   2895 }
   2896 
   2897 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
   2898 				     struct drm_dp_mst_branch *mstb)
   2899 {
   2900 	struct drm_dp_sideband_msg_tx *txmsg;
   2901 	struct drm_dp_link_address_ack_reply *reply;
   2902 	struct drm_dp_mst_port *port, *tmp;
   2903 	int i, len __unused, ret, port_mask = 0;
   2904 	bool changed = false;
   2905 
   2906 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   2907 	if (!txmsg)
   2908 		return -ENOMEM;
   2909 
   2910 	txmsg->dst = mstb;
   2911 	len = build_link_address(txmsg);
   2912 
   2913 	mstb->link_address_sent = true;
   2914 	drm_dp_queue_down_tx(mgr, txmsg);
   2915 
   2916 	/* FIXME: Actually do some real error handling here */
   2917 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   2918 	if (ret <= 0) {
   2919 		DRM_ERROR("Sending link address failed with %d\n", ret);
   2920 		goto out;
   2921 	}
   2922 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
   2923 		DRM_ERROR("link address NAK received\n");
   2924 		ret = -EIO;
   2925 		goto out;
   2926 	}
   2927 
   2928 	reply = &txmsg->reply.u.link_addr;
   2929 	DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
   2930 	drm_dp_dump_link_address(reply);
   2931 
   2932 	drm_dp_check_mstb_guid(mstb, reply->guid);
   2933 
   2934 	for (i = 0; i < reply->nports; i++) {
   2935 		port_mask |= BIT(reply->ports[i].port_number);
   2936 		ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
   2937 							  &reply->ports[i]);
   2938 		if (ret == 1)
   2939 			changed = true;
   2940 		else if (ret < 0)
   2941 			goto out;
   2942 	}
   2943 
   2944 	/* Prune any ports that are currently a part of mstb in our in-memory
   2945 	 * topology, but were not seen in this link address. Usually this
   2946 	 * means that they were removed while the topology was out of sync,
   2947 	 * e.g. during suspend/resume
   2948 	 */
   2949 	mutex_lock(&mgr->lock);
   2950 	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
   2951 		if (port_mask & BIT(port->port_num))
   2952 			continue;
   2953 
   2954 		DRM_DEBUG_KMS("port %d was not in link address, removing\n",
   2955 			      port->port_num);
   2956 		list_del(&port->next);
   2957 		drm_dp_mst_topology_put_port(port);
   2958 		changed = true;
   2959 	}
   2960 	mutex_unlock(&mgr->lock);
   2961 
   2962 out:
   2963 	if (ret <= 0)
   2964 		mstb->link_address_sent = false;
   2965 	kfree(txmsg);
   2966 	return ret < 0 ? ret : changed;
   2967 }
   2968 
   2969 void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
   2970 					struct drm_dp_mst_branch *mstb)
   2971 {
   2972 	struct drm_dp_sideband_msg_tx *txmsg;
   2973 	int len __unused, ret;
   2974 
   2975 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   2976 	if (!txmsg)
   2977 		return;
   2978 
   2979 	txmsg->dst = mstb;
   2980 	len = build_clear_payload_id_table(txmsg);
   2981 
   2982 	drm_dp_queue_down_tx(mgr, txmsg);
   2983 
   2984 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   2985 	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
   2986 		DRM_DEBUG_KMS("clear payload table id nak received\n");
   2987 
   2988 	kfree(txmsg);
   2989 }
   2990 
   2991 static int
   2992 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
   2993 				struct drm_dp_mst_branch *mstb,
   2994 				struct drm_dp_mst_port *port)
   2995 {
   2996 	struct drm_dp_enum_path_resources_ack_reply *path_res;
   2997 	struct drm_dp_sideband_msg_tx *txmsg;
   2998 	int len __unused;
   2999 	int ret;
   3000 
   3001 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3002 	if (!txmsg)
   3003 		return -ENOMEM;
   3004 
   3005 	txmsg->dst = mstb;
   3006 	len = build_enum_path_resources(txmsg, port->port_num);
   3007 
   3008 	drm_dp_queue_down_tx(mgr, txmsg);
   3009 
   3010 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3011 	if (ret > 0) {
   3012 		path_res = &txmsg->reply.u.path_resources;
   3013 
   3014 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
   3015 			DRM_DEBUG_KMS("enum path resources nak received\n");
   3016 		} else {
   3017 			if (port->port_num != path_res->port_number)
   3018 				DRM_ERROR("got incorrect port in response\n");
   3019 
   3020 			DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
   3021 				      path_res->port_number,
   3022 				      path_res->full_payload_bw_number,
   3023 				      path_res->avail_payload_bw_number);
   3024 			port->available_pbn =
   3025 				path_res->avail_payload_bw_number;
   3026 			port->fec_capable = path_res->fec_capable;
   3027 		}
   3028 	}
   3029 
   3030 	kfree(txmsg);
   3031 	return 0;
   3032 }
   3033 
   3034 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
   3035 {
   3036 	if (!mstb->port_parent)
   3037 		return NULL;
   3038 
   3039 	if (mstb->port_parent->mstb != mstb)
   3040 		return mstb->port_parent;
   3041 
   3042 	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
   3043 }
   3044 
   3045 /*
   3046  * Searches upwards in the topology starting from mstb to try to find the
   3047  * closest available parent of mstb that's still connected to the rest of the
   3048  * topology. This can be used in order to perform operations like releasing
   3049  * payloads, where the branch device which owned the payload may no longer be
   3050  * around and thus would require that the payload on the last living relative
   3051  * be freed instead.
   3052  */
   3053 static struct drm_dp_mst_branch *
   3054 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
   3055 					struct drm_dp_mst_branch *mstb,
   3056 					int *port_num)
   3057 {
   3058 	struct drm_dp_mst_branch *rmstb = NULL;
   3059 	struct drm_dp_mst_port *found_port;
   3060 
   3061 	mutex_lock(&mgr->lock);
   3062 	if (!mgr->mst_primary)
   3063 		goto out;
   3064 
   3065 	do {
   3066 		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
   3067 		if (!found_port)
   3068 			break;
   3069 
   3070 		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
   3071 			rmstb = found_port->parent;
   3072 			*port_num = found_port->port_num;
   3073 		} else {
   3074 			/* Search again, starting from this parent */
   3075 			mstb = found_port->parent;
   3076 		}
   3077 	} while (!rmstb);
   3078 out:
   3079 	mutex_unlock(&mgr->lock);
   3080 	return rmstb;
   3081 }
   3082 
   3083 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
   3084 				   struct drm_dp_mst_port *port,
   3085 				   int id,
   3086 				   int pbn)
   3087 {
   3088 	struct drm_dp_sideband_msg_tx *txmsg;
   3089 	struct drm_dp_mst_branch *mstb;
   3090 	int len __unused, ret, port_num;
   3091 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
   3092 	int i;
   3093 
   3094 	port_num = port->port_num;
   3095 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
   3096 	if (!mstb) {
   3097 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
   3098 							       port->parent,
   3099 							       &port_num);
   3100 
   3101 		if (!mstb)
   3102 			return -EINVAL;
   3103 	}
   3104 
   3105 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3106 	if (!txmsg) {
   3107 		ret = -ENOMEM;
   3108 		goto fail_put;
   3109 	}
   3110 
   3111 	for (i = 0; i < port->num_sdp_streams; i++)
   3112 		sinks[i] = i;
   3113 
   3114 	txmsg->dst = mstb;
   3115 	len = build_allocate_payload(txmsg, port_num,
   3116 				     id,
   3117 				     pbn, port->num_sdp_streams, sinks);
   3118 
   3119 	drm_dp_queue_down_tx(mgr, txmsg);
   3120 
   3121 	/*
   3122 	 * FIXME: there is a small chance that between getting the last
   3123 	 * connected mstb and sending the payload message, the last connected
   3124 	 * mstb could also be removed from the topology. In the future, this
   3125 	 * needs to be fixed by restarting the
   3126 	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
   3127 	 * timeout if the topology is still connected to the system.
   3128 	 */
   3129 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3130 	if (ret > 0) {
   3131 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
   3132 			ret = -EINVAL;
   3133 		else
   3134 			ret = 0;
   3135 	}
   3136 	kfree(txmsg);
   3137 fail_put:
   3138 	drm_dp_mst_topology_put_mstb(mstb);
   3139 	return ret;
   3140 }
   3141 
   3142 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
   3143 				 struct drm_dp_mst_port *port, bool power_up)
   3144 {
   3145 	struct drm_dp_sideband_msg_tx *txmsg;
   3146 	int len __unused, ret;
   3147 
   3148 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   3149 	if (!port)
   3150 		return -EINVAL;
   3151 
   3152 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3153 	if (!txmsg) {
   3154 		drm_dp_mst_topology_put_port(port);
   3155 		return -ENOMEM;
   3156 	}
   3157 
   3158 	txmsg->dst = port->parent;
   3159 	len = build_power_updown_phy(txmsg, port->port_num, power_up);
   3160 	drm_dp_queue_down_tx(mgr, txmsg);
   3161 
   3162 	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
   3163 	if (ret > 0) {
   3164 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
   3165 			ret = -EINVAL;
   3166 		else
   3167 			ret = 0;
   3168 	}
   3169 	kfree(txmsg);
   3170 	drm_dp_mst_topology_put_port(port);
   3171 
   3172 	return ret;
   3173 }
   3174 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
   3175 
   3176 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
   3177 				       int id,
   3178 				       struct drm_dp_payload *payload)
   3179 {
   3180 	int ret;
   3181 
   3182 	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
   3183 	if (ret < 0) {
   3184 		payload->payload_state = 0;
   3185 		return ret;
   3186 	}
   3187 	payload->payload_state = DP_PAYLOAD_LOCAL;
   3188 	return 0;
   3189 }
   3190 
   3191 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
   3192 				       struct drm_dp_mst_port *port,
   3193 				       int id,
   3194 				       struct drm_dp_payload *payload)
   3195 {
   3196 	int ret;
   3197 	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
   3198 	if (ret < 0)
   3199 		return ret;
   3200 	payload->payload_state = DP_PAYLOAD_REMOTE;
   3201 	return ret;
   3202 }
   3203 
   3204 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
   3205 					struct drm_dp_mst_port *port,
   3206 					int id,
   3207 					struct drm_dp_payload *payload)
   3208 {
   3209 	DRM_DEBUG_KMS("\n");
   3210 	/* it's okay for these to fail */
   3211 	if (port) {
   3212 		drm_dp_payload_send_msg(mgr, port, id, 0);
   3213 	}
   3214 
   3215 	drm_dp_dpcd_write_payload(mgr, id, payload);
   3216 	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
   3217 	return 0;
   3218 }
   3219 
   3220 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
   3221 					int id,
   3222 					struct drm_dp_payload *payload)
   3223 {
   3224 	payload->payload_state = 0;
   3225 	return 0;
   3226 }
   3227 
   3228 /**
   3229  * drm_dp_update_payload_part1() - Execute payload update part 1
   3230  * @mgr: manager to use.
   3231  *
   3232  * This iterates over all proposed virtual channels, and tries to
   3233  * allocate space in the link for them. For 0->slots transitions,
   3234  * this step just writes the VCPI to the MST device. For slots->0
   3235  * transitions, this writes the updated VCPIs and removes the
   3236  * remote VC payloads.
   3237  *
   3238  * after calling this the driver should generate ACT and payload
   3239  * packets.
   3240  */
   3241 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
   3242 {
   3243 	struct drm_dp_payload req_payload;
   3244 	struct drm_dp_mst_port *port;
   3245 	int i, j;
   3246 	int cur_slots = 1;
   3247 
   3248 	mutex_lock(&mgr->payload_lock);
   3249 	for (i = 0; i < mgr->max_payloads; i++) {
   3250 		struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
   3251 		struct drm_dp_payload *payload = &mgr->payloads[i];
   3252 		bool put_port = false;
   3253 
   3254 		/* solve the current payloads - compare to the hw ones
   3255 		   - update the hw view */
   3256 		req_payload.start_slot = cur_slots;
   3257 		if (vcpi) {
   3258 			port = container_of(vcpi, struct drm_dp_mst_port,
   3259 					    vcpi);
   3260 
   3261 			/* Validated ports don't matter if we're releasing
   3262 			 * VCPI
   3263 			 */
   3264 			if (vcpi->num_slots) {
   3265 				port = drm_dp_mst_topology_get_port_validated(
   3266 				    mgr, port);
   3267 				if (!port) {
   3268 					mutex_unlock(&mgr->payload_lock);
   3269 					return -EINVAL;
   3270 				}
   3271 				put_port = true;
   3272 			}
   3273 
   3274 			req_payload.num_slots = vcpi->num_slots;
   3275 			req_payload.vcpi = vcpi->vcpi;
   3276 		} else {
   3277 			port = NULL;
   3278 			req_payload.num_slots = 0;
   3279 		}
   3280 
   3281 		payload->start_slot = req_payload.start_slot;
   3282 		/* work out what is required to happen with this payload */
   3283 		if (payload->num_slots != req_payload.num_slots) {
   3284 
   3285 			/* need to push an update for this payload */
   3286 			if (req_payload.num_slots) {
   3287 				drm_dp_create_payload_step1(mgr, vcpi->vcpi,
   3288 							    &req_payload);
   3289 				payload->num_slots = req_payload.num_slots;
   3290 				payload->vcpi = req_payload.vcpi;
   3291 
   3292 			} else if (payload->num_slots) {
   3293 				payload->num_slots = 0;
   3294 				drm_dp_destroy_payload_step1(mgr, port,
   3295 							     payload->vcpi,
   3296 							     payload);
   3297 				req_payload.payload_state =
   3298 					payload->payload_state;
   3299 				payload->start_slot = 0;
   3300 			}
   3301 			payload->payload_state = req_payload.payload_state;
   3302 		}
   3303 		cur_slots += req_payload.num_slots;
   3304 
   3305 		if (put_port)
   3306 			drm_dp_mst_topology_put_port(port);
   3307 	}
   3308 
   3309 	for (i = 0; i < mgr->max_payloads; /* do nothing */) {
   3310 		if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
   3311 			i++;
   3312 			continue;
   3313 		}
   3314 
   3315 		DRM_DEBUG_KMS("removing payload %d\n", i);
   3316 		for (j = i; j < mgr->max_payloads - 1; j++) {
   3317 			mgr->payloads[j] = mgr->payloads[j + 1];
   3318 			mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
   3319 
   3320 			if (mgr->proposed_vcpis[j] &&
   3321 			    mgr->proposed_vcpis[j]->num_slots) {
   3322 				set_bit(j + 1, &mgr->payload_mask);
   3323 			} else {
   3324 				clear_bit(j + 1, &mgr->payload_mask);
   3325 			}
   3326 		}
   3327 
   3328 		memset(&mgr->payloads[mgr->max_payloads - 1], 0,
   3329 		       sizeof(struct drm_dp_payload));
   3330 		mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
   3331 		clear_bit(mgr->max_payloads, &mgr->payload_mask);
   3332 	}
   3333 	mutex_unlock(&mgr->payload_lock);
   3334 
   3335 	return 0;
   3336 }
   3337 EXPORT_SYMBOL(drm_dp_update_payload_part1);
   3338 
   3339 /**
   3340  * drm_dp_update_payload_part2() - Execute payload update part 2
   3341  * @mgr: manager to use.
   3342  *
   3343  * This iterates over all proposed virtual channels, and tries to
   3344  * allocate space in the link for them. For 0->slots transitions,
   3345  * this step writes the remote VC payload commands. For slots->0
   3346  * this just resets some internal state.
   3347  */
   3348 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
   3349 {
   3350 	struct drm_dp_mst_port *port;
   3351 	int i;
   3352 	int ret = 0;
   3353 	mutex_lock(&mgr->payload_lock);
   3354 	for (i = 0; i < mgr->max_payloads; i++) {
   3355 
   3356 		if (!mgr->proposed_vcpis[i])
   3357 			continue;
   3358 
   3359 		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   3360 
   3361 		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
   3362 		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
   3363 			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
   3364 		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
   3365 			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
   3366 		}
   3367 		if (ret) {
   3368 			mutex_unlock(&mgr->payload_lock);
   3369 			return ret;
   3370 		}
   3371 	}
   3372 	mutex_unlock(&mgr->payload_lock);
   3373 	return 0;
   3374 }
   3375 EXPORT_SYMBOL(drm_dp_update_payload_part2);
   3376 
   3377 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
   3378 				 struct drm_dp_mst_port *port,
   3379 				 int offset, int size, u8 *bytes)
   3380 {
   3381 	int len __unused;
   3382 	int ret = 0;
   3383 	struct drm_dp_sideband_msg_tx *txmsg;
   3384 	struct drm_dp_mst_branch *mstb;
   3385 
   3386 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
   3387 	if (!mstb)
   3388 		return -EINVAL;
   3389 
   3390 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3391 	if (!txmsg) {
   3392 		ret = -ENOMEM;
   3393 		goto fail_put;
   3394 	}
   3395 
   3396 	len = build_dpcd_read(txmsg, port->port_num, offset, size);
   3397 	txmsg->dst = port->parent;
   3398 
   3399 	drm_dp_queue_down_tx(mgr, txmsg);
   3400 
   3401 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3402 	if (ret < 0)
   3403 		goto fail_free;
   3404 
   3405 	/* DPCD read should never be NACKed */
   3406 	if (txmsg->reply.reply_type == 1) {
   3407 		DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
   3408 			  mstb, port->port_num, offset, size);
   3409 		ret = -EIO;
   3410 		goto fail_free;
   3411 	}
   3412 
   3413 	if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
   3414 		ret = -EPROTO;
   3415 		goto fail_free;
   3416 	}
   3417 
   3418 	ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
   3419 		    size);
   3420 	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
   3421 
   3422 fail_free:
   3423 	kfree(txmsg);
   3424 fail_put:
   3425 	drm_dp_mst_topology_put_mstb(mstb);
   3426 
   3427 	return ret;
   3428 }
   3429 
   3430 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
   3431 				  struct drm_dp_mst_port *port,
   3432 				  int offset, int size, u8 *bytes)
   3433 {
   3434 	int len __unused;
   3435 	int ret;
   3436 	struct drm_dp_sideband_msg_tx *txmsg;
   3437 	struct drm_dp_mst_branch *mstb;
   3438 
   3439 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
   3440 	if (!mstb)
   3441 		return -EINVAL;
   3442 
   3443 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3444 	if (!txmsg) {
   3445 		ret = -ENOMEM;
   3446 		goto fail_put;
   3447 	}
   3448 
   3449 	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
   3450 	txmsg->dst = mstb;
   3451 
   3452 	drm_dp_queue_down_tx(mgr, txmsg);
   3453 
   3454 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3455 	if (ret > 0) {
   3456 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
   3457 			ret = -EIO;
   3458 		else
   3459 			ret = 0;
   3460 	}
   3461 	kfree(txmsg);
   3462 fail_put:
   3463 	drm_dp_mst_topology_put_mstb(mstb);
   3464 	return ret;
   3465 }
   3466 
   3467 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
   3468 {
   3469 	struct drm_dp_sideband_msg_reply_body reply;
   3470 
   3471 	reply.reply_type = DP_SIDEBAND_REPLY_ACK;
   3472 	reply.req_type = req_type;
   3473 	drm_dp_encode_sideband_reply(&reply, msg);
   3474 	return 0;
   3475 }
   3476 
   3477 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
   3478 				    struct drm_dp_mst_branch *mstb,
   3479 				    int req_type, int seqno, bool broadcast)
   3480 {
   3481 	struct drm_dp_sideband_msg_tx *txmsg;
   3482 
   3483 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3484 	if (!txmsg)
   3485 		return -ENOMEM;
   3486 
   3487 	txmsg->dst = mstb;
   3488 	txmsg->seqno = seqno;
   3489 	drm_dp_encode_up_ack_reply(txmsg, req_type);
   3490 
   3491 	mutex_lock(&mgr->qlock);
   3492 
   3493 	process_single_up_tx_qlock(mgr, txmsg);
   3494 
   3495 	mutex_unlock(&mgr->qlock);
   3496 
   3497 	kfree(txmsg);
   3498 	return 0;
   3499 }
   3500 
   3501 static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8  dp_link_count)
   3502 {
   3503 	if (dp_link_bw == 0 || dp_link_count == 0)
   3504 		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
   3505 			      dp_link_bw, dp_link_count);
   3506 
   3507 	return dp_link_bw * dp_link_count / 2;
   3508 }
   3509 
   3510 /**
   3511  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
   3512  * @mgr: manager to set state for
   3513  * @mst_state: true to enable MST on this connector - false to disable.
   3514  *
   3515  * This is called by the driver when it detects an MST capable device plugged
   3516  * into a DP MST capable port, or when a DP MST capable device is unplugged.
   3517  */
   3518 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
   3519 {
   3520 	int ret = 0;
   3521 	int i = 0;
   3522 	struct drm_dp_mst_branch *mstb = NULL;
   3523 
   3524 	mutex_lock(&mgr->lock);
   3525 	if (mst_state == mgr->mst_state)
   3526 		goto out_unlock;
   3527 
   3528 	mgr->mst_state = mst_state;
   3529 	/* set the device into MST mode */
   3530 	if (mst_state) {
   3531 		WARN_ON(mgr->mst_primary);
   3532 
   3533 		/* get dpcd info */
   3534 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
   3535 		if (ret != DP_RECEIVER_CAP_SIZE) {
   3536 			DRM_DEBUG_KMS("failed to read DPCD\n");
   3537 			goto out_unlock;
   3538 		}
   3539 
   3540 		mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
   3541 							mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
   3542 		if (mgr->pbn_div == 0) {
   3543 			ret = -EINVAL;
   3544 			goto out_unlock;
   3545 		}
   3546 
   3547 		/* add initial branch device at LCT 1 */
   3548 		mstb = drm_dp_add_mst_branch_device(1, NULL);
   3549 		if (mstb == NULL) {
   3550 			ret = -ENOMEM;
   3551 			goto out_unlock;
   3552 		}
   3553 		mstb->mgr = mgr;
   3554 
   3555 		/* give this the main reference */
   3556 		mgr->mst_primary = mstb;
   3557 		drm_dp_mst_topology_get_mstb(mgr->mst_primary);
   3558 
   3559 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   3560 							 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
   3561 		if (ret < 0) {
   3562 			goto out_unlock;
   3563 		}
   3564 
   3565 		{
   3566 			struct drm_dp_payload reset_pay;
   3567 			reset_pay.start_slot = 0;
   3568 			reset_pay.num_slots = 0x3f;
   3569 			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
   3570 		}
   3571 
   3572 		queue_work(system_long_wq, &mgr->work);
   3573 
   3574 		ret = 0;
   3575 	} else {
   3576 		/* disable MST on the device */
   3577 		mstb = mgr->mst_primary;
   3578 		mgr->mst_primary = NULL;
   3579 		/* this can fail if the device is gone */
   3580 		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
   3581 		ret = 0;
   3582 		mutex_lock(&mgr->payload_lock);
   3583 		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
   3584 		mgr->payload_mask = 0;
   3585 		set_bit(0, &mgr->payload_mask);
   3586 		for (i = 0; i < mgr->max_payloads; i++) {
   3587 			struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
   3588 
   3589 			if (vcpi) {
   3590 				vcpi->vcpi = 0;
   3591 				vcpi->num_slots = 0;
   3592 			}
   3593 			mgr->proposed_vcpis[i] = NULL;
   3594 		}
   3595 		mgr->vcpi_mask = 0;
   3596 		mutex_unlock(&mgr->payload_lock);
   3597 
   3598 		mgr->payload_id_table_cleared = false;
   3599 	}
   3600 
   3601 out_unlock:
   3602 	mutex_unlock(&mgr->lock);
   3603 	if (mstb)
   3604 		drm_dp_mst_topology_put_mstb(mstb);
   3605 	return ret;
   3606 
   3607 }
   3608 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
   3609 
   3610 static void
   3611 drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
   3612 {
   3613 	struct drm_dp_mst_port *port;
   3614 
   3615 	/* The link address will need to be re-sent on resume */
   3616 	mstb->link_address_sent = false;
   3617 
   3618 	list_for_each_entry(port, &mstb->ports, next) {
   3619 		/* The PBN for each port will also need to be re-probed */
   3620 		port->available_pbn = 0;
   3621 
   3622 		if (port->mstb)
   3623 			drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
   3624 	}
   3625 }
   3626 
   3627 /**
   3628  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
   3629  * @mgr: manager to suspend
   3630  *
   3631  * This function tells the MST device that we can't handle UP messages
   3632  * anymore. This should stop it from sending any since we are suspended.
   3633  */
   3634 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
   3635 {
   3636 	mutex_lock(&mgr->lock);
   3637 	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   3638 			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
   3639 	mutex_unlock(&mgr->lock);
   3640 	flush_work(&mgr->up_req_work);
   3641 	flush_work(&mgr->work);
   3642 	flush_work(&mgr->delayed_destroy_work);
   3643 
   3644 	mutex_lock(&mgr->lock);
   3645 	if (mgr->mst_state && mgr->mst_primary)
   3646 		drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
   3647 	mutex_unlock(&mgr->lock);
   3648 }
   3649 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
   3650 
   3651 /**
   3652  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
   3653  * @mgr: manager to resume
   3654  * @sync: whether or not to perform topology reprobing synchronously
   3655  *
   3656  * This will fetch DPCD and see if the device is still there,
   3657  * if it is, it will rewrite the MSTM control bits, and return.
   3658  *
   3659  * If the device fails this returns -1, and the driver should do
   3660  * a full MST reprobe, in case we were undocked.
   3661  *
   3662  * During system resume (where it is assumed that the driver will be calling
   3663  * drm_atomic_helper_resume()) this function should be called beforehand with
   3664  * @sync set to true. In contexts like runtime resume where the driver is not
   3665  * expected to be calling drm_atomic_helper_resume(), this function should be
   3666  * called with @sync set to false in order to avoid deadlocking.
   3667  *
   3668  * Returns: -1 if the MST topology was removed while we were suspended, 0
   3669  * otherwise.
   3670  */
   3671 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
   3672 				   bool sync)
   3673 {
   3674 	int ret;
   3675 	u8 guid[16];
   3676 
   3677 	mutex_lock(&mgr->lock);
   3678 	if (!mgr->mst_primary)
   3679 		goto out_fail;
   3680 
   3681 	ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
   3682 			       DP_RECEIVER_CAP_SIZE);
   3683 	if (ret != DP_RECEIVER_CAP_SIZE) {
   3684 		DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
   3685 		goto out_fail;
   3686 	}
   3687 
   3688 	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   3689 				 DP_MST_EN |
   3690 				 DP_UP_REQ_EN |
   3691 				 DP_UPSTREAM_IS_SRC);
   3692 	if (ret < 0) {
   3693 		DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
   3694 		goto out_fail;
   3695 	}
   3696 
   3697 	/* Some hubs forget their guids after they resume */
   3698 	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
   3699 	if (ret != 16) {
   3700 		DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
   3701 		goto out_fail;
   3702 	}
   3703 	drm_dp_check_mstb_guid(mgr->mst_primary, guid);
   3704 
   3705 	/*
   3706 	 * For the final step of resuming the topology, we need to bring the
   3707 	 * state of our in-memory topology back into sync with reality. So,
   3708 	 * restart the probing process as if we're probing a new hub
   3709 	 */
   3710 	queue_work(system_long_wq, &mgr->work);
   3711 	mutex_unlock(&mgr->lock);
   3712 
   3713 	if (sync) {
   3714 		DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
   3715 		flush_work(&mgr->work);
   3716 	}
   3717 
   3718 	return 0;
   3719 
   3720 out_fail:
   3721 	mutex_unlock(&mgr->lock);
   3722 	return -1;
   3723 }
   3724 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
   3725 
   3726 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
   3727 {
   3728 	int len;
   3729 	u8 replyblock[32];
   3730 	int replylen, origlen __unused, curreply;
   3731 	int ret;
   3732 	struct drm_dp_sideband_msg_rx *msg;
   3733 	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
   3734 	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
   3735 
   3736 	len = min(mgr->max_dpcd_transaction_bytes, 16);
   3737 	ret = drm_dp_dpcd_read(mgr->aux, basereg,
   3738 			       replyblock, len);
   3739 	if (ret != len) {
   3740 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
   3741 		return false;
   3742 	}
   3743 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
   3744 	if (!ret) {
   3745 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
   3746 		return false;
   3747 	}
   3748 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
   3749 
   3750 	origlen = replylen;
   3751 	replylen -= len;
   3752 	curreply = len;
   3753 	while (replylen > 0) {
   3754 		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
   3755 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
   3756 				    replyblock, len);
   3757 		if (ret != len) {
   3758 			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
   3759 				      len, ret);
   3760 			return false;
   3761 		}
   3762 
   3763 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
   3764 		if (!ret) {
   3765 			DRM_DEBUG_KMS("failed to build sideband msg\n");
   3766 			return false;
   3767 		}
   3768 
   3769 		curreply += len;
   3770 		replylen -= len;
   3771 	}
   3772 	return true;
   3773 }
   3774 
   3775 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
   3776 {
   3777 	struct drm_dp_sideband_msg_tx *txmsg;
   3778 	struct drm_dp_mst_branch *mstb;
   3779 	struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
   3780 	int slot = -1;
   3781 
   3782 	if (!drm_dp_get_one_sb_msg(mgr, false))
   3783 		goto clear_down_rep_recv;
   3784 
   3785 	if (!mgr->down_rep_recv.have_eomt)
   3786 		return 0;
   3787 
   3788 	mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
   3789 	if (!mstb) {
   3790 		DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
   3791 			      hdr->lct);
   3792 		goto clear_down_rep_recv;
   3793 	}
   3794 
   3795 	/* find the message */
   3796 	slot = hdr->seqno;
   3797 	mutex_lock(&mgr->qlock);
   3798 	txmsg = mstb->tx_slots[slot];
   3799 	/* remove from slots */
   3800 	mutex_unlock(&mgr->qlock);
   3801 
   3802 	if (!txmsg) {
   3803 		DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
   3804 			      mstb, hdr->seqno, hdr->lct, hdr->rad[0],
   3805 			      mgr->down_rep_recv.msg[0]);
   3806 		goto no_msg;
   3807 	}
   3808 
   3809 	drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
   3810 
   3811 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
   3812 		DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
   3813 			      txmsg->reply.req_type,
   3814 			      drm_dp_mst_req_type_str(txmsg->reply.req_type),
   3815 			      txmsg->reply.u.nak.reason,
   3816 			      drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
   3817 			      txmsg->reply.u.nak.nak_data);
   3818 
   3819 	memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   3820 	drm_dp_mst_topology_put_mstb(mstb);
   3821 
   3822 	mutex_lock(&mgr->qlock);
   3823 	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
   3824 	mstb->tx_slots[slot] = NULL;
   3825 	mgr->is_waiting_for_dwn_reply = false;
   3826 	mutex_unlock(&mgr->qlock);
   3827 
   3828 	wake_up_all(&mgr->tx_waitq);
   3829 
   3830 	return 0;
   3831 
   3832 no_msg:
   3833 	drm_dp_mst_topology_put_mstb(mstb);
   3834 clear_down_rep_recv:
   3835 	mutex_lock(&mgr->qlock);
   3836 	mgr->is_waiting_for_dwn_reply = false;
   3837 	mutex_unlock(&mgr->qlock);
   3838 	memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   3839 
   3840 	return 0;
   3841 }
   3842 
   3843 static inline bool
   3844 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
   3845 			  struct drm_dp_pending_up_req *up_req)
   3846 {
   3847 	struct drm_dp_mst_branch *mstb = NULL;
   3848 	struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
   3849 	struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
   3850 	bool hotplug = false;
   3851 
   3852 	if (hdr->broadcast) {
   3853 		const u8 *guid = NULL;
   3854 
   3855 		if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
   3856 			guid = msg->u.conn_stat.guid;
   3857 		else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
   3858 			guid = msg->u.resource_stat.guid;
   3859 
   3860 		if (guid)
   3861 			mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
   3862 	} else {
   3863 		mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
   3864 	}
   3865 
   3866 	if (!mstb) {
   3867 		DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
   3868 			      hdr->lct);
   3869 		return false;
   3870 	}
   3871 
   3872 	/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
   3873 	if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
   3874 		drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
   3875 		hotplug = true;
   3876 	}
   3877 
   3878 	drm_dp_mst_topology_put_mstb(mstb);
   3879 	return hotplug;
   3880 }
   3881 
   3882 static void drm_dp_mst_up_req_work(struct work_struct *work)
   3883 {
   3884 	struct drm_dp_mst_topology_mgr *mgr =
   3885 		container_of(work, struct drm_dp_mst_topology_mgr,
   3886 			     up_req_work);
   3887 	struct drm_dp_pending_up_req *up_req;
   3888 	bool send_hotplug = false;
   3889 
   3890 	mutex_lock(&mgr->probe_lock);
   3891 	while (true) {
   3892 		mutex_lock(&mgr->up_req_lock);
   3893 		up_req = list_first_entry_or_null(&mgr->up_req_list,
   3894 						  struct drm_dp_pending_up_req,
   3895 						  next);
   3896 		if (up_req)
   3897 			list_del(&up_req->next);
   3898 		mutex_unlock(&mgr->up_req_lock);
   3899 
   3900 		if (!up_req)
   3901 			break;
   3902 
   3903 		send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
   3904 		kfree(up_req);
   3905 	}
   3906 	mutex_unlock(&mgr->probe_lock);
   3907 
   3908 	if (send_hotplug)
   3909 		drm_kms_helper_hotplug_event(mgr->dev);
   3910 }
   3911 
   3912 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
   3913 {
   3914 	struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
   3915 	struct drm_dp_pending_up_req *up_req;
   3916 	bool seqno;
   3917 
   3918 	if (!drm_dp_get_one_sb_msg(mgr, true))
   3919 		goto out;
   3920 
   3921 	if (!mgr->up_req_recv.have_eomt)
   3922 		return 0;
   3923 
   3924 	up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
   3925 	if (!up_req) {
   3926 		DRM_ERROR("Not enough memory to process MST up req\n");
   3927 		return -ENOMEM;
   3928 	}
   3929 	INIT_LIST_HEAD(&up_req->next);
   3930 
   3931 	seqno = hdr->seqno;
   3932 	drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
   3933 
   3934 	if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
   3935 	    up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
   3936 		DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
   3937 			      up_req->msg.req_type);
   3938 		kfree(up_req);
   3939 		goto out;
   3940 	}
   3941 
   3942 	drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
   3943 				 seqno, false);
   3944 
   3945 	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
   3946 		const struct drm_dp_connection_status_notify *conn_stat =
   3947 			&up_req->msg.u.conn_stat;
   3948 
   3949 		DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
   3950 			      conn_stat->port_number,
   3951 			      conn_stat->legacy_device_plug_status,
   3952 			      conn_stat->displayport_device_plug_status,
   3953 			      conn_stat->message_capability_status,
   3954 			      conn_stat->input_port,
   3955 			      conn_stat->peer_device_type);
   3956 	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
   3957 		const struct drm_dp_resource_status_notify *res_stat =
   3958 			&up_req->msg.u.resource_stat;
   3959 
   3960 		DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
   3961 			      res_stat->port_number,
   3962 			      res_stat->available_pbn);
   3963 	}
   3964 
   3965 	up_req->hdr = *hdr;
   3966 	mutex_lock(&mgr->up_req_lock);
   3967 	list_add_tail(&up_req->next, &mgr->up_req_list);
   3968 	mutex_unlock(&mgr->up_req_lock);
   3969 	queue_work(system_long_wq, &mgr->up_req_work);
   3970 
   3971 out:
   3972 	memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   3973 	return 0;
   3974 }
   3975 
   3976 /**
   3977  * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
   3978  * @mgr: manager to notify irq for.
   3979  * @esi: 4 bytes from SINK_COUNT_ESI
   3980  * @handled: whether the hpd interrupt was consumed or not
   3981  *
   3982  * This should be called from the driver when it detects a short IRQ,
   3983  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
   3984  * topology manager will process the sideband messages received as a result
   3985  * of this.
   3986  */
   3987 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
   3988 {
   3989 	int ret = 0;
   3990 	int sc;
   3991 	*handled = false;
   3992 	sc = esi[0] & 0x3f;
   3993 
   3994 	if (sc != mgr->sink_count) {
   3995 		mgr->sink_count = sc;
   3996 		*handled = true;
   3997 	}
   3998 
   3999 	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
   4000 		ret = drm_dp_mst_handle_down_rep(mgr);
   4001 		*handled = true;
   4002 	}
   4003 
   4004 	if (esi[1] & DP_UP_REQ_MSG_RDY) {
   4005 		ret |= drm_dp_mst_handle_up_req(mgr);
   4006 		*handled = true;
   4007 	}
   4008 
   4009 	drm_dp_mst_kick_tx(mgr);
   4010 	return ret;
   4011 }
   4012 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
   4013 
   4014 /**
   4015  * drm_dp_mst_detect_port() - get connection status for an MST port
   4016  * @connector: DRM connector for this port
   4017  * @ctx: The acquisition context to use for grabbing locks
   4018  * @mgr: manager for this port
   4019  * @port: pointer to a port
   4020  *
   4021  * This returns the current connection state for a port.
   4022  */
   4023 int
   4024 drm_dp_mst_detect_port(struct drm_connector *connector,
   4025 		       struct drm_modeset_acquire_ctx *ctx,
   4026 		       struct drm_dp_mst_topology_mgr *mgr,
   4027 		       struct drm_dp_mst_port *port)
   4028 {
   4029 	int ret;
   4030 
   4031 	/* we need to search for the port in the mgr in case it's gone */
   4032 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   4033 	if (!port)
   4034 		return connector_status_disconnected;
   4035 
   4036 	ret = drm_modeset_lock(&mgr->base.lock, ctx);
   4037 	if (ret)
   4038 		goto out;
   4039 
   4040 	ret = connector_status_disconnected;
   4041 
   4042 	if (!port->ddps)
   4043 		goto out;
   4044 
   4045 	switch (port->pdt) {
   4046 	case DP_PEER_DEVICE_NONE:
   4047 	case DP_PEER_DEVICE_MST_BRANCHING:
   4048 		if (!port->mcs)
   4049 			ret = connector_status_connected;
   4050 		break;
   4051 
   4052 	case DP_PEER_DEVICE_SST_SINK:
   4053 		ret = connector_status_connected;
   4054 		/* for logical ports - cache the EDID */
   4055 		if (port->port_num >= 8 && !port->cached_edid) {
   4056 			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
   4057 		}
   4058 		break;
   4059 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
   4060 		if (port->ldps)
   4061 			ret = connector_status_connected;
   4062 		break;
   4063 	}
   4064 out:
   4065 	drm_dp_mst_topology_put_port(port);
   4066 	return ret;
   4067 }
   4068 EXPORT_SYMBOL(drm_dp_mst_detect_port);
   4069 
   4070 /**
   4071  * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
   4072  * @mgr: manager for this port
   4073  * @port: unverified pointer to a port.
   4074  *
   4075  * This returns whether the port supports audio or not.
   4076  */
   4077 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
   4078 					struct drm_dp_mst_port *port)
   4079 {
   4080 	bool ret = false;
   4081 
   4082 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   4083 	if (!port)
   4084 		return ret;
   4085 	ret = port->has_audio;
   4086 	drm_dp_mst_topology_put_port(port);
   4087 	return ret;
   4088 }
   4089 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
   4090 
   4091 /**
   4092  * drm_dp_mst_get_edid() - get EDID for an MST port
   4093  * @connector: toplevel connector to get EDID for
   4094  * @mgr: manager for this port
   4095  * @port: unverified pointer to a port.
   4096  *
   4097  * This returns an EDID for the port connected to a connector,
   4098  * It validates the pointer still exists so the caller doesn't require a
   4099  * reference.
   4100  */
   4101 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   4102 {
   4103 	struct edid *edid = NULL;
   4104 
   4105 	/* we need to search for the port in the mgr in case it's gone */
   4106 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   4107 	if (!port)
   4108 		return NULL;
   4109 
   4110 	if (port->cached_edid)
   4111 		edid = drm_edid_duplicate(port->cached_edid);
   4112 	else {
   4113 		edid = drm_get_edid(connector, &port->aux.ddc);
   4114 	}
   4115 	port->has_audio = drm_detect_monitor_audio(edid);
   4116 	drm_dp_mst_topology_put_port(port);
   4117 	return edid;
   4118 }
   4119 EXPORT_SYMBOL(drm_dp_mst_get_edid);
   4120 
   4121 /**
   4122  * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
   4123  * @mgr: manager to use
   4124  * @pbn: payload bandwidth to convert into slots.
   4125  *
   4126  * Calculate the number of VCPI slots that will be required for the given PBN
   4127  * value. This function is deprecated, and should not be used in atomic
   4128  * drivers.
   4129  *
   4130  * RETURNS:
   4131  * The total slots required for this port, or error.
   4132  */
   4133 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
   4134 			   int pbn)
   4135 {
   4136 	int num_slots;
   4137 
   4138 	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
   4139 
   4140 	/* max. time slots - one slot for MTP header */
   4141 	if (num_slots > 63)
   4142 		return -ENOSPC;
   4143 	return num_slots;
   4144 }
   4145 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
   4146 
   4147 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
   4148 			    struct drm_dp_vcpi *vcpi, int pbn, int slots)
   4149 {
   4150 	int ret;
   4151 
   4152 	/* max. time slots - one slot for MTP header */
   4153 	if (slots > 63)
   4154 		return -ENOSPC;
   4155 
   4156 	vcpi->pbn = pbn;
   4157 	vcpi->aligned_pbn = slots * mgr->pbn_div;
   4158 	vcpi->num_slots = slots;
   4159 
   4160 	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
   4161 	if (ret < 0)
   4162 		return ret;
   4163 	return 0;
   4164 }
   4165 
   4166 /**
   4167  * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
   4168  * @state: global atomic state
   4169  * @mgr: MST topology manager for the port
   4170  * @port: port to find vcpi slots for
   4171  * @pbn: bandwidth required for the mode in PBN
   4172  * @pbn_div: divider for DSC mode that takes FEC into account
   4173  *
   4174  * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
   4175  * may have had. Any atomic drivers which support MST must call this function
   4176  * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
   4177  * current VCPI allocation for the new state, but only when
   4178  * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
   4179  * to ensure compatibility with userspace applications that still use the
   4180  * legacy modesetting UAPI.
   4181  *
   4182  * Allocations set by this function are not checked against the bandwidth
   4183  * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
   4184  *
   4185  * Additionally, it is OK to call this function multiple times on the same
   4186  * @port as needed. It is not OK however, to call this function and
   4187  * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
   4188  *
   4189  * See also:
   4190  * drm_dp_atomic_release_vcpi_slots()
   4191  * drm_dp_mst_atomic_check()
   4192  *
   4193  * Returns:
   4194  * Total slots in the atomic state assigned for this port, or a negative error
   4195  * code if the port no longer exists
   4196  */
   4197 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
   4198 				  struct drm_dp_mst_topology_mgr *mgr,
   4199 				  struct drm_dp_mst_port *port, int pbn,
   4200 				  int pbn_div)
   4201 {
   4202 	struct drm_dp_mst_topology_state *topology_state;
   4203 	struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
   4204 	int prev_slots, prev_bw, req_slots;
   4205 
   4206 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
   4207 	if (IS_ERR(topology_state))
   4208 		return PTR_ERR(topology_state);
   4209 
   4210 	/* Find the current allocation for this port, if any */
   4211 	list_for_each_entry(pos, &topology_state->vcpis, next) {
   4212 		if (pos->port == port) {
   4213 			vcpi = pos;
   4214 			prev_slots = vcpi->vcpi;
   4215 			prev_bw = vcpi->pbn;
   4216 
   4217 			/*
   4218 			 * This should never happen, unless the driver tries
   4219 			 * releasing and allocating the same VCPI allocation,
   4220 			 * which is an error
   4221 			 */
   4222 			if (WARN_ON(!prev_slots)) {
   4223 				DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
   4224 					  port);
   4225 				return -EINVAL;
   4226 			}
   4227 
   4228 			break;
   4229 		}
   4230 	}
   4231 	if (!vcpi) {
   4232 		prev_slots = 0;
   4233 		prev_bw = 0;
   4234 	}
   4235 
   4236 	if (pbn_div <= 0)
   4237 		pbn_div = mgr->pbn_div;
   4238 
   4239 	req_slots = DIV_ROUND_UP(pbn, pbn_div);
   4240 
   4241 	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
   4242 			 port->connector->base.id, port->connector->name,
   4243 			 port, prev_slots, req_slots);
   4244 	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
   4245 			 port->connector->base.id, port->connector->name,
   4246 			 port, prev_bw, pbn);
   4247 
   4248 	/* Add the new allocation to the state */
   4249 	if (!vcpi) {
   4250 		vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
   4251 		if (!vcpi)
   4252 			return -ENOMEM;
   4253 
   4254 		drm_dp_mst_get_port_malloc(port);
   4255 		vcpi->port = port;
   4256 		list_add(&vcpi->next, &topology_state->vcpis);
   4257 	}
   4258 	vcpi->vcpi = req_slots;
   4259 	vcpi->pbn = pbn;
   4260 
   4261 	return req_slots;
   4262 }
   4263 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
   4264 
   4265 /**
   4266  * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
   4267  * @state: global atomic state
   4268  * @mgr: MST topology manager for the port
   4269  * @port: The port to release the VCPI slots from
   4270  *
   4271  * Releases any VCPI slots that have been allocated to a port in the atomic
   4272  * state. Any atomic drivers which support MST must call this function in
   4273  * their &drm_connector_helper_funcs.atomic_check() callback when the
   4274  * connector will no longer have VCPI allocated (e.g. because its CRTC was
   4275  * removed) when it had VCPI allocated in the previous atomic state.
   4276  *
   4277  * It is OK to call this even if @port has been removed from the system.
   4278  * Additionally, it is OK to call this function multiple times on the same
   4279  * @port as needed. It is not OK however, to call this function and
   4280  * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
   4281  * phase.
   4282  *
   4283  * See also:
   4284  * drm_dp_atomic_find_vcpi_slots()
   4285  * drm_dp_mst_atomic_check()
   4286  *
   4287  * Returns:
   4288  * 0 if all slots for this port were added back to
   4289  * &drm_dp_mst_topology_state.avail_slots or negative error code
   4290  */
   4291 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
   4292 				     struct drm_dp_mst_topology_mgr *mgr,
   4293 				     struct drm_dp_mst_port *port)
   4294 {
   4295 	struct drm_dp_mst_topology_state *topology_state;
   4296 	struct drm_dp_vcpi_allocation *pos;
   4297 	bool found = false;
   4298 
   4299 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
   4300 	if (IS_ERR(topology_state))
   4301 		return PTR_ERR(topology_state);
   4302 
   4303 	list_for_each_entry(pos, &topology_state->vcpis, next) {
   4304 		if (pos->port == port) {
   4305 			found = true;
   4306 			break;
   4307 		}
   4308 	}
   4309 	if (WARN_ON(!found)) {
   4310 		DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
   4311 			  port, &topology_state->base);
   4312 		return -EINVAL;
   4313 	}
   4314 
   4315 	DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
   4316 	if (pos->vcpi) {
   4317 		drm_dp_mst_put_port_malloc(port);
   4318 		pos->vcpi = 0;
   4319 	}
   4320 
   4321 	return 0;
   4322 }
   4323 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
   4324 
   4325 /**
   4326  * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
   4327  * @mgr: manager for this port
   4328  * @port: port to allocate a virtual channel for.
   4329  * @pbn: payload bandwidth number to request
   4330  * @slots: returned number of slots for this PBN.
   4331  */
   4332 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
   4333 			      struct drm_dp_mst_port *port, int pbn, int slots)
   4334 {
   4335 	int ret;
   4336 
   4337 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   4338 	if (!port)
   4339 		return false;
   4340 
   4341 	if (slots < 0)
   4342 		return false;
   4343 
   4344 	if (port->vcpi.vcpi > 0) {
   4345 		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
   4346 			      port->vcpi.vcpi, port->vcpi.pbn, pbn);
   4347 		if (pbn == port->vcpi.pbn) {
   4348 			drm_dp_mst_topology_put_port(port);
   4349 			return true;
   4350 		}
   4351 	}
   4352 
   4353 	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
   4354 	if (ret) {
   4355 		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
   4356 			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
   4357 		goto out;
   4358 	}
   4359 	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
   4360 		      pbn, port->vcpi.num_slots);
   4361 
   4362 	/* Keep port allocated until its payload has been removed */
   4363 	drm_dp_mst_get_port_malloc(port);
   4364 	drm_dp_mst_topology_put_port(port);
   4365 	return true;
   4366 out:
   4367 	return false;
   4368 }
   4369 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
   4370 
   4371 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   4372 {
   4373 	int slots = 0;
   4374 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   4375 	if (!port)
   4376 		return slots;
   4377 
   4378 	slots = port->vcpi.num_slots;
   4379 	drm_dp_mst_topology_put_port(port);
   4380 	return slots;
   4381 }
   4382 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
   4383 
   4384 /**
   4385  * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
   4386  * @mgr: manager for this port
   4387  * @port: unverified pointer to a port.
   4388  *
   4389  * This just resets the number of slots for the ports VCPI for later programming.
   4390  */
   4391 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   4392 {
   4393 	/*
   4394 	 * A port with VCPI will remain allocated until its VCPI is
   4395 	 * released, no verified ref needed
   4396 	 */
   4397 
   4398 	port->vcpi.num_slots = 0;
   4399 }
   4400 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
   4401 
   4402 /**
   4403  * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
   4404  * @mgr: manager for this port
   4405  * @port: port to deallocate vcpi for
   4406  *
   4407  * This can be called unconditionally, regardless of whether
   4408  * drm_dp_mst_allocate_vcpi() succeeded or not.
   4409  */
   4410 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
   4411 				struct drm_dp_mst_port *port)
   4412 {
   4413 	if (!port->vcpi.vcpi)
   4414 		return;
   4415 
   4416 	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
   4417 	port->vcpi.num_slots = 0;
   4418 	port->vcpi.pbn = 0;
   4419 	port->vcpi.aligned_pbn = 0;
   4420 	port->vcpi.vcpi = 0;
   4421 	drm_dp_mst_put_port_malloc(port);
   4422 }
   4423 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
   4424 
   4425 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
   4426 				     int id, struct drm_dp_payload *payload)
   4427 {
   4428 	u8 payload_alloc[3], status;
   4429 	int ret;
   4430 	int retries = 0;
   4431 
   4432 	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
   4433 			   DP_PAYLOAD_TABLE_UPDATED);
   4434 
   4435 	payload_alloc[0] = id;
   4436 	payload_alloc[1] = payload->start_slot;
   4437 	payload_alloc[2] = payload->num_slots;
   4438 
   4439 	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
   4440 	if (ret != 3) {
   4441 		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
   4442 		goto fail;
   4443 	}
   4444 
   4445 retry:
   4446 	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
   4447 	if (ret < 0) {
   4448 		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
   4449 		goto fail;
   4450 	}
   4451 
   4452 	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
   4453 		retries++;
   4454 		if (retries < 20) {
   4455 			usleep_range(10000, 20000);
   4456 			goto retry;
   4457 		}
   4458 		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
   4459 		ret = -EINVAL;
   4460 		goto fail;
   4461 	}
   4462 	ret = 0;
   4463 fail:
   4464 	return ret;
   4465 }
   4466 
   4467 
   4468 /**
   4469  * drm_dp_check_act_status() - Check ACT handled status.
   4470  * @mgr: manager to use
   4471  *
   4472  * Check the payload status bits in the DPCD for ACT handled completion.
   4473  */
   4474 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
   4475 {
   4476 	u8 status;
   4477 	int ret;
   4478 	int count = 0;
   4479 
   4480 	do {
   4481 		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
   4482 
   4483 		if (ret < 0) {
   4484 			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
   4485 			goto fail;
   4486 		}
   4487 
   4488 		if (status & DP_PAYLOAD_ACT_HANDLED)
   4489 			break;
   4490 		count++;
   4491 		udelay(100);
   4492 
   4493 	} while (count < 30);
   4494 
   4495 	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
   4496 		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
   4497 		ret = -EINVAL;
   4498 		goto fail;
   4499 	}
   4500 	return 0;
   4501 fail:
   4502 	return ret;
   4503 }
   4504 EXPORT_SYMBOL(drm_dp_check_act_status);
   4505 
   4506 /**
   4507  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
   4508  * @clock: dot clock for the mode
   4509  * @bpp: bpp for the mode.
   4510  * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
   4511  *
   4512  * This uses the formula in the spec to calculate the PBN value for a mode.
   4513  */
   4514 int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
   4515 {
   4516 	/*
   4517 	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
   4518 	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
   4519 	 * common multiplier to render an integer PBN for all link rate/lane
   4520 	 * counts combinations
   4521 	 * calculate
   4522 	 * peak_kbps *= (1006/1000)
   4523 	 * peak_kbps *= (64/54)
   4524 	 * peak_kbps *= 8    convert to bytes
   4525 	 *
   4526 	 * If the bpp is in units of 1/16, further divide by 16. Put this
   4527 	 * factor in the numerator rather than the denominator to avoid
   4528 	 * integer overflow
   4529 	 */
   4530 
   4531 	if (dsc)
   4532 		return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
   4533 					8 * 54 * 1000 * 1000);
   4534 
   4535 	return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
   4536 				8 * 54 * 1000 * 1000);
   4537 }
   4538 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
   4539 
   4540 /* we want to kick the TX after we've ack the up/down IRQs. */
   4541 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
   4542 {
   4543 	queue_work(system_long_wq, &mgr->tx_work);
   4544 }
   4545 
   4546 #if IS_ENABLED(CONFIG_DEBUG_FS)
   4547 static void drm_dp_mst_dump_mstb(struct seq_file *m,
   4548 				 struct drm_dp_mst_branch *mstb)
   4549 {
   4550 	struct drm_dp_mst_port *port;
   4551 	int tabs = mstb->lct;
   4552 	char prefix[10];
   4553 	int i;
   4554 
   4555 	for (i = 0; i < tabs; i++)
   4556 		prefix[i] = '\t';
   4557 	prefix[i] = '\0';
   4558 
   4559 	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
   4560 	list_for_each_entry(port, &mstb->ports, next) {
   4561 		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
   4562 		if (port->mstb)
   4563 			drm_dp_mst_dump_mstb(m, port->mstb);
   4564 	}
   4565 }
   4566 
   4567 #define DP_PAYLOAD_TABLE_SIZE		64
   4568 
   4569 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
   4570 				  char *buf)
   4571 {
   4572 	int i;
   4573 
   4574 	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
   4575 		if (drm_dp_dpcd_read(mgr->aux,
   4576 				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
   4577 				     &buf[i], 16) != 16)
   4578 			return false;
   4579 	}
   4580 	return true;
   4581 }
   4582 
   4583 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
   4584 			       struct drm_dp_mst_port *port, char *name,
   4585 			       int namelen)
   4586 {
   4587 	struct edid *mst_edid;
   4588 
   4589 	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
   4590 	drm_edid_get_monitor_name(mst_edid, name, namelen);
   4591 }
   4592 
   4593 /**
   4594  * drm_dp_mst_dump_topology(): dump topology to seq file.
   4595  * @m: seq_file to dump output to
   4596  * @mgr: manager to dump current topology for.
   4597  *
   4598  * helper to dump MST topology to a seq file for debugfs.
   4599  */
   4600 void drm_dp_mst_dump_topology(struct seq_file *m,
   4601 			      struct drm_dp_mst_topology_mgr *mgr)
   4602 {
   4603 	int i;
   4604 	struct drm_dp_mst_port *port;
   4605 
   4606 	mutex_lock(&mgr->lock);
   4607 	if (mgr->mst_primary)
   4608 		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
   4609 
   4610 	/* dump VCPIs */
   4611 	mutex_unlock(&mgr->lock);
   4612 
   4613 	mutex_lock(&mgr->payload_lock);
   4614 	seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
   4615 		mgr->max_payloads);
   4616 
   4617 	for (i = 0; i < mgr->max_payloads; i++) {
   4618 		if (mgr->proposed_vcpis[i]) {
   4619 			char name[14];
   4620 
   4621 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   4622 			fetch_monitor_name(mgr, port, name, sizeof(name));
   4623 			seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
   4624 				   port->port_num, port->vcpi.vcpi,
   4625 				   port->vcpi.num_slots,
   4626 				   (*name != 0) ? name :  "Unknown");
   4627 		} else
   4628 			seq_printf(m, "vcpi %d:unused\n", i);
   4629 	}
   4630 	for (i = 0; i < mgr->max_payloads; i++) {
   4631 		seq_printf(m, "payload %d: %d, %d, %d\n",
   4632 			   i,
   4633 			   mgr->payloads[i].payload_state,
   4634 			   mgr->payloads[i].start_slot,
   4635 			   mgr->payloads[i].num_slots);
   4636 
   4637 
   4638 	}
   4639 	mutex_unlock(&mgr->payload_lock);
   4640 
   4641 	mutex_lock(&mgr->lock);
   4642 	if (mgr->mst_primary) {
   4643 		u8 buf[DP_PAYLOAD_TABLE_SIZE];
   4644 		int ret;
   4645 
   4646 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
   4647 		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
   4648 		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
   4649 		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
   4650 		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
   4651 		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
   4652 
   4653 		/* dump the standard OUI branch header */
   4654 		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
   4655 		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
   4656 		for (i = 0x3; i < 0x8 && buf[i]; i++)
   4657 			seq_printf(m, "%c", buf[i]);
   4658 		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
   4659 			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
   4660 		if (dump_dp_payload_table(mgr, buf))
   4661 			seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
   4662 	}
   4663 
   4664 	mutex_unlock(&mgr->lock);
   4665 
   4666 }
   4667 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
   4668 #endif	/* IS_ENABLED(CONFIG_DEBUG_FS) */
   4669 
   4670 static void drm_dp_tx_work(struct work_struct *work)
   4671 {
   4672 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
   4673 
   4674 	mutex_lock(&mgr->qlock);
   4675 	if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
   4676 		process_single_down_tx_qlock(mgr);
   4677 	mutex_unlock(&mgr->qlock);
   4678 }
   4679 
   4680 static inline void
   4681 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
   4682 {
   4683 	if (port->connector)
   4684 		port->mgr->cbs->destroy_connector(port->mgr, port->connector);
   4685 
   4686 	drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
   4687 	drm_dp_mst_put_port_malloc(port);
   4688 }
   4689 
   4690 static inline void
   4691 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
   4692 {
   4693 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
   4694 	struct drm_dp_mst_port *port, *tmp;
   4695 	bool wake_tx = false;
   4696 
   4697 	mutex_lock(&mgr->lock);
   4698 	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
   4699 		list_del(&port->next);
   4700 		drm_dp_mst_topology_put_port(port);
   4701 	}
   4702 	mutex_unlock(&mgr->lock);
   4703 
   4704 	/* drop any tx slots msg */
   4705 	mutex_lock(&mstb->mgr->qlock);
   4706 	if (mstb->tx_slots[0]) {
   4707 		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
   4708 		mstb->tx_slots[0] = NULL;
   4709 		wake_tx = true;
   4710 	}
   4711 	if (mstb->tx_slots[1]) {
   4712 		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
   4713 		mstb->tx_slots[1] = NULL;
   4714 		wake_tx = true;
   4715 	}
   4716 	mutex_unlock(&mstb->mgr->qlock);
   4717 
   4718 	if (wake_tx)
   4719 		wake_up_all(&mstb->mgr->tx_waitq);
   4720 
   4721 	drm_dp_mst_put_mstb_malloc(mstb);
   4722 }
   4723 
   4724 static void drm_dp_delayed_destroy_work(struct work_struct *work)
   4725 {
   4726 	struct drm_dp_mst_topology_mgr *mgr =
   4727 		container_of(work, struct drm_dp_mst_topology_mgr,
   4728 			     delayed_destroy_work);
   4729 	bool send_hotplug = false, go_again;
   4730 
   4731 	/*
   4732 	 * Not a regular list traverse as we have to drop the destroy
   4733 	 * connector lock before destroying the mstb/port, to avoid AB->BA
   4734 	 * ordering between this lock and the config mutex.
   4735 	 */
   4736 	do {
   4737 		go_again = false;
   4738 
   4739 		for (;;) {
   4740 			struct drm_dp_mst_branch *mstb;
   4741 
   4742 			mutex_lock(&mgr->delayed_destroy_lock);
   4743 			mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
   4744 							struct drm_dp_mst_branch,
   4745 							destroy_next);
   4746 			if (mstb)
   4747 				list_del(&mstb->destroy_next);
   4748 			mutex_unlock(&mgr->delayed_destroy_lock);
   4749 
   4750 			if (!mstb)
   4751 				break;
   4752 
   4753 			drm_dp_delayed_destroy_mstb(mstb);
   4754 			go_again = true;
   4755 		}
   4756 
   4757 		for (;;) {
   4758 			struct drm_dp_mst_port *port;
   4759 
   4760 			mutex_lock(&mgr->delayed_destroy_lock);
   4761 			port = list_first_entry_or_null(&mgr->destroy_port_list,
   4762 							struct drm_dp_mst_port,
   4763 							next);
   4764 			if (port)
   4765 				list_del(&port->next);
   4766 			mutex_unlock(&mgr->delayed_destroy_lock);
   4767 
   4768 			if (!port)
   4769 				break;
   4770 
   4771 			drm_dp_delayed_destroy_port(port);
   4772 			send_hotplug = true;
   4773 			go_again = true;
   4774 		}
   4775 	} while (go_again);
   4776 
   4777 	if (send_hotplug)
   4778 		drm_kms_helper_hotplug_event(mgr->dev);
   4779 }
   4780 
   4781 static struct drm_private_state *
   4782 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
   4783 {
   4784 	struct drm_dp_mst_topology_state *state, *old_state =
   4785 		to_dp_mst_topology_state(obj->state);
   4786 	struct drm_dp_vcpi_allocation *pos, *vcpi;
   4787 
   4788 	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
   4789 	if (!state)
   4790 		return NULL;
   4791 
   4792 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
   4793 
   4794 	INIT_LIST_HEAD(&state->vcpis);
   4795 
   4796 	list_for_each_entry(pos, &old_state->vcpis, next) {
   4797 		/* Prune leftover freed VCPI allocations */
   4798 		if (!pos->vcpi)
   4799 			continue;
   4800 
   4801 		vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
   4802 		if (!vcpi)
   4803 			goto fail;
   4804 
   4805 		drm_dp_mst_get_port_malloc(vcpi->port);
   4806 		list_add(&vcpi->next, &state->vcpis);
   4807 	}
   4808 
   4809 	return &state->base;
   4810 
   4811 fail:
   4812 	list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
   4813 		drm_dp_mst_put_port_malloc(pos->port);
   4814 		kfree(pos);
   4815 	}
   4816 	kfree(state);
   4817 
   4818 	return NULL;
   4819 }
   4820 
   4821 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
   4822 				     struct drm_private_state *state)
   4823 {
   4824 	struct drm_dp_mst_topology_state *mst_state =
   4825 		to_dp_mst_topology_state(state);
   4826 	struct drm_dp_vcpi_allocation *pos, *tmp;
   4827 
   4828 	list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
   4829 		/* We only keep references to ports with non-zero VCPIs */
   4830 		if (pos->vcpi)
   4831 			drm_dp_mst_put_port_malloc(pos->port);
   4832 		kfree(pos);
   4833 	}
   4834 
   4835 	kfree(mst_state);
   4836 }
   4837 
   4838 static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
   4839 						 struct drm_dp_mst_branch *branch)
   4840 {
   4841 	while (port->parent) {
   4842 		if (port->parent == branch)
   4843 			return true;
   4844 
   4845 		if (port->parent->port_parent)
   4846 			port = port->parent->port_parent;
   4847 		else
   4848 			break;
   4849 	}
   4850 	return false;
   4851 }
   4852 
   4853 static inline
   4854 int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
   4855 				     struct drm_dp_mst_topology_state *mst_state)
   4856 {
   4857 	struct drm_dp_mst_port *port;
   4858 	struct drm_dp_vcpi_allocation *vcpi;
   4859 	int pbn_limit = 0, pbn_used = 0;
   4860 
   4861 	list_for_each_entry(port, &branch->ports, next) {
   4862 		if (port->mstb)
   4863 			if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
   4864 				return -ENOSPC;
   4865 
   4866 		if (port->available_pbn > 0)
   4867 			pbn_limit = port->available_pbn;
   4868 	}
   4869 	DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
   4870 			 branch, pbn_limit);
   4871 
   4872 	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
   4873 		if (!vcpi->pbn)
   4874 			continue;
   4875 
   4876 		if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
   4877 			pbn_used += vcpi->pbn;
   4878 	}
   4879 	DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
   4880 			 branch, pbn_used);
   4881 
   4882 	if (pbn_used > pbn_limit) {
   4883 		DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
   4884 				 branch);
   4885 		return -ENOSPC;
   4886 	}
   4887 	return 0;
   4888 }
   4889 
   4890 static inline int
   4891 drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
   4892 					 struct drm_dp_mst_topology_state *mst_state)
   4893 {
   4894 	struct drm_dp_vcpi_allocation *vcpi;
   4895 	int avail_slots = 63, payload_count = 0;
   4896 
   4897 	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
   4898 		/* Releasing VCPI is always OK-even if the port is gone */
   4899 		if (!vcpi->vcpi) {
   4900 			DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
   4901 					 vcpi->port);
   4902 			continue;
   4903 		}
   4904 
   4905 		DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
   4906 				 vcpi->port, vcpi->vcpi);
   4907 
   4908 		avail_slots -= vcpi->vcpi;
   4909 		if (avail_slots < 0) {
   4910 			DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
   4911 					 vcpi->port, mst_state,
   4912 					 avail_slots + vcpi->vcpi);
   4913 			return -ENOSPC;
   4914 		}
   4915 
   4916 		if (++payload_count > mgr->max_payloads) {
   4917 			DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
   4918 					 mgr, mst_state, mgr->max_payloads);
   4919 			return -EINVAL;
   4920 		}
   4921 	}
   4922 	DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
   4923 			 mgr, mst_state, avail_slots,
   4924 			 63 - avail_slots);
   4925 
   4926 	return 0;
   4927 }
   4928 
   4929 /**
   4930  * drm_dp_mst_add_affected_dsc_crtcs
   4931  * @state: Pointer to the new struct drm_dp_mst_topology_state
   4932  * @mgr: MST topology manager
   4933  *
   4934  * Whenever there is a change in mst topology
   4935  * DSC configuration would have to be recalculated
   4936  * therefore we need to trigger modeset on all affected
   4937  * CRTCs in that topology
   4938  *
   4939  * See also:
   4940  * drm_dp_mst_atomic_enable_dsc()
   4941  */
   4942 int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
   4943 {
   4944 	struct drm_dp_mst_topology_state *mst_state;
   4945 	struct drm_dp_vcpi_allocation *pos;
   4946 	struct drm_connector *connector;
   4947 	struct drm_connector_state *conn_state;
   4948 	struct drm_crtc *crtc;
   4949 	struct drm_crtc_state *crtc_state;
   4950 
   4951 	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
   4952 
   4953 	if (IS_ERR(mst_state))
   4954 		return -EINVAL;
   4955 
   4956 	list_for_each_entry(pos, &mst_state->vcpis, next) {
   4957 
   4958 		connector = pos->port->connector;
   4959 
   4960 		if (!connector)
   4961 			return -EINVAL;
   4962 
   4963 		conn_state = drm_atomic_get_connector_state(state, connector);
   4964 
   4965 		if (IS_ERR(conn_state))
   4966 			return PTR_ERR(conn_state);
   4967 
   4968 		crtc = conn_state->crtc;
   4969 
   4970 		if (WARN_ON(!crtc))
   4971 			return -EINVAL;
   4972 
   4973 		if (!drm_dp_mst_dsc_aux_for_port(pos->port))
   4974 			continue;
   4975 
   4976 		crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
   4977 
   4978 		if (IS_ERR(crtc_state))
   4979 			return PTR_ERR(crtc_state);
   4980 
   4981 		DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
   4982 				 mgr, crtc);
   4983 
   4984 		crtc_state->mode_changed = true;
   4985 	}
   4986 	return 0;
   4987 }
   4988 EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
   4989 
   4990 /**
   4991  * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
   4992  * @state: Pointer to the new drm_atomic_state
   4993  * @port: Pointer to the affected MST Port
   4994  * @pbn: Newly recalculated bw required for link with DSC enabled
   4995  * @pbn_div: Divider to calculate correct number of pbn per slot
   4996  * @enable: Boolean flag to enable or disable DSC on the port
   4997  *
   4998  * This function enables DSC on the given Port
   4999  * by recalculating its vcpi from pbn provided
   5000  * and sets dsc_enable flag to keep track of which
   5001  * ports have DSC enabled
   5002  *
   5003  */
   5004 int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
   5005 				 struct drm_dp_mst_port *port,
   5006 				 int pbn, int pbn_div,
   5007 				 bool enable)
   5008 {
   5009 	struct drm_dp_mst_topology_state *mst_state;
   5010 	struct drm_dp_vcpi_allocation *pos;
   5011 	bool found = false;
   5012 	int vcpi = 0;
   5013 
   5014 	mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
   5015 
   5016 	if (IS_ERR(mst_state))
   5017 		return PTR_ERR(mst_state);
   5018 
   5019 	list_for_each_entry(pos, &mst_state->vcpis, next) {
   5020 		if (pos->port == port) {
   5021 			found = true;
   5022 			break;
   5023 		}
   5024 	}
   5025 
   5026 	if (!found) {
   5027 		DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
   5028 				 port, mst_state);
   5029 		return -EINVAL;
   5030 	}
   5031 
   5032 	if (pos->dsc_enabled == enable) {
   5033 		DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
   5034 				 port, enable, pos->vcpi);
   5035 		vcpi = pos->vcpi;
   5036 	}
   5037 
   5038 	if (enable) {
   5039 		vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
   5040 		DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
   5041 				 port, vcpi);
   5042 		if (vcpi < 0)
   5043 			return -EINVAL;
   5044 	}
   5045 
   5046 	pos->dsc_enabled = enable;
   5047 
   5048 	return vcpi;
   5049 }
   5050 EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
   5051 /**
   5052  * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
   5053  * atomic update is valid
   5054  * @state: Pointer to the new &struct drm_dp_mst_topology_state
   5055  *
   5056  * Checks the given topology state for an atomic update to ensure that it's
   5057  * valid. This includes checking whether there's enough bandwidth to support
   5058  * the new VCPI allocations in the atomic update.
   5059  *
   5060  * Any atomic drivers supporting DP MST must make sure to call this after
   5061  * checking the rest of their state in their
   5062  * &drm_mode_config_funcs.atomic_check() callback.
   5063  *
   5064  * See also:
   5065  * drm_dp_atomic_find_vcpi_slots()
   5066  * drm_dp_atomic_release_vcpi_slots()
   5067  *
   5068  * Returns:
   5069  *
   5070  * 0 if the new state is valid, negative error code otherwise.
   5071  */
   5072 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
   5073 {
   5074 	struct drm_dp_mst_topology_mgr *mgr;
   5075 	struct drm_dp_mst_topology_state *mst_state;
   5076 	int i, ret = 0;
   5077 
   5078 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
   5079 		if (!mgr->mst_state)
   5080 			continue;
   5081 
   5082 		ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
   5083 		if (ret)
   5084 			break;
   5085 		ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
   5086 		if (ret)
   5087 			break;
   5088 	}
   5089 
   5090 	return ret;
   5091 }
   5092 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
   5093 
   5094 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
   5095 	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
   5096 	.atomic_destroy_state = drm_dp_mst_destroy_state,
   5097 };
   5098 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
   5099 
   5100 /**
   5101  * drm_atomic_get_mst_topology_state: get MST topology state
   5102  *
   5103  * @state: global atomic state
   5104  * @mgr: MST topology manager, also the private object in this case
   5105  *
   5106  * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
   5107  * state vtable so that the private object state returned is that of a MST
   5108  * topology object. Also, drm_atomic_get_private_obj_state() expects the caller
   5109  * to care of the locking, so warn if don't hold the connection_mutex.
   5110  *
   5111  * RETURNS:
   5112  *
   5113  * The MST topology state or error pointer.
   5114  */
   5115 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
   5116 								    struct drm_dp_mst_topology_mgr *mgr)
   5117 {
   5118 	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
   5119 }
   5120 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
   5121 
   5122 /**
   5123  * drm_dp_mst_topology_mgr_init - initialise a topology manager
   5124  * @mgr: manager struct to initialise
   5125  * @dev: device providing this structure - for i2c addition.
   5126  * @aux: DP helper aux channel to talk to this device
   5127  * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
   5128  * @max_payloads: maximum number of payloads this GPU can source
   5129  * @conn_base_id: the connector object ID the MST device is connected to.
   5130  *
   5131  * Return 0 for success, or negative error code on failure
   5132  */
   5133 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
   5134 				 struct drm_device *dev, struct drm_dp_aux *aux,
   5135 				 int max_dpcd_transaction_bytes,
   5136 				 int max_payloads, int conn_base_id)
   5137 {
   5138 	struct drm_dp_mst_topology_state *mst_state;
   5139 
   5140 	mutex_init(&mgr->lock);
   5141 	mutex_init(&mgr->qlock);
   5142 	mutex_init(&mgr->payload_lock);
   5143 	mutex_init(&mgr->delayed_destroy_lock);
   5144 	mutex_init(&mgr->up_req_lock);
   5145 	mutex_init(&mgr->probe_lock);
   5146 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
   5147 	mutex_init(&mgr->topology_ref_history_lock);
   5148 #endif
   5149 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
   5150 	INIT_LIST_HEAD(&mgr->destroy_port_list);
   5151 	INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
   5152 	INIT_LIST_HEAD(&mgr->up_req_list);
   5153 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
   5154 	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
   5155 	INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
   5156 	INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
   5157 #ifdef __NetBSD__
   5158 	DRM_INIT_WAITQUEUE(&mgr->tx_waitq, "dpmstwait");
   5159 #else
   5160 	init_waitqueue_head(&mgr->tx_waitq);
   5161 #endif
   5162 	mgr->dev = dev;
   5163 	mgr->aux = aux;
   5164 	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
   5165 	mgr->max_payloads = max_payloads;
   5166 	mgr->conn_base_id = conn_base_id;
   5167 	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
   5168 	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
   5169 		return -EINVAL;
   5170 	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
   5171 	if (!mgr->payloads)
   5172 		return -ENOMEM;
   5173 	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
   5174 	if (!mgr->proposed_vcpis)
   5175 		return -ENOMEM;
   5176 	set_bit(0, &mgr->payload_mask);
   5177 
   5178 	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
   5179 	if (mst_state == NULL)
   5180 		return -ENOMEM;
   5181 
   5182 	mst_state->mgr = mgr;
   5183 	INIT_LIST_HEAD(&mst_state->vcpis);
   5184 
   5185 	drm_atomic_private_obj_init(dev, &mgr->base,
   5186 				    &mst_state->base,
   5187 				    &drm_dp_mst_topology_state_funcs);
   5188 
   5189 	return 0;
   5190 }
   5191 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
   5192 
   5193 /**
   5194  * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
   5195  * @mgr: manager to destroy
   5196  */
   5197 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
   5198 {
   5199 	drm_dp_mst_topology_mgr_set_mst(mgr, false);
   5200 	flush_work(&mgr->work);
   5201 	cancel_work_sync(&mgr->delayed_destroy_work);
   5202 	mutex_lock(&mgr->payload_lock);
   5203 	kfree(mgr->payloads);
   5204 	mgr->payloads = NULL;
   5205 	kfree(mgr->proposed_vcpis);
   5206 	mgr->proposed_vcpis = NULL;
   5207 	mutex_unlock(&mgr->payload_lock);
   5208 	mgr->dev = NULL;
   5209 	mgr->aux = NULL;
   5210 	drm_atomic_private_obj_fini(&mgr->base);
   5211 	mgr->funcs = NULL;
   5212 
   5213 #ifdef __NetBSD__
   5214 	DRM_DESTROY_WAITQUEUE(&mgr->tx_waitq);
   5215 #endif
   5216 	mutex_destroy(&mgr->delayed_destroy_lock);
   5217 	mutex_destroy(&mgr->payload_lock);
   5218 	mutex_destroy(&mgr->qlock);
   5219 	mutex_destroy(&mgr->lock);
   5220 	mutex_destroy(&mgr->up_req_lock);
   5221 	mutex_destroy(&mgr->probe_lock);
   5222 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
   5223 	mutex_destroy(&mgr->topology_ref_history_lock);
   5224 #endif
   5225 }
   5226 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
   5227 
   5228 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
   5229 {
   5230 	int i;
   5231 
   5232 	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
   5233 		return false;
   5234 
   5235 	for (i = 0; i < num - 1; i++) {
   5236 		if (msgs[i].flags & I2C_M_RD ||
   5237 		    msgs[i].len > 0xff)
   5238 			return false;
   5239 	}
   5240 
   5241 	return msgs[num - 1].flags & I2C_M_RD &&
   5242 		msgs[num - 1].len <= 0xff;
   5243 }
   5244 
   5245 /* I2C device */
   5246 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
   5247 			       int num)
   5248 {
   5249 	struct drm_dp_aux *aux = adapter->algo_data;
   5250 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
   5251 	struct drm_dp_mst_branch *mstb;
   5252 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   5253 	unsigned int i;
   5254 	struct drm_dp_sideband_msg_req_body msg;
   5255 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
   5256 	int ret;
   5257 
   5258 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
   5259 	if (!mstb)
   5260 		return -EREMOTEIO;
   5261 
   5262 	if (!remote_i2c_read_ok(msgs, num)) {
   5263 		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
   5264 		ret = -EIO;
   5265 		goto out;
   5266 	}
   5267 
   5268 	memset(&msg, 0, sizeof(msg));
   5269 	msg.req_type = DP_REMOTE_I2C_READ;
   5270 	msg.u.i2c_read.num_transactions = num - 1;
   5271 	msg.u.i2c_read.port_number = port->port_num;
   5272 	for (i = 0; i < num - 1; i++) {
   5273 		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
   5274 		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
   5275 		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
   5276 		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
   5277 	}
   5278 	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
   5279 	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
   5280 
   5281 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   5282 	if (!txmsg) {
   5283 		ret = -ENOMEM;
   5284 		goto out;
   5285 	}
   5286 
   5287 	txmsg->dst = mstb;
   5288 	drm_dp_encode_sideband_req(&msg, txmsg);
   5289 
   5290 	drm_dp_queue_down_tx(mgr, txmsg);
   5291 
   5292 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   5293 	if (ret > 0) {
   5294 
   5295 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
   5296 			ret = -EREMOTEIO;
   5297 			goto out;
   5298 		}
   5299 		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
   5300 			ret = -EIO;
   5301 			goto out;
   5302 		}
   5303 		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
   5304 		ret = num;
   5305 	}
   5306 out:
   5307 	kfree(txmsg);
   5308 	drm_dp_mst_topology_put_mstb(mstb);
   5309 	return ret;
   5310 }
   5311 
   5312 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
   5313 {
   5314 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
   5315 	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
   5316 	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
   5317 	       I2C_FUNC_10BIT_ADDR;
   5318 }
   5319 
   5320 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
   5321 	.functionality = drm_dp_mst_i2c_functionality,
   5322 	.master_xfer = drm_dp_mst_i2c_xfer,
   5323 };
   5324 
   5325 /**
   5326  * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
   5327  * @aux: DisplayPort AUX channel
   5328  *
   5329  * Returns 0 on success or a negative error code on failure.
   5330  */
   5331 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
   5332 {
   5333 	aux->ddc.algo = &drm_dp_mst_i2c_algo;
   5334 	aux->ddc.algo_data = aux;
   5335 	aux->ddc.retries = 3;
   5336 
   5337 	aux->ddc.class = I2C_CLASS_DDC;
   5338 	aux->ddc.owner = THIS_MODULE;
   5339 	aux->ddc.dev.parent = aux->dev;
   5340 #ifndef __NetBSD__		/* XXX of? */
   5341 	aux->ddc.dev.of_node = aux->dev->of_node;
   5342 #endif
   5343 
   5344 	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
   5345 		sizeof(aux->ddc.name));
   5346 
   5347 	return i2c_add_adapter(&aux->ddc);
   5348 }
   5349 
   5350 /**
   5351  * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
   5352  * @aux: DisplayPort AUX channel
   5353  */
   5354 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
   5355 {
   5356 	i2c_del_adapter(&aux->ddc);
   5357 }
   5358 
   5359 /**
   5360  * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
   5361  * @port: The port to check
   5362  *
   5363  * A single physical MST hub object can be represented in the topology
   5364  * by multiple branches, with virtual ports between those branches.
   5365  *
   5366  * As of DP1.4, An MST hub with internal (virtual) ports must expose
   5367  * certain DPCD registers over those ports. See sections 2.6.1.1.1
   5368  * and 2.6.1.1.2 of Display Port specification v1.4 for details.
   5369  *
   5370  * May acquire mgr->lock
   5371  *
   5372  * Returns:
   5373  * true if the port is a virtual DP peer device, false otherwise
   5374  */
   5375 static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
   5376 {
   5377 	struct drm_dp_mst_port *downstream_port;
   5378 
   5379 	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
   5380 		return false;
   5381 
   5382 	/* Virtual DP Sink (Internal Display Panel) */
   5383 	if (port->port_num >= 8)
   5384 		return true;
   5385 
   5386 	/* DP-to-HDMI Protocol Converter */
   5387 	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
   5388 	    !port->mcs &&
   5389 	    port->ldps)
   5390 		return true;
   5391 
   5392 	/* DP-to-DP */
   5393 	mutex_lock(&port->mgr->lock);
   5394 	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
   5395 	    port->mstb &&
   5396 	    port->mstb->num_ports == 2) {
   5397 		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
   5398 			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
   5399 			    !downstream_port->input) {
   5400 				mutex_unlock(&port->mgr->lock);
   5401 				return true;
   5402 			}
   5403 		}
   5404 	}
   5405 	mutex_unlock(&port->mgr->lock);
   5406 
   5407 	return false;
   5408 }
   5409 
   5410 /**
   5411  * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
   5412  * @port: The port to check. A leaf of the MST tree with an attached display.
   5413  *
   5414  * Depending on the situation, DSC may be enabled via the endpoint aux,
   5415  * the immediately upstream aux, or the connector's physical aux.
   5416  *
   5417  * This is both the correct aux to read DSC_CAPABILITY and the
   5418  * correct aux to write DSC_ENABLED.
   5419  *
   5420  * This operation can be expensive (up to four aux reads), so
   5421  * the caller should cache the return.
   5422  *
   5423  * Returns:
   5424  * NULL if DSC cannot be enabled on this port, otherwise the aux device
   5425  */
   5426 struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
   5427 {
   5428 	struct drm_dp_mst_port *immediate_upstream_port;
   5429 	struct drm_dp_mst_port *fec_port;
   5430 	struct drm_dp_desc desc = { 0 };
   5431 	u8 endpoint_fec;
   5432 	u8 endpoint_dsc;
   5433 
   5434 	if (!port)
   5435 		return NULL;
   5436 
   5437 	if (port->parent->port_parent)
   5438 		immediate_upstream_port = port->parent->port_parent;
   5439 	else
   5440 		immediate_upstream_port = NULL;
   5441 
   5442 	fec_port = immediate_upstream_port;
   5443 	while (fec_port) {
   5444 		/*
   5445 		 * Each physical link (i.e. not a virtual port) between the
   5446 		 * output and the primary device must support FEC
   5447 		 */
   5448 		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
   5449 		    !fec_port->fec_capable)
   5450 			return NULL;
   5451 
   5452 		fec_port = fec_port->parent->port_parent;
   5453 	}
   5454 
   5455 	/* DP-to-DP peer device */
   5456 	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
   5457 		u8 upstream_dsc;
   5458 
   5459 		if (drm_dp_dpcd_read(&port->aux,
   5460 				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
   5461 			return NULL;
   5462 		if (drm_dp_dpcd_read(&port->aux,
   5463 				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
   5464 			return NULL;
   5465 		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
   5466 				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
   5467 			return NULL;
   5468 
   5469 		/* Enpoint decompression with DP-to-DP peer device */
   5470 		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
   5471 		    (endpoint_fec & DP_FEC_CAPABLE) &&
   5472 		    (upstream_dsc & 0x2) /* DSC passthrough */)
   5473 			return &port->aux;
   5474 
   5475 		/* Virtual DPCD decompression with DP-to-DP peer device */
   5476 		return &immediate_upstream_port->aux;
   5477 	}
   5478 
   5479 	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
   5480 	if (drm_dp_mst_is_virtual_dpcd(port))
   5481 		return &port->aux;
   5482 
   5483 	/*
   5484 	 * Synaptics quirk
   5485 	 * Applies to ports for which:
   5486 	 * - Physical aux has Synaptics OUI
   5487 	 * - DPv1.4 or higher
   5488 	 * - Port is on primary branch device
   5489 	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
   5490 	 */
   5491 	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
   5492 		return NULL;
   5493 
   5494 	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
   5495 	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
   5496 	    port->parent == port->mgr->mst_primary) {
   5497 		u8 downstreamport;
   5498 
   5499 		if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
   5500 				     &downstreamport, 1) < 0)
   5501 			return NULL;
   5502 
   5503 		if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
   5504 		   ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
   5505 		     != DP_DWN_STRM_PORT_TYPE_ANALOG))
   5506 			return port->mgr->aux;
   5507 	}
   5508 
   5509 	/*
   5510 	 * The check below verifies if the MST sink
   5511 	 * connected to the GPU is capable of DSC -
   5512 	 * therefore the endpoint needs to be
   5513 	 * both DSC and FEC capable.
   5514 	 */
   5515 	if (drm_dp_dpcd_read(&port->aux,
   5516 	   DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
   5517 		return NULL;
   5518 	if (drm_dp_dpcd_read(&port->aux,
   5519 	   DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
   5520 		return NULL;
   5521 	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
   5522 	   (endpoint_fec & DP_FEC_CAPABLE))
   5523 		return &port->aux;
   5524 
   5525 	return NULL;
   5526 }
   5527 EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
   5528