/*	$NetBSD: amdgpu_atombios_dp.c,v 1.4 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_atombios_dp.c,v 1.4 2021/12/18 23:44:58 riastradh Exp $");

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
#include "atombios_dp.h"
#include "amdgpu_connectors.h"
#include "amdgpu_atombios.h"
#include <drm/drm_dp_helper.h>

/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE

static const char *const voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static const char *const pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};

/***** amdgpu AUX functions *****/

union aux_channel_transaction {
	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
	PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
};

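/*
 * Descriptive note: run one AUX transaction through the
 * ProcessAuxChannelTransaction ATOM command table.  The request bytes are
 * staged in the BIOS scratch area (scratch + 1, i.e. offset 4 as seen by
 * the table) and any reply payload is copied back from 16 bytes past that.
 * Returns the number of bytes received, -ETIMEDOUT if the sink never
 * replied, or -EIO on an AUX error; the raw reply status byte is handed
 * back through *ack.
 */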
static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
				      u8 *send, int send_bytes,
				      u8 *recv, int recv_size,
				      u8 delay, u8 *ack)
{
	struct drm_device *dev = chan->dev;
	struct amdgpu_device *adev = dev->dev_private;
	union aux_channel_transaction args;
	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
	unsigned char *base;
	int recv_bytes;
	int r = 0;

	memset(&args, 0, sizeof(args));

	mutex_lock(&chan->mutex);

	base = (unsigned char *)(adev->mode_info.atom_context->scratch + 1);

	amdgpu_atombios_copy_swap(base, send, send_bytes, true);

	args.v2.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
	args.v2.lpDataOut = cpu_to_le16((u16)(16 + 4));
	args.v2.ucDataOutLen = 0;
	args.v2.ucChannelID = chan->rec.i2c_id;
	args.v2.ucDelay = delay / 10;
	args.v2.ucHPD_ID = chan->rec.hpd;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

	*ack = args.v2.ucReplyStatus;

	/* timeout */
	if (args.v2.ucReplyStatus == 1) {
		r = -ETIMEDOUT;
		goto done;
	}

	/* flags not zero */
	if (args.v2.ucReplyStatus == 2) {
		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
		r = -EIO;
		goto done;
	}

	/* error */
	if (args.v2.ucReplyStatus == 3) {
		DRM_DEBUG_KMS("dp_aux_ch error\n");
		r = -EIO;
		goto done;
	}

	recv_bytes = args.v1.ucDataOutLen;
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	if (recv && recv_size)
		amdgpu_atombios_copy_swap(recv, base + 16, recv_bytes, false);

	r = recv_bytes;
done:
	mutex_unlock(&chan->mutex);

	return r;
}

#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)

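/*
 * Descriptive note: drm_dp_aux transfer hook.  The 4-byte AUX header is
 * packed the way the ATOM table expects it: address[7:0], address[15:8],
 * request nibble plus address[19:16], then length-1 (0 for a zero-byte
 * "address only" probe).  The total request size is also folded into the
 * high nibble of tx_buf[3], since the command table takes its byte count
 * from there.
 */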
static ssize_t
amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct amdgpu_i2c_chan *chan =
		container_of(aux, struct amdgpu_i2c_chan, aux);
	int ret;
	u8 tx_buf[20];
	size_t tx_size;
	u8 ack, delay = 0;

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

	tx_buf[0] = msg->address & 0xff;
	tx_buf[1] = msg->address >> 8;
	tx_buf[2] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	tx_buf[3] = msg->size ? (msg->size - 1) : 0;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* tx_size needs to be 4 even for bare address packets since the atom
		 * table needs the info in tx_buf[3].
		 */
		tx_size = HEADER_SIZE + msg->size;
		if (msg->size == 0)
			tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
		else
			tx_buf[3] |= tx_size << 4;
		memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size);
		ret = amdgpu_atombios_dp_process_aux_ch(chan,
						 tx_buf, tx_size, NULL, 0, delay, &ack);
		if (ret >= 0)
			/* Return payload size. */
			ret = msg->size;
		break;
	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		/* tx_size needs to be 4 even for bare address packets since the atom
		 * table needs the info in tx_buf[3].
		 */
		tx_size = HEADER_SIZE;
		if (msg->size == 0)
			tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
		else
			tx_buf[3] |= tx_size << 4;
		ret = amdgpu_atombios_dp_process_aux_ch(chan,
						 tx_buf, tx_size, msg->buffer, msg->size, delay, &ack);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret >= 0)
		msg->reply = ack >> 4;

	return ret;
}

void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector)
{
	int ret;

	amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd;
	amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
	amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer;
	ret = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
	if (!ret)
		amdgpu_connector->ddc_bus->has_aux = true;

	WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret);
}

/***** general DP utility functions *****/

#define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_LEVEL_3
#define DP_PRE_EMPHASIS_MAX    DP_TRAIN_PRE_EMPH_LEVEL_3

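/*
 * Descriptive note: collapse the per-lane adjustment requests in the DPCD
 * link status into a single voltage-swing/pre-emphasis pair.  Every lane is
 * driven at the highest level any lane asked for, and the MAX_*_REACHED
 * flags are set once the level-3 hardware limits are hit so the sink stops
 * asking for more.
 */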
static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STATUS_SIZE],
						int lane_count,
						u8 train_set[4])
{
	u8 v = 0;
	u8 p = 0;
	int lane;

	for (lane = 0; lane < lane_count; lane++) {
		u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
		u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

		DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
			  lane,
			  voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
			  pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	if (v >= DP_VOLTAGE_MAX)
		v |= DP_TRAIN_MAX_SWING_REACHED;

	if (p >= DP_PRE_EMPHASIS_MAX)
		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
		  voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
		  pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);

	for (lane = 0; lane < 4; lane++)
		train_set[lane] = v | p;
}

/* convert bits per color to bits per pixel */
/* get bpc from the EDID */
static unsigned amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
{
	if (bpc == 0)
		return 24;
	else
		return bpc * 3;
}

/***** amdgpu specific DP functions *****/

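/*
 * Descriptive note: pick the lowest link rate, and within it the smallest
 * lane count, that can carry the mode; a configuration fits when
 * lanes * link_rate_kHz * 8 / bpp >= pixel_clock_kHz.  Illustrative
 * example: a ~148500 kHz (1080p@60) mode at 24 bpp misses at 1 or 2 lanes
 * of 1.62 GHz (54000/108000) but fits at 4 lanes (216000), so 4x1.62 GHz
 * is chosen before any 2.7 GHz combination is tried.  The NUTMEG bridge
 * encoder is a special case pinned to the 2.7 GHz rate, so only the lane
 * count is searched for it.
 */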
static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector,
						 const u8 dpcd[DP_DPCD_SIZE],
						 unsigned pix_clock,
						 unsigned *dp_lanes, unsigned *dp_rate)
{
	unsigned bpp =
		amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
	static const unsigned link_rates[3] = { 162000, 270000, 540000 };
	unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
	unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
	unsigned lane_num, i, max_pix_clock;

	if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
	    ENCODER_OBJECT_ID_NUTMEG) {
		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
			max_pix_clock = (lane_num * 270000 * 8) / bpp;
			if (max_pix_clock >= pix_clock) {
				*dp_lanes = lane_num;
				*dp_rate = 270000;
				return 0;
			}
		}
	} else {
		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
				if (max_pix_clock >= pix_clock) {
					*dp_lanes = lane_num;
					*dp_rate = link_rates[i];
					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

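/*
 * Descriptive note: thin wrapper around the DPEncoderService ATOM command
 * table.  The link clock is passed in 10 kHz units; the table's ucStatus
 * byte is returned, which for ATOM_DP_ACTION_GET_SINK_TYPE identifies the
 * attached sink (compared against the CONNECTOR_OBJECT_ID_* values
 * elsewhere in the driver).
 */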
static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
				      int action, int dp_clock,
				      u8 ucconfig, u8 lane_num)
{
	DP_ENCODER_SERVICE_PARAMETERS args;
	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);

	memset(&args, 0, sizeof(args));
	args.ucLinkClock = dp_clock / 10;
	args.ucConfig = ucconfig;
	args.ucAction = action;
	args.ucLaneNum = lane_num;
	args.ucStatus = 0;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
	return args.ucStatus;
}

u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector)
{
	struct drm_device *dev = amdgpu_connector->base.dev;
	struct amdgpu_device *adev = dev->dev_private;

	return amdgpu_atombios_dp_encoder_service(adev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
					   amdgpu_connector->ddc_bus->rec.i2c_id, 0);
}

static void amdgpu_atombios_dp_probe_oui(struct amdgpu_connector *amdgpu_connector)
{
	struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
	u8 buf[3];

	if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	if (drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Sink OUI: %02hhx%02hhx%02hhx\n",
			      buf[0], buf[1], buf[2]);

	if (drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Branch OUI: %02hhx%02hhx%02hhx\n",
			      buf[0], buf[1], buf[2]);
}

int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
{
	struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
	u8 msg[DP_DPCD_SIZE];
	int ret;

	ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV,
			       msg, DP_DPCD_SIZE);
	if (ret == DP_DPCD_SIZE) {
		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);

		DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
			      dig_connector->dpcd);

		amdgpu_atombios_dp_probe_oui(amdgpu_connector);

		return 0;
	}

	dig_connector->dpcd[0] = 0;
	return -EINVAL;
}

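/*
 * Descriptive note: decide between the eDP "internal DP" panel modes and
 * normal external DP signalling.  Bit 0 of DP_EDP_CONFIGURATION_CAP
 * advertises support for the alternate scrambler reset; NUTMEG/TRAVIS LVDS
 * bridges that do not set it still want the internal DP1 mode.
 */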
int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
			       struct drm_connector *connector)
{
	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
	u16 dp_bridge = amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector);
	u8 tmp;

	if (!amdgpu_connector->con_priv)
		return panel_mode;

	if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
		/* DP bridge chips */
		if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux,
				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
			if (tmp & 1)
				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
			else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
				 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
				panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
			else
				panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
		}
	} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		/* eDP */
		if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux,
				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
			if (tmp & 1)
				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
		}
	}

	return panel_mode;
}

void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
				 const struct drm_display_mode *mode)
{
	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
	struct amdgpu_connector_atom_dig *dig_connector;
	int ret;

	if (!amdgpu_connector->con_priv)
		return;
	dig_connector = amdgpu_connector->con_priv;

	if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
	    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
		ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
							    mode->clock,
							    &dig_connector->dp_lane_count,
							    &dig_connector->dp_clock);
		if (ret) {
			dig_connector->dp_clock = 0;
			dig_connector->dp_lane_count = 0;
		}
	}
}

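/*
 * Descriptive note: mode validation helper.  A mode is rejected when no
 * lane/rate combination can carry its pixel clock, or when it would need
 * the 5.4 Gbps (540000) rate on a board that is not DP 1.2 capable.
 */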
int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
				  struct drm_display_mode *mode)
{
	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
	struct amdgpu_connector_atom_dig *dig_connector;
	unsigned dp_lanes, dp_clock;
	int ret;

	if (!amdgpu_connector->con_priv)
		return MODE_CLOCK_HIGH;
	dig_connector = amdgpu_connector->con_priv;

	ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
						    mode->clock, &dp_lanes, &dp_clock);
	if (ret)
		return MODE_CLOCK_HIGH;

	if ((dp_clock == 540000) &&
	    (!amdgpu_connector_is_dp12_capable(connector)))
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

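/*
 * Descriptive note: the link only needs another training pass when the
 * sink is still reachable over AUX but no longer reports channel EQ OK.
 */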
bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connector)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;

	if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status)
	    <= 0)
		return false;
	if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
		return false;
	return true;
}

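/*
 * Descriptive note: DPCD revision 1.1 (0x11) introduced the DP_SET_POWER
 * register, so the sink's power state is only touched on newer revisions;
 * the short sleep gives a just-woken sink time to respond before it is
 * used again.
 */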
void amdgpu_atombios_dp_set_rx_power_state(struct drm_connector *connector,
				    u8 power_state)
{
	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
	struct amdgpu_connector_atom_dig *dig_connector;

	if (!amdgpu_connector->con_priv)
		return;

	dig_connector = amdgpu_connector->con_priv;

	/* power up/down the sink */
	if (dig_connector->dpcd[0] >= 0x11) {
		drm_dp_dpcd_writeb(&amdgpu_connector->ddc_bus->aux,
				   DP_SET_POWER, power_state);
		usleep_range(1000, 2000);
	}
}

struct amdgpu_atombios_dp_link_train_info {
	struct amdgpu_device *adev;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	int dp_clock;
	int dp_lane_count;
	bool tp3_supported;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 train_set[4];
	u8 link_status[DP_LINK_STATUS_SIZE];
	u8 tries;
	struct drm_dp_aux *aux;
};

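/*
 * Descriptive note: push the current train_set to both ends of the link.
 * The DIG transmitter takes a single value for all lanes via the
 * SETUP_VSEMPH action, while the sink receives the per-lane bytes at
 * DP_TRAINING_LANE0_SET onwards.
 */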
static void
amdgpu_atombios_dp_update_vs_emph(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	/* set the initial vs/emph on the source */
	amdgpu_atombios_encoder_setup_dig_transmitter(dp_info->encoder,
					       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
					       0, dp_info->train_set[0]); /* sets all lanes at once */

	/* set the vs/emph on the sink */
	drm_dp_dpcd_write(dp_info->aux, DP_TRAINING_LANE0_SET,
			  dp_info->train_set, dp_info->dp_lane_count);
}

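/*
 * Descriptive note: select a training pattern by translating the DPCD
 * pattern number into the matching ATOM encoder command for the source,
 * then mirror it into DP_TRAINING_PATTERN_SET so the sink expects the same
 * pattern.
 */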
static void
amdgpu_atombios_dp_set_tp(struct amdgpu_atombios_dp_link_train_info *dp_info, int tp)
{
	int rtp = 0;

	/* set training pattern on the source */
	switch (tp) {
	case DP_TRAINING_PATTERN_1:
		rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
		break;
	case DP_TRAINING_PATTERN_2:
		rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
		break;
	case DP_TRAINING_PATTERN_3:
		rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
		break;
	}
	amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder, rtp, 0);

	/* enable training pattern on the sink */
	drm_dp_dpcd_writeb(dp_info->aux, DP_TRAINING_PATTERN_SET, tp);
}

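/*
 * Descriptive note: pre-training setup.  Wake the sink, program downspread
 * and (for internal eDP2 panels) the alternate scrambler, write the lane
 * count and link rate the source is about to use, then issue
 * LINK_TRAINING_START to the encoder while the training pattern is still
 * disabled on the sink.
 */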
static int
amdgpu_atombios_dp_link_train_init(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(dp_info->encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u8 tmp;

	/* power up the sink */
	amdgpu_atombios_dp_set_rx_power_state(dp_info->connector, DP_SET_POWER_D0);

	/* possibly enable downspread on the sink */
	if (dp_info->dpcd[3] & 0x1)
		drm_dp_dpcd_writeb(dp_info->aux,
				   DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
	else
		drm_dp_dpcd_writeb(dp_info->aux,
				   DP_DOWNSPREAD_CTRL, 0);

	if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)
		drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);

	/* set the lane count on the sink */
	tmp = dp_info->dp_lane_count;
	if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_writeb(dp_info->aux, DP_LANE_COUNT_SET, tmp);

	/* set the link rate on the sink */
	tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
	drm_dp_dpcd_writeb(dp_info->aux, DP_LINK_BW_SET, tmp);

	/* start training on the source */
	amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder,
					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);

	/* disable the training pattern on the sink */
	drm_dp_dpcd_writeb(dp_info->aux,
			   DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	return 0;
}

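/*
 * Descriptive note: tear-down after training.  Give the link a moment
 * (400 us), clear the training pattern on the sink, then tell the encoder
 * training is complete.
 */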
static int
amdgpu_atombios_dp_link_train_finish(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	udelay(400);

	/* disable the training pattern on the sink */
	drm_dp_dpcd_writeb(dp_info->aux,
			   DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	/* disable the training pattern on the source */
	amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder,
					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);

	return 0;
}

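/*
 * Descriptive note: clock-recovery phase.  Drive training pattern 1 and
 * raise the voltage swing/pre-emphasis as requested by the sink until
 * every lane reports CR_DONE; give up after five tries at the same voltage
 * level or once all lanes have hit maximum swing.
 */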
static int
amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	bool clock_recovery;
	u8 voltage;
	int i;

	amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
	memset(dp_info->train_set, 0, 4);
	amdgpu_atombios_dp_update_vs_emph(dp_info);

	udelay(400);

	/* clock recovery loop */
	clock_recovery = false;
	dp_info->tries = 0;
	voltage = 0xff;
	while (1) {
		drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);

		if (drm_dp_dpcd_read_link_status(dp_info->aux,
						 dp_info->link_status) <= 0) {
			DRM_ERROR("displayport link status failed\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
			clock_recovery = true;
			break;
		}

		for (i = 0; i < dp_info->dp_lane_count; i++) {
			if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		}
		if (i == dp_info->dp_lane_count) {
			DRM_ERROR("clock recovery reached max voltage\n");
			break;
		}

		if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++dp_info->tries;
			if (dp_info->tries == 5) {
				DRM_ERROR("clock recovery tried 5 times\n");
				break;
			}
		} else
			dp_info->tries = 0;

		voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new train_set as requested by sink */
		amdgpu_atombios_dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count,
					     dp_info->train_set);

		amdgpu_atombios_dp_update_vs_emph(dp_info);
	}
	if (!clock_recovery) {
		DRM_ERROR("clock recovery failed\n");
		return -1;
	} else {
		DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			  DP_TRAIN_PRE_EMPHASIS_SHIFT);
		return 0;
	}
}

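/*
 * Descriptive note: channel-equalization phase.  Switch to training
 * pattern 3 when the sink supports TPS3 (needed for HBR2), otherwise
 * pattern 2, and keep adjusting the drive levels until the sink reports
 * channel EQ, symbol lock and interlane alignment, or the retry limit is
 * hit.
 */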
static int
amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	bool channel_eq;

	if (dp_info->tp3_supported)
		amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
	else
		amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);

	/* channel equalization loop */
	dp_info->tries = 0;
	channel_eq = false;
	while (1) {
		drm_dp_link_train_channel_eq_delay(dp_info->dpcd);

		if (drm_dp_dpcd_read_link_status(dp_info->aux,
						 dp_info->link_status) <= 0) {
			DRM_ERROR("displayport link status failed\n");
			break;
		}

		if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times */
		if (dp_info->tries > 5) {
			DRM_ERROR("channel eq failed: 5 tries\n");
			break;
		}

		/* Compute new train_set as requested by sink */
		amdgpu_atombios_dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count,
					     dp_info->train_set);

		amdgpu_atombios_dp_update_vs_emph(dp_info);
		dp_info->tries++;
	}

	if (!channel_eq) {
		DRM_ERROR("channel eq failed\n");
		return -1;
	} else {
		DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
		return 0;
	}
}

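/*
 * Descriptive note: full link-training sequence for a DP/eDP sink.
 * Snapshot the cached DPCD and link parameters, probe TPS3 support, run
 * the init, clock-recovery and channel-equalization steps in order, and
 * always finish by clearing the training pattern on both ends whether or
 * not training succeeded.
 */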
void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
			    struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_connector *amdgpu_connector;
	struct amdgpu_connector_atom_dig *dig_connector;
	struct amdgpu_atombios_dp_link_train_info dp_info;
	u8 tmp;

	if (!amdgpu_encoder->enc_priv)
		return;

	amdgpu_connector = to_amdgpu_connector(connector);
	if (!amdgpu_connector->con_priv)
		return;
	dig_connector = amdgpu_connector->con_priv;

	if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
	    (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
		return;

	if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp)
	    == 1) {
		if (tmp & DP_TPS3_SUPPORTED)
			dp_info.tp3_supported = true;
		else
			dp_info.tp3_supported = false;
	} else {
		dp_info.tp3_supported = false;
	}

	memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
	dp_info.adev = adev;
	dp_info.encoder = encoder;
	dp_info.connector = connector;
	dp_info.dp_lane_count = dig_connector->dp_lane_count;
	dp_info.dp_clock = dig_connector->dp_clock;
	dp_info.aux = &amdgpu_connector->ddc_bus->aux;

	if (amdgpu_atombios_dp_link_train_init(&dp_info))
		goto done;
	if (amdgpu_atombios_dp_link_train_cr(&dp_info))
		goto done;
	if (amdgpu_atombios_dp_link_train_ce(&dp_info))
		goto done;
done:
	if (amdgpu_atombios_dp_link_train_finish(&dp_info))
		return;
}
    763