      1 /*	$NetBSD: intel_hdcp.c,v 1.6 2021/12/19 12:32:15 riastradh Exp $	*/
      2 
      3 /* SPDX-License-Identifier: MIT */
      4 /*
      5  * Copyright (C) 2017 Google, Inc.
       6  * Copyright © 2017-2019, Intel Corporation.
      7  *
      8  * Authors:
       9  * Sean Paul <seanpaul@chromium.org>
      10  * Ramalingam C <ramalingam.c@intel.com>
     11  */
     12 
     13 #include <sys/cdefs.h>
     14 __KERNEL_RCSID(0, "$NetBSD: intel_hdcp.c,v 1.6 2021/12/19 12:32:15 riastradh Exp $");
     15 
     16 #include <linux/component.h>
     17 #include <linux/i2c.h>
     18 #include <linux/random.h>
     19 
     20 #include <drm/drm_hdcp.h>
     21 #include <drm/i915_component.h>
     22 
     23 #include "i915_reg.h"
     24 #include "intel_display_power.h"
     25 #include "intel_display_types.h"
     26 #include "intel_hdcp.h"
     27 #include "intel_sideband.h"
     28 #include "intel_connector.h"
     29 
     30 #include <linux/nbsd-namespace.h>
     31 
     32 #define KEY_LOAD_TRIES	5
     33 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS	50
     34 #define HDCP2_LC_RETRY_CNT			3
     35 
     36 static
     37 bool intel_hdcp_is_ksv_valid(u8 *ksv)
     38 {
     39 	int i, ones = 0;
     40 	/* KSV has 20 1's and 20 0's */
     41 	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
     42 		ones += hweight8(ksv[i]);
     43 	if (ones != 20)
     44 		return false;
     45 
     46 	return true;
     47 }
     48 
     49 static
     50 int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
     51 			       const struct intel_hdcp_shim *shim, u8 *bksv)
     52 {
     53 	int ret, i, tries = 2;
     54 
     55 	/* HDCP spec states that we must retry the bksv if it is invalid */
     56 	for (i = 0; i < tries; i++) {
     57 		ret = shim->read_bksv(intel_dig_port, bksv);
     58 		if (ret)
     59 			return ret;
     60 		if (intel_hdcp_is_ksv_valid(bksv))
     61 			break;
     62 	}
     63 	if (i == tries) {
     64 		DRM_DEBUG_KMS("Bksv is invalid\n");
     65 		return -ENODEV;
     66 	}
     67 
     68 	return 0;
     69 }
     70 
     71 /* Is HDCP1.4 capable on Platform and Sink */
     72 bool intel_hdcp_capable(struct intel_connector *connector)
     73 {
     74 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
     75 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
     76 	bool capable = false;
     77 	u8 bksv[5];
     78 
     79 	if (!shim)
     80 		return capable;
     81 
     82 	if (shim->hdcp_capable) {
     83 		shim->hdcp_capable(intel_dig_port, &capable);
     84 	} else {
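         		/* No hdcp_capable hook: infer HDCP1.4 support from a valid Bksv read */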
     85 		if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
     86 			capable = true;
     87 	}
     88 
     89 	return capable;
     90 }
     91 
     92 /* Is HDCP2.2 capable on Platform and Sink */
     93 bool intel_hdcp2_capable(struct intel_connector *connector)
     94 {
     95 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
     96 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
     97 	struct intel_hdcp *hdcp = &connector->hdcp;
     98 	bool capable = false;
     99 
    100 	/* I915 support for HDCP2.2 */
    101 	if (!hdcp->hdcp2_supported)
    102 		return false;
    103 
     104 	/* The MEI interface must be up: component added and master bound */
    105 	mutex_lock(&dev_priv->hdcp_comp_mutex);
     106 	if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
    107 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
    108 		return false;
    109 	}
    110 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
    111 
    112 	/* Sink's capability for HDCP2.2 */
    113 	hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
    114 
    115 	return capable;
    116 }
    117 
    118 static inline
    119 bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
    120 		       enum transcoder cpu_transcoder, enum port port)
    121 {
    122 	return I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
    123 	       HDCP_STATUS_ENC;
    124 }
    125 
    126 static inline
    127 bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
    128 			enum transcoder cpu_transcoder, enum port port)
    129 {
    130 	return I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
    131 	       LINK_ENCRYPTION_STATUS;
    132 }
    133 
    134 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
    135 				    const struct intel_hdcp_shim *shim)
    136 {
    137 	int ret, read_ret;
    138 	bool ksv_ready;
    139 
    140 	/* Poll for ksv list ready (spec says max time allowed is 5s) */
    141 	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
    142 							 &ksv_ready),
    143 			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
    144 			 100 * 1000);
    145 	if (ret)
    146 		return ret;
    147 	if (read_ret)
    148 		return read_ret;
    149 	if (!ksv_ready)
    150 		return -ETIMEDOUT;
    151 
    152 	return 0;
    153 }
    154 
    155 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
    156 {
    157 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
    158 	struct i915_power_well *power_well;
    159 	enum i915_power_well_id id;
    160 	bool enabled = false;
    161 
    162 	/*
    163 	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
    164 	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
    165 	 */
    166 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
    167 		id = HSW_DISP_PW_GLOBAL;
    168 	else
    169 		id = SKL_DISP_PW_1;
    170 
    171 	mutex_lock(&power_domains->lock);
    172 
    173 	/* PG1 (power well #1) needs to be enabled */
    174 	for_each_power_well(dev_priv, power_well) {
    175 		if (power_well->desc->id == id) {
    176 			enabled = power_well->desc->ops->is_enabled(dev_priv,
    177 								    power_well);
    178 			break;
    179 		}
    180 	}
    181 	mutex_unlock(&power_domains->lock);
    182 
    183 	/*
     184 	 * Another requirement for HDCP key loadability is that the PLL for
     185 	 * cdclk is enabled. Without an active crtc we won't land here, so we
     186 	 * assume cdclk is already on.
    187 	 */
    188 
    189 	return enabled;
    190 }
    191 
    192 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
    193 {
    194 	I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
    195 	I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
    196 		   HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
    197 }
    198 
    199 static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
    200 {
    201 	int ret;
    202 	u32 val;
    203 
    204 	val = I915_READ(HDCP_KEY_STATUS);
    205 	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
    206 		return 0;
    207 
    208 	/*
     209 	 * On HSW and BDW the HW loads the HDCP1.4 key when the display comes
     210 	 * out of reset, so if the key is not already loaded it's an error state.
    211 	 */
    212 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
    213 		if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
    214 			return -ENXIO;
    215 
    216 	/*
    217 	 * Initiate loading the HDCP key from fuses.
    218 	 *
     219 	 * On BXT+ platforms the HDCP key needs to be loaded by SW. Gen 9
     220 	 * platforms other than BXT and GLK (i.e. GEN9_BC) differ in the key
     221 	 * load trigger process and use the GT Driver Mailbox interface.
    222 	 */
    223 	if (IS_GEN9_BC(dev_priv)) {
    224 		ret = sandybridge_pcode_write(dev_priv,
    225 					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
    226 		if (ret) {
    227 			DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
    228 			          ret);
    229 			return ret;
    230 		}
    231 	} else {
    232 		I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
    233 	}
    234 
    235 	/* Wait for the keys to load (500us) */
    236 	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
    237 					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
    238 					10, 1, &val);
    239 	if (ret)
    240 		return ret;
    241 	else if (!(val & HDCP_KEY_LOAD_STATUS))
    242 		return -ENXIO;
    243 
    244 	/* Send Aksv over to PCH display for use in authentication */
    245 	I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
    246 
    247 	return 0;
    248 }
    249 
     250 /* Write 32 bits of the SHA-1 text and wait for the HW to be ready for more */
    251 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
    252 {
    253 	I915_WRITE(HDCP_SHA_TEXT, sha_text);
    254 	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
    255 		DRM_ERROR("Timed out waiting for SHA1 ready\n");
    256 		return -ETIMEDOUT;
    257 	}
    258 	return 0;
    259 }
    260 
    261 static
    262 u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
    263 				enum transcoder cpu_transcoder, enum port port)
    264 {
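         	/* Gen12+ keys these bits by transcoder; older platforms key them by DDI port */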
    265 	if (INTEL_GEN(dev_priv) >= 12) {
    266 		switch (cpu_transcoder) {
    267 		case TRANSCODER_A:
    268 			return HDCP_TRANSA_REP_PRESENT |
    269 			       HDCP_TRANSA_SHA1_M0;
    270 		case TRANSCODER_B:
    271 			return HDCP_TRANSB_REP_PRESENT |
    272 			       HDCP_TRANSB_SHA1_M0;
    273 		case TRANSCODER_C:
    274 			return HDCP_TRANSC_REP_PRESENT |
    275 			       HDCP_TRANSC_SHA1_M0;
    276 		case TRANSCODER_D:
    277 			return HDCP_TRANSD_REP_PRESENT |
    278 			       HDCP_TRANSD_SHA1_M0;
    279 		default:
    280 			DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder);
    281 			return -EINVAL;
    282 		}
    283 	}
    284 
    285 	switch (port) {
    286 	case PORT_A:
    287 		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
    288 	case PORT_B:
    289 		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
    290 	case PORT_C:
    291 		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
    292 	case PORT_D:
    293 		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
    294 	case PORT_E:
    295 		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
    296 	default:
    297 		DRM_ERROR("Unknown port %d\n", port);
    298 		return -EINVAL;
    299 	}
    300 }
    301 
    302 static
    303 int intel_hdcp_validate_v_prime(struct intel_connector *connector,
    304 				const struct intel_hdcp_shim *shim,
    305 				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
    306 {
    307 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
    308 	struct drm_i915_private *dev_priv;
    309 	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
    310 	enum port port = intel_dig_port->base.port;
    311 	u32 vprime, sha_text, sha_leftovers, rep_ctl;
    312 	int ret, i, j, sha_idx;
    313 
    314 	dev_priv = intel_dig_port->base.base.dev->dev_private;
    315 
    316 	/* Process V' values from the receiver */
    317 	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
    318 		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
    319 		if (ret)
    320 			return ret;
    321 		I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
    322 	}
    323 
    324 	/*
    325 	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
    326 	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
    327 	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
    328 	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
    329 	 * index will keep track of our progress through the 64 bytes as well as
    330 	 * helping us work the 40-bit KSVs through our 32-bit register.
    331 	 *
    332 	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
    333 	 */
    334 	sha_idx = 0;
    335 	sha_text = 0;
    336 	sha_leftovers = 0;
    337 	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
    338 	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
    339 	for (i = 0; i < num_downstream; i++) {
    340 		unsigned int sha_empty;
    341 		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
    342 
    343 		/* Fill up the empty slots in sha_text and write it out */
    344 		sha_empty = sizeof(sha_text) - sha_leftovers;
    345 		for (j = 0; j < sha_empty; j++)
    346 			sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
    347 
    348 		ret = intel_write_sha_text(dev_priv, sha_text);
    349 		if (ret < 0)
    350 			return ret;
    351 
     352 		/* The programming guide says to rewrite HDCP_REP_CTL every 64 bytes */
    353 		sha_idx += sizeof(sha_text);
    354 		if (!(sha_idx % 64))
    355 			I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
    356 
    357 		/* Store the leftover bytes from the ksv in sha_text */
    358 		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
    359 		sha_text = 0;
    360 		for (j = 0; j < sha_leftovers; j++)
    361 			sha_text |= ksv[sha_empty + j] <<
    362 					((sizeof(sha_text) - j - 1) * 8);
    363 
    364 		/*
    365 		 * If we still have room in sha_text for more data, continue.
    366 		 * Otherwise, write it out immediately.
    367 		 */
    368 		if (sizeof(sha_text) > sha_leftovers)
    369 			continue;
    370 
    371 		ret = intel_write_sha_text(dev_priv, sha_text);
    372 		if (ret < 0)
    373 			return ret;
    374 		sha_leftovers = 0;
    375 		sha_text = 0;
    376 		sha_idx += sizeof(sha_text);
    377 	}
    378 
    379 	/*
    380 	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
    381 	 * bytes are leftover from the last ksv, we might be able to fit them
    382 	 * all in sha_text (first 2 cases), or we might need to split them up
    383 	 * into 2 writes (last 2 cases).
    384 	 */
    385 	if (sha_leftovers == 0) {
    386 		/* Write 16 bits of text, 16 bits of M0 */
    387 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
    388 		ret = intel_write_sha_text(dev_priv,
    389 					   bstatus[0] << 8 | bstatus[1]);
    390 		if (ret < 0)
    391 			return ret;
    392 		sha_idx += sizeof(sha_text);
    393 
    394 		/* Write 32 bits of M0 */
    395 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
    396 		ret = intel_write_sha_text(dev_priv, 0);
    397 		if (ret < 0)
    398 			return ret;
    399 		sha_idx += sizeof(sha_text);
    400 
    401 		/* Write 16 bits of M0 */
    402 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
    403 		ret = intel_write_sha_text(dev_priv, 0);
    404 		if (ret < 0)
    405 			return ret;
    406 		sha_idx += sizeof(sha_text);
    407 
    408 	} else if (sha_leftovers == 1) {
    409 		/* Write 24 bits of text, 8 bits of M0 */
    410 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
    411 		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
     412 		/* Only 24 bits of data, must be in the LSBs */
    413 		sha_text = (sha_text & 0xffffff00) >> 8;
    414 		ret = intel_write_sha_text(dev_priv, sha_text);
    415 		if (ret < 0)
    416 			return ret;
    417 		sha_idx += sizeof(sha_text);
    418 
    419 		/* Write 32 bits of M0 */
    420 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
    421 		ret = intel_write_sha_text(dev_priv, 0);
    422 		if (ret < 0)
    423 			return ret;
    424 		sha_idx += sizeof(sha_text);
    425 
    426 		/* Write 24 bits of M0 */
    427 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
    428 		ret = intel_write_sha_text(dev_priv, 0);
    429 		if (ret < 0)
    430 			return ret;
    431 		sha_idx += sizeof(sha_text);
    432 
    433 	} else if (sha_leftovers == 2) {
    434 		/* Write 32 bits of text */
    435 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
    436 		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
    437 		ret = intel_write_sha_text(dev_priv, sha_text);
    438 		if (ret < 0)
    439 			return ret;
    440 		sha_idx += sizeof(sha_text);
    441 
    442 		/* Write 64 bits of M0 */
    443 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
    444 		for (i = 0; i < 2; i++) {
    445 			ret = intel_write_sha_text(dev_priv, 0);
    446 			if (ret < 0)
    447 				return ret;
    448 			sha_idx += sizeof(sha_text);
    449 		}
    450 	} else if (sha_leftovers == 3) {
    451 		/* Write 32 bits of text */
    452 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
    453 		sha_text |= bstatus[0] << 24;
    454 		ret = intel_write_sha_text(dev_priv, sha_text);
    455 		if (ret < 0)
    456 			return ret;
    457 		sha_idx += sizeof(sha_text);
    458 
    459 		/* Write 8 bits of text, 24 bits of M0 */
    460 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
    461 		ret = intel_write_sha_text(dev_priv, bstatus[1]);
    462 		if (ret < 0)
    463 			return ret;
    464 		sha_idx += sizeof(sha_text);
    465 
    466 		/* Write 32 bits of M0 */
    467 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
    468 		ret = intel_write_sha_text(dev_priv, 0);
    469 		if (ret < 0)
    470 			return ret;
    471 		sha_idx += sizeof(sha_text);
    472 
    473 		/* Write 8 bits of M0 */
    474 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
    475 		ret = intel_write_sha_text(dev_priv, 0);
    476 		if (ret < 0)
    477 			return ret;
    478 		sha_idx += sizeof(sha_text);
    479 	} else {
    480 		DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
    481 			      sha_leftovers);
    482 		return -EINVAL;
    483 	}
    484 
    485 	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
    486 	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
    487 	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
    488 		ret = intel_write_sha_text(dev_priv, 0);
    489 		if (ret < 0)
    490 			return ret;
    491 		sha_idx += sizeof(sha_text);
    492 	}
    493 
    494 	/*
    495 	 * Last write gets the length of the concatenation in bits. That is:
    496 	 *  - 5 bytes per device
    497 	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
    498 	 */
    499 	sha_text = (num_downstream * 5 + 10) * 8;
    500 	ret = intel_write_sha_text(dev_priv, sha_text);
    501 	if (ret < 0)
    502 		return ret;
    503 
    504 	/* Tell the HW we're done with the hash and wait for it to ACK */
    505 	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
    506 	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
    507 				  HDCP_SHA1_COMPLETE, 1)) {
    508 		DRM_ERROR("Timed out waiting for SHA1 complete\n");
    509 		return -ETIMEDOUT;
    510 	}
    511 	if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
    512 		DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
    513 		return -ENXIO;
    514 	}
    515 
    516 	return 0;
    517 }
    518 
    519 /* Implements Part 2 of the HDCP authorization procedure */
    520 static
    521 int intel_hdcp_auth_downstream(struct intel_connector *connector)
    522 {
    523 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
    524 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
    525 	struct drm_device *dev = connector->base.dev;
    526 	u8 bstatus[2], num_downstream, *ksv_fifo;
    527 	int ret, i, tries = 3;
    528 
    529 	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
    530 	if (ret) {
    531 		DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
    532 		return ret;
    533 	}
    534 
    535 	ret = shim->read_bstatus(intel_dig_port, bstatus);
    536 	if (ret)
    537 		return ret;
    538 
    539 	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
    540 	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
    541 		DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
    542 		return -EPERM;
    543 	}
    544 
    545 	/*
     546 	 * When a repeater reports a device count of 0, the HDCP1.4 spec allows
     547 	 * the HDCP encryption to be disabled. That implies the repeater can't
     548 	 * have its own display. As no encrypted content can be consumed by a
     549 	 * repeater with 0 downstream devices, we fail the
     550 	 * authentication.
    551 	 */
    552 	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
    553 	if (num_downstream == 0) {
    554 		DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
    555 		return -EINVAL;
    556 	}
    557 
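         	/* The KSV FIFO holds one DRM_HDCP_KSV_LEN (5 byte) entry per downstream device */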
    558 	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
    559 	if (!ksv_fifo) {
    560 		DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
    561 		return -ENOMEM;
    562 	}
    563 
    564 	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
    565 	if (ret)
    566 		goto err;
    567 
    568 	if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
    569 		DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
    570 		ret = -EPERM;
    571 		goto err;
    572 	}
    573 
    574 	/*
     575 	 * When V' mismatches, the DP spec mandates re-reading
     576 	 * V' at least twice.
    577 	 */
    578 	for (i = 0; i < tries; i++) {
    579 		ret = intel_hdcp_validate_v_prime(connector, shim,
    580 						  ksv_fifo, num_downstream,
    581 						  bstatus);
    582 		if (!ret)
    583 			break;
    584 	}
    585 
    586 	if (i == tries) {
    587 		DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
    588 		goto err;
    589 	}
    590 
    591 	DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
    592 		      num_downstream);
    593 	ret = 0;
    594 err:
    595 	kfree(ksv_fifo);
    596 	return ret;
    597 }
    598 
    599 /* Implements Part 1 of the HDCP authorization procedure */
    600 static int intel_hdcp_auth(struct intel_connector *connector)
    601 {
    602 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
    603 	struct intel_hdcp *hdcp = &connector->hdcp;
    604 	struct drm_device *dev = connector->base.dev;
    605 	const struct intel_hdcp_shim *shim = hdcp->shim;
    606 	struct drm_i915_private *dev_priv;
    607 	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
    608 	enum port port;
    609 	unsigned long r0_prime_gen_start;
    610 	int ret, i, tries = 2;
    611 	union {
    612 		u32 reg[2];
    613 		u8 shim[DRM_HDCP_AN_LEN];
    614 	} an;
    615 	union {
    616 		u32 reg[2];
    617 		u8 shim[DRM_HDCP_KSV_LEN];
    618 	} bksv;
    619 	union {
    620 		u32 reg;
    621 		u8 shim[DRM_HDCP_RI_LEN];
    622 	} ri;
    623 	bool repeater_present, hdcp_capable;
    624 
    625 	dev_priv = intel_dig_port->base.base.dev->dev_private;
    626 
    627 	port = intel_dig_port->base.port;
    628 
    629 	/*
    630 	 * Detects whether the display is HDCP capable. Although we check for
    631 	 * valid Bksv below, the HDCP over DP spec requires that we check
    632 	 * whether the display supports HDCP before we write An. For HDMI
    633 	 * displays, this is not necessary.
    634 	 */
    635 	if (shim->hdcp_capable) {
    636 		ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
    637 		if (ret)
    638 			return ret;
    639 		if (!hdcp_capable) {
    640 			DRM_DEBUG_KMS("Panel is not HDCP capable\n");
    641 			return -EINVAL;
    642 		}
    643 	}
    644 
    645 	/* Initialize An with 2 random values and acquire it */
    646 	for (i = 0; i < 2; i++)
    647 		I915_WRITE(HDCP_ANINIT(dev_priv, cpu_transcoder, port),
    648 			   get_random_u32());
    649 	I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
    650 		   HDCP_CONF_CAPTURE_AN);
    651 
    652 	/* Wait for An to be acquired */
    653 	if (intel_de_wait_for_set(dev_priv,
    654 				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
    655 				  HDCP_STATUS_AN_READY, 1)) {
    656 		DRM_ERROR("Timed out waiting for An\n");
    657 		return -ETIMEDOUT;
    658 	}
    659 
    660 	an.reg[0] = I915_READ(HDCP_ANLO(dev_priv, cpu_transcoder, port));
    661 	an.reg[1] = I915_READ(HDCP_ANHI(dev_priv, cpu_transcoder, port));
    662 	ret = shim->write_an_aksv(intel_dig_port, an.shim);
    663 	if (ret)
    664 		return ret;
    665 
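         	/* The R0' wait below is timed from here, i.e. right after the An/Aksv write */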
    666 	r0_prime_gen_start = jiffies;
    667 
    668 	memset(&bksv, 0, sizeof(bksv));
    669 
    670 	ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
    671 	if (ret < 0)
    672 		return ret;
    673 
    674 	if (drm_hdcp_check_ksvs_revoked(dev, bksv.shim, 1)) {
    675 		DRM_ERROR("BKSV is revoked\n");
    676 		return -EPERM;
    677 	}
    678 
    679 	I915_WRITE(HDCP_BKSVLO(dev_priv, cpu_transcoder, port), bksv.reg[0]);
    680 	I915_WRITE(HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]);
    681 
    682 	ret = shim->repeater_present(intel_dig_port, &repeater_present);
    683 	if (ret)
    684 		return ret;
    685 	if (repeater_present)
    686 		I915_WRITE(HDCP_REP_CTL,
    687 			   intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
    688 						       port));
    689 
    690 	ret = shim->toggle_signalling(intel_dig_port, true);
    691 	if (ret)
    692 		return ret;
    693 
    694 	I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
    695 		   HDCP_CONF_AUTH_AND_ENC);
    696 
    697 	/* Wait for R0 ready */
    698 	if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
    699 		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
    700 		DRM_ERROR("Timed out waiting for R0 ready\n");
    701 		return -ETIMEDOUT;
    702 	}
    703 
    704 	/*
    705 	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
    706 	 * some monitors can take longer than this. We'll set the timeout at
    707 	 * 300ms just to be sure.
    708 	 *
    709 	 * On DP, there's an R0_READY bit available but no such bit
    710 	 * exists on HDMI. Since the upper-bound is the same, we'll just do
    711 	 * the stupid thing instead of polling on one and not the other.
    712 	 */
    713 	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
    714 
    715 	tries = 3;
    716 
    717 	/*
     718 	 * The DP HDCP spec mandates two more attempts to read R0' in case
     719 	 * of an R0 mismatch.
    720 	 */
    721 	for (i = 0; i < tries; i++) {
    722 		ri.reg = 0;
    723 		ret = shim->read_ri_prime(intel_dig_port, ri.shim);
    724 		if (ret)
    725 			return ret;
    726 		I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
    727 
    728 		/* Wait for Ri prime match */
    729 		if (!wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
    730 						    port)) &
    731 		    (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
    732 			break;
    733 	}
    734 
    735 	if (i == tries) {
    736 		DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
    737 			      I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
    738 						    port)));
    739 		return -ETIMEDOUT;
    740 	}
    741 
    742 	/* Wait for encryption confirmation */
    743 	if (intel_de_wait_for_set(dev_priv,
    744 				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
    745 				  HDCP_STATUS_ENC,
    746 				  ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
    747 		DRM_ERROR("Timed out waiting for encryption\n");
    748 		return -ETIMEDOUT;
    749 	}
    750 
    751 	/*
    752 	 * XXX: If we have MST-connected devices, we need to enable encryption
    753 	 * on those as well.
    754 	 */
    755 
    756 	if (repeater_present)
    757 		return intel_hdcp_auth_downstream(connector);
    758 
    759 	DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
    760 	return 0;
    761 }
    762 
    763 static int _intel_hdcp_disable(struct intel_connector *connector)
    764 {
    765 	struct intel_hdcp *hdcp = &connector->hdcp;
    766 	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
    767 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
    768 	enum port port = intel_dig_port->base.port;
    769 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
    770 	int ret;
    771 
    772 	DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
    773 		      connector->base.name, connector->base.base.id);
    774 
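         	/* Clear HDCP_CONF, wait for the status register to clear, then stop signalling */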
    775 	hdcp->hdcp_encrypted = false;
    776 	I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
    777 	if (intel_de_wait_for_clear(dev_priv,
    778 				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
    779 				    ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
    780 		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
    781 		return -ETIMEDOUT;
    782 	}
    783 
    784 	ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
    785 	if (ret) {
    786 		DRM_ERROR("Failed to disable HDCP signalling\n");
    787 		return ret;
    788 	}
    789 
    790 	DRM_DEBUG_KMS("HDCP is disabled\n");
    791 	return 0;
    792 }
    793 
    794 static int _intel_hdcp_enable(struct intel_connector *connector)
    795 {
    796 	struct intel_hdcp *hdcp = &connector->hdcp;
    797 	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
    798 	int i, ret, tries = 3;
    799 
    800 	DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
    801 		      connector->base.name, connector->base.base.id);
    802 
    803 	if (!hdcp_key_loadable(dev_priv)) {
    804 		DRM_ERROR("HDCP key Load is not possible\n");
    805 		return -ENXIO;
    806 	}
    807 
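         	/* Retry the key load up to KEY_LOAD_TRIES times, clearing the keys between attempts */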
    808 	for (i = 0; i < KEY_LOAD_TRIES; i++) {
    809 		ret = intel_hdcp_load_keys(dev_priv);
    810 		if (!ret)
    811 			break;
    812 		intel_hdcp_clear_keys(dev_priv);
    813 	}
    814 	if (ret) {
    815 		DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
    816 		return ret;
    817 	}
    818 
     819 	/* In case of authentication failures, the HDCP spec expects reauth. */
    820 	for (i = 0; i < tries; i++) {
    821 		ret = intel_hdcp_auth(connector);
    822 		if (!ret) {
    823 			hdcp->hdcp_encrypted = true;
    824 			return 0;
    825 		}
    826 
    827 		DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
    828 
     829 		/* Ensure HDCP encryption and signalling are stopped. */
    830 		_intel_hdcp_disable(connector);
    831 	}
    832 
    833 	DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
    834 	return ret;
    835 }
    836 
    837 static inline
    838 struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
    839 {
    840 	return container_of(hdcp, struct intel_connector, hdcp);
    841 }
    842 
    843 /* Implements Part 3 of the HDCP authorization procedure */
    844 static int intel_hdcp_check_link(struct intel_connector *connector)
    845 {
    846 	struct intel_hdcp *hdcp = &connector->hdcp;
    847 	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
    848 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
    849 	enum port port = intel_dig_port->base.port;
    850 	enum transcoder cpu_transcoder;
    851 	int ret = 0;
    852 
    853 	mutex_lock(&hdcp->mutex);
    854 	cpu_transcoder = hdcp->cpu_transcoder;
    855 
     856 	/* check_link is only valid while HDCP1.4 is enabled */
    857 	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
    858 	    !hdcp->hdcp_encrypted) {
    859 		ret = -EINVAL;
    860 		goto out;
    861 	}
    862 
    863 	if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
    864 		DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
    865 			  connector->base.name, connector->base.base.id,
    866 			  I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
    867 						port)));
    868 		ret = -ENXIO;
    869 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
    870 		schedule_work(&hdcp->prop_work);
    871 		goto out;
    872 	}
    873 
    874 	if (hdcp->shim->check_link(intel_dig_port)) {
    875 		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
    876 			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
    877 			schedule_work(&hdcp->prop_work);
    878 		}
    879 		goto out;
    880 	}
    881 
    882 	DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
    883 		      connector->base.name, connector->base.base.id);
    884 
    885 	ret = _intel_hdcp_disable(connector);
    886 	if (ret) {
    887 		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
    888 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
    889 		schedule_work(&hdcp->prop_work);
    890 		goto out;
    891 	}
    892 
    893 	ret = _intel_hdcp_enable(connector);
    894 	if (ret) {
    895 		DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
    896 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
    897 		schedule_work(&hdcp->prop_work);
    898 		goto out;
    899 	}
    900 
    901 out:
    902 	mutex_unlock(&hdcp->mutex);
    903 	return ret;
    904 }
    905 
    906 static void intel_hdcp_prop_work(struct work_struct *work)
    907 {
    908 	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
    909 					       prop_work);
    910 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
    911 	struct drm_device *dev = connector->base.dev;
    912 
    913 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
    914 	mutex_lock(&hdcp->mutex);
    915 
    916 	/*
    917 	 * This worker is only used to flip between ENABLED/DESIRED. Either of
    918 	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
    919 	 * we're running just after hdcp has been disabled, so just exit
    920 	 */
    921 	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
    922 		drm_hdcp_update_content_protection(&connector->base,
    923 						   hdcp->value);
    924 
    925 	mutex_unlock(&hdcp->mutex);
    926 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
    927 }
    928 
    929 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
    930 {
    931 	/* PORT E doesn't have HDCP, and PORT F is disabled */
    932 	return INTEL_INFO(dev_priv)->display.has_hdcp && port < PORT_E;
    933 }
    934 
    935 static int
    936 hdcp2_prepare_ake_init(struct intel_connector *connector,
    937 		       struct hdcp2_ake_init *ake_data)
    938 {
    939 	struct hdcp_port_data *data = &connector->hdcp.port_data;
    940 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
    941 	struct i915_hdcp_comp_master *comp;
    942 	int ret;
    943 
    944 	mutex_lock(&dev_priv->hdcp_comp_mutex);
    945 	comp = dev_priv->hdcp_master;
    946 
    947 	if (!comp || !comp->ops) {
    948 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
    949 		return -EINVAL;
    950 	}
    951 
    952 	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
    953 	if (ret)
    954 		DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
    955 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
    956 
    957 	return ret;
    958 }
    959 
    960 static int
    961 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
    962 				struct hdcp2_ake_send_cert *rx_cert,
    963 				bool *paired,
    964 				struct hdcp2_ake_no_stored_km *ek_pub_km,
    965 				size_t *msg_sz)
    966 {
    967 	struct hdcp_port_data *data = &connector->hdcp.port_data;
    968 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
    969 	struct i915_hdcp_comp_master *comp;
    970 	int ret;
    971 
    972 	mutex_lock(&dev_priv->hdcp_comp_mutex);
    973 	comp = dev_priv->hdcp_master;
    974 
    975 	if (!comp || !comp->ops) {
    976 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
    977 		return -EINVAL;
    978 	}
    979 
    980 	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
    981 							 rx_cert, paired,
    982 							 ek_pub_km, msg_sz);
    983 	if (ret < 0)
    984 		DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
    985 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
    986 
    987 	return ret;
    988 }
    989 
    990 static int hdcp2_verify_hprime(struct intel_connector *connector,
    991 			       struct hdcp2_ake_send_hprime *rx_hprime)
    992 {
    993 	struct hdcp_port_data *data = &connector->hdcp.port_data;
    994 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
    995 	struct i915_hdcp_comp_master *comp;
    996 	int ret;
    997 
    998 	mutex_lock(&dev_priv->hdcp_comp_mutex);
    999 	comp = dev_priv->hdcp_master;
   1000 
   1001 	if (!comp || !comp->ops) {
   1002 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1003 		return -EINVAL;
   1004 	}
   1005 
   1006 	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
   1007 	if (ret < 0)
   1008 		DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
   1009 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1010 
   1011 	return ret;
   1012 }
   1013 
   1014 static int
   1015 hdcp2_store_pairing_info(struct intel_connector *connector,
   1016 			 struct hdcp2_ake_send_pairing_info *pairing_info)
   1017 {
   1018 	struct hdcp_port_data *data = &connector->hdcp.port_data;
   1019 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1020 	struct i915_hdcp_comp_master *comp;
   1021 	int ret;
   1022 
   1023 	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1024 	comp = dev_priv->hdcp_master;
   1025 
   1026 	if (!comp || !comp->ops) {
   1027 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1028 		return -EINVAL;
   1029 	}
   1030 
   1031 	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
   1032 	if (ret < 0)
   1033 		DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
   1034 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1035 
   1036 	return ret;
   1037 }
   1038 
   1039 static int
   1040 hdcp2_prepare_lc_init(struct intel_connector *connector,
   1041 		      struct hdcp2_lc_init *lc_init)
   1042 {
   1043 	struct hdcp_port_data *data = &connector->hdcp.port_data;
   1044 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1045 	struct i915_hdcp_comp_master *comp;
   1046 	int ret;
   1047 
   1048 	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1049 	comp = dev_priv->hdcp_master;
   1050 
   1051 	if (!comp || !comp->ops) {
   1052 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1053 		return -EINVAL;
   1054 	}
   1055 
   1056 	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
   1057 	if (ret < 0)
   1058 		DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
   1059 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1060 
   1061 	return ret;
   1062 }
   1063 
   1064 static int
   1065 hdcp2_verify_lprime(struct intel_connector *connector,
   1066 		    struct hdcp2_lc_send_lprime *rx_lprime)
   1067 {
   1068 	struct hdcp_port_data *data = &connector->hdcp.port_data;
   1069 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1070 	struct i915_hdcp_comp_master *comp;
   1071 	int ret;
   1072 
   1073 	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1074 	comp = dev_priv->hdcp_master;
   1075 
   1076 	if (!comp || !comp->ops) {
   1077 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1078 		return -EINVAL;
   1079 	}
   1080 
   1081 	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
   1082 	if (ret < 0)
   1083 		DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
   1084 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1085 
   1086 	return ret;
   1087 }
   1088 
   1089 static int hdcp2_prepare_skey(struct intel_connector *connector,
   1090 			      struct hdcp2_ske_send_eks *ske_data)
   1091 {
   1092 	struct hdcp_port_data *data = &connector->hdcp.port_data;
   1093 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1094 	struct i915_hdcp_comp_master *comp;
   1095 	int ret;
   1096 
   1097 	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1098 	comp = dev_priv->hdcp_master;
   1099 
   1100 	if (!comp || !comp->ops) {
   1101 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1102 		return -EINVAL;
   1103 	}
   1104 
   1105 	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
   1106 	if (ret < 0)
   1107 		DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
   1108 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1109 
   1110 	return ret;
   1111 }
   1112 
   1113 static int
   1114 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
   1115 				      struct hdcp2_rep_send_receiverid_list
   1116 								*rep_topology,
   1117 				      struct hdcp2_rep_send_ack *rep_send_ack)
   1118 {
   1119 	struct hdcp_port_data *data = &connector->hdcp.port_data;
   1120 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1121 	struct i915_hdcp_comp_master *comp;
   1122 	int ret;
   1123 
   1124 	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1125 	comp = dev_priv->hdcp_master;
   1126 
   1127 	if (!comp || !comp->ops) {
   1128 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1129 		return -EINVAL;
   1130 	}
   1131 
   1132 	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
   1133 							 rep_topology,
   1134 							 rep_send_ack);
   1135 	if (ret < 0)
   1136 		DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
   1137 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1138 
   1139 	return ret;
   1140 }
   1141 
   1142 static int
   1143 hdcp2_verify_mprime(struct intel_connector *connector,
   1144 		    struct hdcp2_rep_stream_ready *stream_ready)
   1145 {
   1146 	struct hdcp_port_data *data = &connector->hdcp.port_data;
   1147 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1148 	struct i915_hdcp_comp_master *comp;
   1149 	int ret;
   1150 
   1151 	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1152 	comp = dev_priv->hdcp_master;
   1153 
   1154 	if (!comp || !comp->ops) {
   1155 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1156 		return -EINVAL;
   1157 	}
   1158 
   1159 	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
   1160 	if (ret < 0)
   1161 		DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
   1162 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1163 
   1164 	return ret;
   1165 }
   1166 
   1167 static int hdcp2_authenticate_port(struct intel_connector *connector)
   1168 {
   1169 	struct hdcp_port_data *data = &connector->hdcp.port_data;
   1170 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1171 	struct i915_hdcp_comp_master *comp;
   1172 	int ret;
   1173 
   1174 	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1175 	comp = dev_priv->hdcp_master;
   1176 
   1177 	if (!comp || !comp->ops) {
   1178 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1179 		return -EINVAL;
   1180 	}
   1181 
   1182 	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
   1183 	if (ret < 0)
   1184 		DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
   1185 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1186 
   1187 	return ret;
   1188 }
   1189 
   1190 static int hdcp2_close_mei_session(struct intel_connector *connector)
   1191 {
   1192 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1193 	struct i915_hdcp_comp_master *comp;
   1194 	int ret;
   1195 
   1196 	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1197 	comp = dev_priv->hdcp_master;
   1198 
   1199 	if (!comp || !comp->ops) {
   1200 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1201 		return -EINVAL;
   1202 	}
   1203 
   1204 	ret = comp->ops->close_hdcp_session(comp->mei_dev,
   1205 					     &connector->hdcp.port_data);
   1206 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1207 
   1208 	return ret;
   1209 }
   1210 
   1211 static int hdcp2_deauthenticate_port(struct intel_connector *connector)
   1212 {
   1213 	return hdcp2_close_mei_session(connector);
   1214 }
   1215 
   1216 /* Authentication flow starts from here */
   1217 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
   1218 {
   1219 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
   1220 	struct intel_hdcp *hdcp = &connector->hdcp;
   1221 	struct drm_device *dev = connector->base.dev;
   1222 	union {
   1223 		struct hdcp2_ake_init ake_init;
   1224 		struct hdcp2_ake_send_cert send_cert;
   1225 		struct hdcp2_ake_no_stored_km no_stored_km;
   1226 		struct hdcp2_ake_send_hprime send_hprime;
   1227 		struct hdcp2_ake_send_pairing_info pairing_info;
   1228 	} msgs;
   1229 	const struct intel_hdcp_shim *shim = hdcp->shim;
   1230 	size_t size;
   1231 	int ret;
   1232 
   1233 	/* Init for seq_num */
   1234 	hdcp->seq_num_v = 0;
   1235 	hdcp->seq_num_m = 0;
   1236 
   1237 	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
   1238 	if (ret < 0)
   1239 		return ret;
   1240 
   1241 	ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
   1242 				  sizeof(msgs.ake_init));
   1243 	if (ret < 0)
   1244 		return ret;
   1245 
   1246 	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
   1247 				 &msgs.send_cert, sizeof(msgs.send_cert));
   1248 	if (ret < 0)
   1249 		return ret;
   1250 
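         	/* The receiver must advertise HDCP2.2 support in rx_caps before we go any further */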
   1251 	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
    1252 		DRM_DEBUG_KMS("cert.rx_caps don't claim HDCP2.2\n");
   1253 		return -EINVAL;
   1254 	}
   1255 
   1256 	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
   1257 
   1258 	if (drm_hdcp_check_ksvs_revoked(dev, msgs.send_cert.cert_rx.receiver_id,
   1259 					1)) {
   1260 		DRM_ERROR("Receiver ID is revoked\n");
   1261 		return -EPERM;
   1262 	}
   1263 
   1264 	/*
    1265 	 * Here msgs.no_stored_km will also hold the message corresponding
    1266 	 * to a stored km.
   1267 	 */
   1268 	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
   1269 					      &hdcp->is_paired,
   1270 					      &msgs.no_stored_km, &size);
   1271 	if (ret < 0)
   1272 		return ret;
   1273 
   1274 	ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
   1275 	if (ret < 0)
   1276 		return ret;
   1277 
   1278 	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
   1279 				 &msgs.send_hprime, sizeof(msgs.send_hprime));
   1280 	if (ret < 0)
   1281 		return ret;
   1282 
   1283 	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
   1284 	if (ret < 0)
   1285 		return ret;
   1286 
   1287 	if (!hdcp->is_paired) {
   1288 		/* Pairing is required */
   1289 		ret = shim->read_2_2_msg(intel_dig_port,
   1290 					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
   1291 					 &msgs.pairing_info,
   1292 					 sizeof(msgs.pairing_info));
   1293 		if (ret < 0)
   1294 			return ret;
   1295 
   1296 		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
   1297 		if (ret < 0)
   1298 			return ret;
   1299 		hdcp->is_paired = true;
   1300 	}
   1301 
   1302 	return 0;
   1303 }
   1304 
   1305 static int hdcp2_locality_check(struct intel_connector *connector)
   1306 {
   1307 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
   1308 	struct intel_hdcp *hdcp = &connector->hdcp;
   1309 	union {
   1310 		struct hdcp2_lc_init lc_init;
   1311 		struct hdcp2_lc_send_lprime send_lprime;
   1312 	} msgs;
   1313 	const struct intel_hdcp_shim *shim = hdcp->shim;
   1314 	int tries = HDCP2_LC_RETRY_CNT, ret, i;
   1315 
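         	/* The locality check may be retried up to HDCP2_LC_RETRY_CNT times */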
   1316 	for (i = 0; i < tries; i++) {
   1317 		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
   1318 		if (ret < 0)
   1319 			continue;
   1320 
   1321 		ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
   1322 				      sizeof(msgs.lc_init));
   1323 		if (ret < 0)
   1324 			continue;
   1325 
   1326 		ret = shim->read_2_2_msg(intel_dig_port,
   1327 					 HDCP_2_2_LC_SEND_LPRIME,
   1328 					 &msgs.send_lprime,
   1329 					 sizeof(msgs.send_lprime));
   1330 		if (ret < 0)
   1331 			continue;
   1332 
   1333 		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
   1334 		if (!ret)
   1335 			break;
   1336 	}
   1337 
   1338 	return ret;
   1339 }
   1340 
   1341 static int hdcp2_session_key_exchange(struct intel_connector *connector)
   1342 {
   1343 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
   1344 	struct intel_hdcp *hdcp = &connector->hdcp;
   1345 	struct hdcp2_ske_send_eks send_eks;
   1346 	int ret;
   1347 
   1348 	ret = hdcp2_prepare_skey(connector, &send_eks);
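         	/* Get the Session Key (EKS) message from MEI and forward it to the sink */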
   1349 	if (ret < 0)
   1350 		return ret;
   1351 
   1352 	ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
   1353 					sizeof(send_eks));
   1354 	if (ret < 0)
   1355 		return ret;
   1356 
   1357 	return 0;
   1358 }
   1359 
   1360 static
   1361 int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
   1362 {
   1363 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
   1364 	struct intel_hdcp *hdcp = &connector->hdcp;
   1365 	union {
   1366 		struct hdcp2_rep_stream_manage stream_manage;
   1367 		struct hdcp2_rep_stream_ready stream_ready;
   1368 	} msgs;
   1369 	const struct intel_hdcp_shim *shim = hdcp->shim;
   1370 	int ret;
   1371 
   1372 	/* Prepare RepeaterAuth_Stream_Manage msg */
   1373 	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
   1374 	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
   1375 
    1376 	/* K, the number of streams, is fixed at 1. Stored as big-endian. */
   1377 	msgs.stream_manage.k = cpu_to_be16(1);
   1378 
    1379 	/* For HDMI this is forced to 0x0; for DP SST it is also 0x0. */
   1380 	msgs.stream_manage.streams[0].stream_id = 0;
   1381 	msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
   1382 
   1383 	/* Send it to Repeater */
   1384 	ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
   1385 				  sizeof(msgs.stream_manage));
   1386 	if (ret < 0)
   1387 		return ret;
   1388 
   1389 	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
   1390 				 &msgs.stream_ready, sizeof(msgs.stream_ready));
   1391 	if (ret < 0)
   1392 		return ret;
   1393 
   1394 	hdcp->port_data.seq_num_m = hdcp->seq_num_m;
   1395 	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
   1396 
   1397 	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
   1398 	if (ret < 0)
   1399 		return ret;
   1400 
   1401 	hdcp->seq_num_m++;
   1402 
   1403 	if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
   1404 		DRM_DEBUG_KMS("seq_num_m roll over.\n");
   1405 		return -1;
   1406 	}
   1407 
   1408 	return 0;
   1409 }
   1410 
   1411 static
   1412 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
   1413 {
   1414 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
   1415 	struct intel_hdcp *hdcp = &connector->hdcp;
   1416 	struct drm_device *dev = connector->base.dev;
   1417 	union {
   1418 		struct hdcp2_rep_send_receiverid_list recvid_list;
   1419 		struct hdcp2_rep_send_ack rep_ack;
   1420 	} msgs;
   1421 	const struct intel_hdcp_shim *shim = hdcp->shim;
   1422 	u32 seq_num_v, device_cnt;
   1423 	u8 *rx_info;
   1424 	int ret;
   1425 
   1426 	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
   1427 				 &msgs.recvid_list, sizeof(msgs.recvid_list));
   1428 	if (ret < 0)
   1429 		return ret;
   1430 
   1431 	rx_info = msgs.recvid_list.rx_info;
   1432 
   1433 	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
   1434 	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
   1435 		DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
   1436 		return -EINVAL;
   1437 	}
   1438 
    1439 	/* Convert and store seq_num_v in a local variable as a DWORD */
   1440 	seq_num_v =
   1441 		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
   1442 
   1443 	if (seq_num_v < hdcp->seq_num_v) {
   1444 		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
   1445 		DRM_DEBUG_KMS("Seq_num_v roll over.\n");
   1446 		return -EINVAL;
   1447 	}
   1448 
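         	/* The device count is split across the HI bits of rx_info[0] and the LO bits of rx_info[1] */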
   1449 	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
   1450 		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
   1451 	if (drm_hdcp_check_ksvs_revoked(dev, msgs.recvid_list.receiver_ids,
   1452 					device_cnt)) {
   1453 		DRM_ERROR("Revoked receiver ID(s) is in list\n");
   1454 		return -EPERM;
   1455 	}
   1456 
   1457 	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
   1458 						    &msgs.recvid_list,
   1459 						    &msgs.rep_ack);
   1460 	if (ret < 0)
   1461 		return ret;
   1462 
   1463 	hdcp->seq_num_v = seq_num_v;
   1464 	ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
   1465 				  sizeof(msgs.rep_ack));
   1466 	if (ret < 0)
   1467 		return ret;
   1468 
   1469 	return 0;
   1470 }
   1471 
   1472 static int hdcp2_authenticate_repeater(struct intel_connector *connector)
   1473 {
   1474 	int ret;
   1475 
   1476 	ret = hdcp2_authenticate_repeater_topology(connector);
   1477 	if (ret < 0)
   1478 		return ret;
   1479 
   1480 	return hdcp2_propagate_stream_management_info(connector);
   1481 }
   1482 
   1483 static int hdcp2_authenticate_sink(struct intel_connector *connector)
   1484 {
   1485 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
   1486 	struct intel_hdcp *hdcp = &connector->hdcp;
   1487 	const struct intel_hdcp_shim *shim = hdcp->shim;
   1488 	int ret;
   1489 
   1490 	ret = hdcp2_authentication_key_exchange(connector);
   1491 	if (ret < 0) {
   1492 		DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
   1493 		return ret;
   1494 	}
   1495 
   1496 	ret = hdcp2_locality_check(connector);
   1497 	if (ret < 0) {
   1498 		DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
   1499 		return ret;
   1500 	}
   1501 
   1502 	ret = hdcp2_session_key_exchange(connector);
   1503 	if (ret < 0) {
   1504 		DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
   1505 		return ret;
   1506 	}
   1507 
   1508 	if (shim->config_stream_type) {
   1509 		ret = shim->config_stream_type(intel_dig_port,
   1510 					       hdcp->is_repeater,
   1511 					       hdcp->content_type);
   1512 		if (ret < 0)
   1513 			return ret;
   1514 	}
   1515 
   1516 	if (hdcp->is_repeater) {
   1517 		ret = hdcp2_authenticate_repeater(connector);
   1518 		if (ret < 0) {
   1519 			DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
   1520 			return ret;
   1521 		}
   1522 	}
   1523 
   1524 	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
   1525 	ret = hdcp2_authenticate_port(connector);
   1526 	if (ret < 0)
   1527 		return ret;
   1528 
   1529 	return ret;
   1530 }
   1531 
   1532 static int hdcp2_enable_encryption(struct intel_connector *connector)
   1533 {
   1534 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
   1535 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1536 	struct intel_hdcp *hdcp = &connector->hdcp;
   1537 	enum port port = connector->encoder->port;
   1538 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
   1539 	int ret;
   1540 
   1541 	WARN_ON(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
   1542 		LINK_ENCRYPTION_STATUS);
   1543 	if (hdcp->shim->toggle_signalling) {
   1544 		ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
   1545 		if (ret) {
   1546 			DRM_ERROR("Failed to enable HDCP signalling. %d\n",
   1547 				  ret);
   1548 			return ret;
   1549 		}
   1550 	}
   1551 
   1552 	if (I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
   1553 	    LINK_AUTH_STATUS) {
   1554 		/* Link is Authenticated. Now set for Encryption */
   1555 		I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
   1556 			   I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder,
   1557 					       port)) |
   1558 			   CTL_LINK_ENCRYPTION_REQ);
   1559 	}
   1560 
   1561 	ret = intel_de_wait_for_set(dev_priv,
   1562 				    HDCP2_STATUS(dev_priv, cpu_transcoder,
   1563 						 port),
   1564 				    LINK_ENCRYPTION_STATUS,
   1565 				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
   1566 
   1567 	return ret;
   1568 }
   1569 
   1570 static int hdcp2_disable_encryption(struct intel_connector *connector)
   1571 {
   1572 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
   1573 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1574 	struct intel_hdcp *hdcp = &connector->hdcp;
   1575 	enum port port = connector->encoder->port;
   1576 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
   1577 	int ret;
   1578 
   1579 	WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
   1580 			    LINK_ENCRYPTION_STATUS));
   1581 
   1582 	I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
   1583 		   I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, port)) &
   1584 		   ~CTL_LINK_ENCRYPTION_REQ);
   1585 
   1586 	ret = intel_de_wait_for_clear(dev_priv,
   1587 				      HDCP2_STATUS(dev_priv, cpu_transcoder,
   1588 						   port),
   1589 				      LINK_ENCRYPTION_STATUS,
   1590 				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
   1591 	if (ret == -ETIMEDOUT)
    1592 		DRM_DEBUG_KMS("Disable encryption timed out\n");
   1593 
   1594 	if (hdcp->shim->toggle_signalling) {
   1595 		ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
   1596 		if (ret) {
   1597 			DRM_ERROR("Failed to disable HDCP signalling. %d\n",
   1598 				  ret);
   1599 			return ret;
   1600 		}
   1601 	}
   1602 
   1603 	return ret;
   1604 }
   1605 
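/* Authenticate the HDCP2.2 sink (up to 3 attempts), then enable encryption */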
   1606 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
   1607 {
   1608 	int ret, i, tries = 3;
   1609 
   1610 	for (i = 0; i < tries; i++) {
   1611 		ret = hdcp2_authenticate_sink(connector);
   1612 		if (!ret)
   1613 			break;
   1614 
   1615 		/* Clear the MEI HDCP session before retrying */
   1616 		DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
   1617 			      i + 1, tries, ret);
   1618 		if (hdcp2_deauthenticate_port(connector) < 0)
   1619 			DRM_DEBUG_KMS("Port deauth failed.\n");
   1620 	}
   1621 
   1622 	if (i != tries) {
   1623 		/*
   1624 		 * Ensure the required minimum 200 ms interval between
   1625 		 * Session Key Exchange and enabling encryption.
   1626 		 */
   1627 		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
   1628 		ret = hdcp2_enable_encryption(connector);
   1629 		if (ret < 0) {
   1630 			DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
   1631 			if (hdcp2_deauthenticate_port(connector) < 0)
   1632 				DRM_DEBUG_KMS("Port deauth failed.\n");
   1633 		}
   1634 	}
   1635 
   1636 	return ret;
   1637 }
   1638 
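/* Authenticates and enables HDCP2.2 encryption for the connector */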
   1639 static int _intel_hdcp2_enable(struct intel_connector *connector)
   1640 {
   1641 	struct intel_hdcp *hdcp = &connector->hdcp;
   1642 	int ret;
   1643 
   1644 	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
   1645 		      connector->base.name, connector->base.base.id,
   1646 		      hdcp->content_type);
   1647 
   1648 	ret = hdcp2_authenticate_and_encrypt(connector);
   1649 	if (ret) {
   1650 		DRM_DEBUG_KMS("HDCP2 Type%d  Enabling Failed. (%d)\n",
   1651 			      hdcp->content_type, ret);
   1652 		return ret;
   1653 	}
   1654 
   1655 	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
   1656 		      connector->base.name, connector->base.base.id,
   1657 		      hdcp->content_type);
   1658 
   1659 	hdcp->hdcp2_encrypted = true;
   1660 	return 0;
   1661 }
   1662 
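/* Disables HDCP2.2 encryption and deauthenticates the port */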
   1663 static int _intel_hdcp2_disable(struct intel_connector *connector)
   1664 {
   1665 	int ret;
   1666 
   1667 	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
   1668 		      connector->base.name, connector->base.base.id);
   1669 
   1670 	ret = hdcp2_disable_encryption(connector);
   1671 
   1672 	if (hdcp2_deauthenticate_port(connector) < 0)
   1673 		DRM_DEBUG_KMS("Port deauth failed.\n");
   1674 
   1675 	connector->hdcp.hdcp2_encrypted = false;
   1676 
   1677 	return ret;
   1678 }
   1679 
   1680 /* Implements the Link Integrity Check for HDCP2.2 */
   1681 static int intel_hdcp2_check_link(struct intel_connector *connector)
   1682 {
   1683 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
   1684 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1685 	struct intel_hdcp *hdcp = &connector->hdcp;
   1686 	enum port port = connector->encoder->port;
   1687 	enum transcoder cpu_transcoder;
   1688 	int ret = 0;
   1689 
   1690 	mutex_lock(&hdcp->mutex);
   1691 	cpu_transcoder = hdcp->cpu_transcoder;
   1692 
   1693 	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
   1694 	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
   1695 	    !hdcp->hdcp2_encrypted) {
   1696 		ret = -EINVAL;
   1697 		goto out;
   1698 	}
   1699 
   1700 	if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
   1701 		DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
   1702 			  I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder,
   1703 						 port)));
   1704 		ret = -ENXIO;
   1705 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
   1706 		schedule_work(&hdcp->prop_work);
   1707 		goto out;
   1708 	}
   1709 
   1710 	ret = hdcp->shim->check_2_2_link(intel_dig_port);
   1711 	if (ret == HDCP_LINK_PROTECTED) {
   1712 		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
   1713 			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
   1714 			schedule_work(&hdcp->prop_work);
   1715 		}
   1716 		goto out;
   1717 	}
   1718 
   1719 	if (ret == HDCP_TOPOLOGY_CHANGE) {
   1720 		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
   1721 			goto out;
   1722 
   1723 		DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
   1724 		ret = hdcp2_authenticate_repeater_topology(connector);
   1725 		if (!ret) {
   1726 			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
   1727 			schedule_work(&hdcp->prop_work);
   1728 			goto out;
   1729 		}
   1730 		DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
   1731 			      connector->base.name, connector->base.base.id,
   1732 			      ret);
   1733 	} else {
   1734 		DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
   1735 			      connector->base.name, connector->base.base.id);
   1736 	}
   1737 
   1738 	ret = _intel_hdcp2_disable(connector);
   1739 	if (ret) {
   1740 		DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
   1741 			  connector->base.name, connector->base.base.id, ret);
   1742 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
   1743 		schedule_work(&hdcp->prop_work);
   1744 		goto out;
   1745 	}
   1746 
   1747 	ret = _intel_hdcp2_enable(connector);
   1748 	if (ret) {
   1749 		DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
   1750 			      connector->base.name, connector->base.base.id,
   1751 			      ret);
   1752 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
   1753 		schedule_work(&hdcp->prop_work);
   1754 		goto out;
   1755 	}
   1756 
   1757 out:
   1758 	mutex_unlock(&hdcp->mutex);
   1759 	return ret;
   1760 }
   1761 
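/* Periodic link check, rescheduled while HDCP2.2 or HDCP1.4 remains enabled */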
   1762 static void intel_hdcp_check_work(struct work_struct *work)
   1763 {
   1764 	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
   1765 					       struct intel_hdcp,
   1766 					       check_work);
   1767 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
   1768 
   1769 	if (!intel_hdcp2_check_link(connector))
   1770 		schedule_delayed_work(&hdcp->check_work,
   1771 				      DRM_HDCP2_CHECK_PERIOD_MS);
   1772 	else if (!intel_hdcp_check_link(connector))
   1773 		schedule_delayed_work(&hdcp->check_work,
   1774 				      DRM_HDCP_CHECK_PERIOD_MS);
   1775 }
   1776 
   1777 #ifndef __NetBSD__		/* XXX i915 hdmi audio */
   1778 
   1779 static int i915_hdcp_component_bind(struct device *i915_kdev,
   1780 				    struct device *mei_kdev, void *data)
   1781 {
   1782 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
   1783 
   1784 	DRM_DEBUG("I915 HDCP comp bind\n");
   1785 	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1786 	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
   1787 	dev_priv->hdcp_master->mei_dev = mei_kdev;
   1788 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1789 
   1790 	return 0;
   1791 }
   1792 
   1793 static void i915_hdcp_component_unbind(struct device *i915_kdev,
   1794 				       struct device *mei_kdev, void *data)
   1795 {
   1796 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
   1797 
   1798 	DRM_DEBUG("I915 HDCP comp unbind\n");
   1799 	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1800 	dev_priv->hdcp_master = NULL;
   1801 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1802 }
   1803 
   1804 static const struct component_ops i915_hdcp_component_ops = {
   1805 	.bind   = i915_hdcp_component_bind,
   1806 	.unbind = i915_hdcp_component_unbind,
   1807 };
   1808 
   1809 #endif
   1810 
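/* Map an i915 port to the DDI index used by the MEI firmware interface */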
   1811 static inline
   1812 enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
   1813 {
   1814 	switch (port) {
   1815 	case PORT_A:
   1816 		return MEI_DDI_A;
   1817 	case PORT_B ... PORT_F:
   1818 		return (enum mei_fw_ddi)port;
   1819 	default:
   1820 		return MEI_DDI_INVALID_PORT;
   1821 	}
   1822 }
   1823 
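/* Map a CPU transcoder to the MEI firmware transcoder index */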
   1824 static inline
   1825 enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
   1826 {
   1827 	switch (cpu_transcoder) {
   1828 	case TRANSCODER_A ... TRANSCODER_D:
   1829 		return (enum mei_fw_tc)(cpu_transcoder | 0x10);
   1830 	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
   1831 		return MEI_INVALID_TRANSCODER;
   1832 	}
   1833 }
   1834 
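/* Initialize the hdcp_port_data handed to the ME FW for this connector */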
   1835 static inline int initialize_hdcp_port_data(struct intel_connector *connector,
   1836 					    const struct intel_hdcp_shim *shim)
   1837 {
   1838 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1839 	struct intel_hdcp *hdcp = &connector->hdcp;
   1840 	struct hdcp_port_data *data = &hdcp->port_data;
   1841 
   1842 	if (INTEL_GEN(dev_priv) < 12)
   1843 		data->fw_ddi =
   1844 			intel_get_mei_fw_ddi_index(connector->encoder->port);
   1845 	else
   1846 		/*
   1847 		 * Per the ME FW API expectation, for Gen 12+, fw_ddi is
   1848 		 * filled with zero (the invalid port index).
   1849 		 */
   1850 		data->fw_ddi = MEI_DDI_INVALID_PORT;
   1851 
   1852 	/*
   1853 	 * The associated transcoder is set and modified at modeset time, so
   1854 	 * fw_tc is initialized here to zero (the invalid transcoder index).
   1855 	 * On pre-Gen12 platforms it retains this value.
   1856 	 */
   1857 	data->fw_tc = MEI_INVALID_TRANSCODER;
   1858 
   1859 	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
   1860 	data->protocol = (u8)shim->protocol;
   1861 
   1862 	data->k = 1;
   1863 	if (!data->streams)
   1864 		data->streams = kcalloc(data->k,
   1865 					sizeof(struct hdcp2_streamid_type),
   1866 					GFP_KERNEL);
   1867 	if (!data->streams) {
   1868 		DRM_ERROR("Out of Memory\n");
   1869 		return -ENOMEM;
   1870 	}
   1871 
   1872 	data->streams[0].stream_id = 0;
   1873 	data->streams[0].stream_type = hdcp->content_type;
   1874 
   1875 	return 0;
   1876 }
   1877 
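/* HDCP2.2 needs the MEI HDCP component and a Gen10+, GLK, KBL or CFL platform */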
   1878 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
   1879 {
   1880 	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
   1881 		return false;
   1882 
   1883 	return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
   1884 		IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv));
   1885 }
   1886 
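/* Register the i915/MEI component used for HDCP2.2, if supported */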
   1887 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
   1888 {
   1889 	int ret;
   1890 
   1891 	if (!is_hdcp2_supported(dev_priv))
   1892 		return;
   1893 
   1894 	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1895 	WARN_ON(dev_priv->hdcp_comp_added);
   1896 
   1897 	dev_priv->hdcp_comp_added = true;
   1898 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1899 #ifdef __NetBSD__		/* XXX i915 hdmi audio */
   1900 	ret = 0;
   1901 #else
   1902 	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
   1903 				  I915_COMPONENT_HDCP);
   1904 #endif
   1905 	if (ret < 0) {
   1906 		DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
   1907 		mutex_lock(&dev_priv->hdcp_comp_mutex);
   1908 		dev_priv->hdcp_comp_added = false;
   1909 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1910 		return;
   1911 	}
   1912 }
   1913 
   1914 static void intel_hdcp2_init(struct intel_connector *connector,
   1915 			     const struct intel_hdcp_shim *shim)
   1916 {
   1917 	struct intel_hdcp *hdcp = &connector->hdcp;
   1918 	int ret;
   1919 
   1920 	ret = initialize_hdcp_port_data(connector, shim);
   1921 	if (ret) {
   1922 		DRM_DEBUG_KMS("Mei hdcp data init failed\n");
   1923 		return;
   1924 	}
   1925 
   1926 	hdcp->hdcp2_supported = true;
   1927 }
   1928 
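/* Attach the content protection property and set up HDCP state for the connector */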
   1929 int intel_hdcp_init(struct intel_connector *connector,
   1930 		    const struct intel_hdcp_shim *shim)
   1931 {
   1932 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1933 	struct intel_hdcp *hdcp = &connector->hdcp;
   1934 	int ret;
   1935 
   1936 	if (!shim)
   1937 		return -EINVAL;
   1938 
   1939 	if (is_hdcp2_supported(dev_priv))
   1940 		intel_hdcp2_init(connector, shim);
   1941 
   1942 	ret =
   1943 	drm_connector_attach_content_protection_property(&connector->base,
   1944 							 hdcp->hdcp2_supported);
   1945 	if (ret) {
   1946 		hdcp->hdcp2_supported = false;
   1947 		kfree(hdcp->port_data.streams);
   1948 		return ret;
   1949 	}
   1950 
   1951 	hdcp->shim = shim;
   1952 	mutex_init(&hdcp->mutex);
   1953 	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
   1954 	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
   1955 	DRM_INIT_WAITQUEUE(&hdcp->cp_irq_queue, "hdcpirq");
   1956 
   1957 	return 0;
   1958 }
   1959 
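/* Enable content protection, preferring HDCP2.2 with an HDCP1.4 fallback */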
   1960 int intel_hdcp_enable(struct intel_connector *connector,
   1961 		      enum transcoder cpu_transcoder, u8 content_type)
   1962 {
   1963 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1964 	struct intel_hdcp *hdcp = &connector->hdcp;
   1965 	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
   1966 	int ret = -EINVAL;
   1967 
   1968 	if (!hdcp->shim)
   1969 		return -ENOENT;
   1970 
   1971 	mutex_lock(&hdcp->mutex);
   1972 	WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
   1973 	hdcp->content_type = content_type;
   1974 
   1975 	if (INTEL_GEN(dev_priv) >= 12) {
   1976 		hdcp->cpu_transcoder = cpu_transcoder;
   1977 		hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
   1978 	}
   1979 
   1980 	/*
   1981 	 * HDCP2.2 is more secure than HDCP1.4, so if the setup is capable
   1982 	 * of HDCP2.2 it is preferred.
   1983 	 */
   1984 	if (intel_hdcp2_capable(connector)) {
   1985 		ret = _intel_hdcp2_enable(connector);
   1986 		if (!ret)
   1987 			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
   1988 	}
   1989 
   1990 	/*
   1991 	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
   1992 	 * be attempted.
   1993 	 */
   1994 	if (ret && intel_hdcp_capable(connector) &&
   1995 	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
   1996 		ret = _intel_hdcp_enable(connector);
   1997 	}
   1998 
   1999 	if (!ret) {
   2000 		schedule_delayed_work(&hdcp->check_work, check_link_interval);
   2001 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
   2002 		schedule_work(&hdcp->prop_work);
   2003 	}
   2004 
   2005 	mutex_unlock(&hdcp->mutex);
   2006 	return ret;
   2007 }
   2008 
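/* Disable whichever HDCP version is currently encrypting the link */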
   2009 int intel_hdcp_disable(struct intel_connector *connector)
   2010 {
   2011 	struct intel_hdcp *hdcp = &connector->hdcp;
   2012 	int ret = 0;
   2013 
   2014 	if (!hdcp->shim)
   2015 		return -ENOENT;
   2016 
   2017 	mutex_lock(&hdcp->mutex);
   2018 
   2019 	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
   2020 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
   2021 		if (hdcp->hdcp2_encrypted)
   2022 			ret = _intel_hdcp2_disable(connector);
   2023 		else if (hdcp->hdcp_encrypted)
   2024 			ret = _intel_hdcp_disable(connector);
   2025 	}
   2026 
   2027 	mutex_unlock(&hdcp->mutex);
   2028 	cancel_delayed_work_sync(&hdcp->check_work);
   2029 	return ret;
   2030 }
   2031 
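/* Unregister the i915/MEI HDCP component added at init */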
   2032 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
   2033 {
   2034 	mutex_lock(&dev_priv->hdcp_comp_mutex);
   2035 	if (!dev_priv->hdcp_comp_added) {
   2036 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   2037 		return;
   2038 	}
   2039 
   2040 	dev_priv->hdcp_comp_added = false;
   2041 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   2042 
   2043 #ifndef __NetBSD__		/* XXX i915 hdmi audio */
   2044 	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
   2045 #endif
   2046 }
   2047 
   2048 void intel_hdcp_cleanup(struct intel_connector *connector)
   2049 {
   2050 	if (!connector->hdcp.shim)
   2051 		return;
   2052 
   2053 	mutex_lock(&connector->hdcp.mutex);
   2054 	kfree(connector->hdcp.port_data.streams);
   2055 	mutex_unlock(&connector->hdcp.mutex);
   2056 
   2057 	mutex_destroy(&connector->hdcp.mutex);
   2058 }
   2059 
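/* Decide whether a content protection change requires a full modeset */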
   2060 void intel_hdcp_atomic_check(struct drm_connector *connector,
   2061 			     struct drm_connector_state *old_state,
   2062 			     struct drm_connector_state *new_state)
   2063 {
   2064 	u64 old_cp = old_state->content_protection;
   2065 	u64 new_cp = new_state->content_protection;
   2066 	struct drm_crtc_state *crtc_state;
   2067 
   2068 	if (!new_state->crtc) {
   2069 		/*
   2070 		 * If the connector is being disabled with CP enabled, mark it
   2071 		 * desired so it's re-enabled when the connector is brought back
   2072 		 */
   2073 		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
   2074 			new_state->content_protection =
   2075 				DRM_MODE_CONTENT_PROTECTION_DESIRED;
   2076 		return;
   2077 	}
   2078 
   2079 	/*
   2080 	 * Nothing to do if the state didn't change, or if HDCP was activated
   2081 	 * since the last commit, provided the HDCP content type also did not change.
   2082 	 */
   2083 	if (old_cp == new_cp ||
   2084 	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
   2085 	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
   2086 		if (old_state->hdcp_content_type ==
   2087 				new_state->hdcp_content_type)
   2088 			return;
   2089 	}
   2090 
   2091 	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
   2092 						   new_state->crtc);
   2093 	crtc_state->mode_changed = true;
   2094 }
   2095 
   2096 /* Handles the CP_IRQ raised from the DP HDCP sink */
   2097 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
   2098 {
   2099 	struct intel_hdcp *hdcp = &connector->hdcp;
   2100 	unsigned long irqflags;
   2101 
   2102 	if (!hdcp->shim)
   2103 		return;
   2104 
   2105 	spin_lock_irqsave(&connector->hdcp.cp_irq_lock, irqflags);
   2106 	atomic_inc(&connector->hdcp.cp_irq_count);
   2107 	DRM_SPIN_WAKEUP_ALL(&connector->hdcp.cp_irq_queue,
   2108 	    &connector->hdcp.cp_irq_lock);
   2109 	spin_unlock_irqrestore(&connector->hdcp.cp_irq_lock, irqflags);
   2110 
   2111 	schedule_delayed_work(&hdcp->check_work, 0);
   2112 }
   2113