/*	$NetBSD: intel_sideband.c,v 1.5 2021/12/18 23:45:29 riastradh Exp $	*/

/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_sideband.c,v 1.5 2021/12/18 23:45:29 riastradh Exp $");

#include <asm/iosf_mbi.h>

#include "i915_drv.h"
#include "intel_sideband.h"

/*
 * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
 * VLV_VLV2_PUNIT_HAS_0.8.docx
 */

/* Standard MMIO read, non-posted */
#define SB_MRD_NP	0x00
/* Standard MMIO write, non-posted */
#define SB_MWR_NP	0x01
/* Private register read, double-word addressing, non-posted */
#define SB_CRRDDA_NP	0x06
/* Private register write, double-word addressing, non-posted */
#define SB_CRWRDA_NP	0x07

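/*
 * Empty cross-call callback: on_each_cpu(ping, ...) below uses it purely to
 * kick every CPU out of its idle state; it does no work itself.
 */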
static void ping(void *info)
{
}

static void __vlv_punit_get(struct drm_i915_private *i915)
{
	iosf_mbi_punit_acquire();

	/*
	 * Prevent the CPU from sleeping while we use this sideband, otherwise
	 * the punit may cause a machine hang. The issue appears to be isolated
	 * to changing the power state of the CPU package while changing
	 * the power state via the punit, and we have only observed it
	 * reliably on 4-core Baytrail systems, suggesting the issue is in the
	 * power delivery mechanism and likely to be board/function
	 * specific. Hence we presume the workaround needs only be applied
	 * to the Valleyview P-unit and not all sideband communications.
	 */
	if (IS_VALLEYVIEW(i915)) {
		pm_qos_update_request(&i915->sb_qos, 0);
		on_each_cpu(ping, NULL, 1);
	}
}

static void __vlv_punit_put(struct drm_i915_private *i915)
{
	if (IS_VALLEYVIEW(i915))
		pm_qos_update_request(&i915->sb_qos, PM_QOS_DEFAULT_VALUE);

	iosf_mbi_punit_release();
}

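/*
 * vlv_iosf_sb_get/vlv_iosf_sb_put bracket a sequence of sideband accesses.
 * A typical caller (a sketch only; the register and bit names here are
 * illustrative, not real definitions) looks like:
 *
 *	vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
 *	val = vlv_punit_read(i915, addr);
 *	vlv_punit_write(i915, addr, val | enable_bit);
 *	vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
 */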
void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
{
	if (ports & BIT(VLV_IOSF_SB_PUNIT))
		__vlv_punit_get(i915);

	mutex_lock(&i915->sb_lock);
}

void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
{
	mutex_unlock(&i915->sb_lock);

	if (ports & BIT(VLV_IOSF_SB_PUNIT))
		__vlv_punit_put(i915);
}

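/*
 * Perform one IOSF sideband transaction: write the target address and
 * (for writes) the payload, ring the doorbell with IOSF_SB_BUSY set, then
 * poll for IOSF_SB_BUSY to clear and (for reads) fetch the reply data.
 */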
static int vlv_sideband_rw(struct drm_i915_private *i915,
			   u32 devfn, u32 port, u32 opcode,
			   u32 addr, u32 *val)
{
	struct intel_uncore *uncore = &i915->uncore;
	const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
	int err;

	lockdep_assert_held(&i915->sb_lock);
	if (port == IOSF_PORT_PUNIT)
		iosf_mbi_assert_punit_acquired();

	/* Flush the previous comms, just in case it failed last time. */
	if (intel_wait_for_register(uncore,
				    VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
				    5)) {
		drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n",
			is_read ? "read" : "write");
		return -EAGAIN;
	}

	preempt_disable();

	intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr);
	intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val);
	intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ,
			      (devfn << IOSF_DEVFN_SHIFT) |
			      (opcode << IOSF_OPCODE_SHIFT) |
			      (port << IOSF_PORT_SHIFT) |
			      (0xf << IOSF_BYTE_ENABLES_SHIFT) |
			      (0 << IOSF_BAR_SHIFT) |
			      IOSF_SB_BUSY);

	if (__intel_wait_for_register_fw(uncore,
					 VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
					 10000, 0, NULL) == 0) {
		if (is_read)
			*val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
		err = 0;
	} else {
		drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n",
			is_read ? "read" : "write");
		err = -ETIMEDOUT;
	}

	preempt_enable();

	return err;
}


u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
			SB_CRRDDA_NP, addr, &val);

	return val;
}

int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
{
	return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
			       SB_CRWRDA_NP, addr, &val);
}

u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
			SB_CRRDDA_NP, reg, &val);

	return val;
}

void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC,
			SB_CRRDDA_NP, addr, &val);

	return val;
}

u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
			SB_CRRDDA_NP, reg, &val);

	return val;
}

void vlv_iosf_sb_write(struct drm_i915_private *i915,
		       u8 port, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
			SB_CRRDDA_NP, reg, &val);

	return val;
}

void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
			SB_CRRDDA_NP, reg, &val);

	return val;
}

void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
			SB_CRWRDA_NP, reg, &val);
}

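/*
 * DPIO is the display PHY sideband on VLV/CHV. The IOSF port differs per
 * PHY, so it is looked up from the pipe's PHY via dpio_phy_iosf_port[].
 */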
u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
{
	int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)];
	u32 val = 0;

	vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);

	/*
	 * FIXME: There might be some registers where all 1's is a valid value,
	 * so ideally we should check the register offset instead...
	 */
	WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
	     pipe_name(pipe), reg, val);

	return val;
}

void vlv_dpio_write(struct drm_i915_private *i915,
		    enum pipe pipe, int reg, u32 val)
{
	int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)];

	vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
}

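/* FLISDSI: sideband port for the MIPI DSI PHY/PLL configuration registers. */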
u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
			reg, &val);
	return val;
}

void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
			reg, &val);
}

/* SBI access */
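/*
 * SBI is the sideband interface used to reach the iCLK and mPHY blocks in
 * the PCH. It follows the same pattern as the IOSF sideband above: program
 * address and data, set the busy bit, poll for completion and check the
 * response status.
 */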
static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
			enum intel_sbi_destination destination,
			u32 *val, bool is_read)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 cmd;

	lockdep_assert_held(&i915->sb_lock);

	if (intel_wait_for_register_fw(uncore,
				       SBI_CTL_STAT, SBI_BUSY, 0,
				       100)) {
		drm_err(&i915->drm,
			"timeout waiting for SBI to become ready\n");
		return -EBUSY;
	}

	intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16);
	intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val);

	if (destination == SBI_ICLK)
		cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
	else
		cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
	if (!is_read)
		cmd |= BIT(8);
	intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY);

	if (__intel_wait_for_register_fw(uncore,
					 SBI_CTL_STAT, SBI_BUSY, 0,
					 100, 100, &cmd)) {
		drm_err(&i915->drm,
			"timeout waiting for SBI to complete %s\n",
			is_read ? "read" : "write");
		return -ETIMEDOUT;
	}

	if (cmd & SBI_RESPONSE_FAIL) {
		drm_err(&i915->drm, "error during SBI %s of reg %x\n",
			is_read ? "read" : "write", reg);
		return -ENXIO;
	}

	if (is_read)
		*val = intel_uncore_read_fw(uncore, SBI_DATA);

	return 0;
}

u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
		   enum intel_sbi_destination destination)
{
	u32 result = 0;

	intel_sbi_rw(i915, reg, destination, &result, true);

	return result;
}

void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
		     enum intel_sbi_destination destination)
{
	intel_sbi_rw(i915, reg, destination, &value, false);
}

static inline int gen6_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
		return -ENODEV;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

static inline int gen7_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

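/*
 * GEN6+ PCODE mailbox protocol: write the request payload to
 * GEN6_PCODE_DATA/DATA1, write the mailbox command with GEN6_PCODE_READY
 * set, then poll for the firmware to clear GEN6_PCODE_READY and decode
 * the error bits it leaves behind in the mailbox register.
 */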
static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
				  u32 mbox, u32 *val, u32 *val1,
				  int fast_timeout_us,
				  int slow_timeout_ms,
				  bool is_read)
{
	struct intel_uncore *uncore = &i915->uncore;

	lockdep_assert_held(&i915->sb_lock);

	/*
	 * GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;

	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
	intel_uncore_write_fw(uncore,
			      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 fast_timeout_us,
					 slow_timeout_ms,
					 &mbox))
		return -ETIMEDOUT;

	if (is_read)
		*val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
	if (is_read && val1)
		*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);

	if (INTEL_GEN(i915) > 6)
		return gen7_check_mailbox_status(mbox);
	else
		return gen6_check_mailbox_status(mbox);
}

int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
			   u32 *val, u32 *val1)
{
	int err;

	mutex_lock(&i915->sb_lock);
	err = __sandybridge_pcode_rw(i915, mbox, val, val1,
				     500, 0,
				     true);
	mutex_unlock(&i915->sb_lock);

	if (err) {
		drm_dbg(&i915->drm,
			"warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
			mbox, __builtin_return_address(0), err);
	}

	return err;
}

int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
				    u32 mbox, u32 val,
				    int fast_timeout_us,
				    int slow_timeout_ms)
{
	int err;

	mutex_lock(&i915->sb_lock);
	err = __sandybridge_pcode_rw(i915, mbox, &val, NULL,
				     fast_timeout_us, slow_timeout_ms,
				     false);
	mutex_unlock(&i915->sb_lock);

	if (err) {
		drm_dbg(&i915->drm,
			"warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
			val, mbox, __builtin_return_address(0), err);
	}

	return err;
}
    465 
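/*
 * Helper for skl_pcode_request(): fire a single request and report whether
 * PCODE's reply, masked by @reply_mask, matches the expected @reply, or
 * whether the transaction itself failed (in which case *status is nonzero).
 */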
static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	*status = __sandybridge_pcode_rw(i915, mbox, &request, NULL,
					 500, 0,
					 true);

	return *status || ((request & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @i915: device private
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error, or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms; if that times out, polling is retried for another
 * 50 ms with preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	mutex_lock(&i915->sb_lock);

#define COND \
	skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee how soon its passed condition is first
	 * evaluated, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delay
	 * the request completion.
	 */
	drm_dbg_kms(&i915->drm,
		    "PCODE timeout, retrying with preemption disabled\n");
	WARN_ON_ONCE(timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	mutex_unlock(&i915->sb_lock);
	return ret ? ret : status;
#undef COND
}
    543