/*	$NetBSD: intel_gt_pm.c,v 1.5 2021/12/19 12:33:56 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_gt_pm.c,v 1.5 2021/12/19 12:33:56 riastradh Exp $");

#include <linux/suspend.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_params.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_llc.h"
#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_wakeref.h"

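/*
 * Temporarily drop (on suspend) or restore (on resume) the forcewake
 * references held on behalf of userspace, so that gt->wakeref.count
 * only reflects kernel-owned references while the device is asleep.
 */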
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
	int count = atomic_read(&gt->user_wakeref);

	/* Inside suspend/resume so single threaded, no races to worry about. */
	if (likely(!count))
		return;

	intel_gt_pm_get(gt);
	if (suspend) {
		GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
		atomic_sub(count, &gt->wakeref.count);
	} else {
		atomic_add(count, &gt->wakeref.count);
	}
	intel_gt_pm_put(gt);
}

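/*
 * Called when the first GT wakeref is taken: power the GT back up, block
 * DC state transitions by holding the GT_IRQ display power domain, and
 * unpark RC6, RPS, the PMU and request handling.
 */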
static int __gt_unpark(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	struct drm_i915_private *i915 = gt->i915;

	GT_TRACE(gt, "\n");

	i915_globals_unpark();

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has a negative impact on the performance of the chip,
	 * with huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
	GEM_BUG_ON(!gt->awake);

	intel_rc6_unpark(&gt->rc6);
	intel_rps_unpark(&gt->rps);
	i915_pmu_gt_unparked(i915);

	intel_gt_unpark_requests(gt);

	return 0;
}

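/*
 * Called when the last GT wakeref is released: park request handling,
 * the PMU, RPS and RC6, flush any residual interrupts, and drop the
 * GT_IRQ display power domain (asynchronously, as it is slow).
 */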
static int __gt_park(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
	struct drm_i915_private *i915 = gt->i915;

	GT_TRACE(gt, "\n");

	intel_gt_park_requests(gt);

	i915_vma_parked(gt);
	i915_pmu_gt_parked(i915);
	intel_rps_park(&gt->rps);
	intel_rc6_park(&gt->rc6);

	/* Everything switched off, flush any residual interrupt just in case */
	intel_synchronize_irq(i915);

	/* Defer dropping the display power well for 100ms, it's slow! */
	GEM_BUG_ON(!wakeref);
	intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);

	i915_globals_park();

	return 0;
}

static const struct intel_wakeref_ops wf_ops = {
	.get = __gt_unpark,
	.put = __gt_park,
};

void intel_gt_pm_init_early(struct intel_gt *gt)
{
	intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
}

void intel_gt_pm_init(struct intel_gt *gt)
{
	/*
	 * Enabling power-management should be "self-healing". If we cannot
	 * enable a feature, simply leave it disabled with a notice to the
	 * user.
	 */
	intel_rc6_init(&gt->rc6);
	intel_rps_init(&gt->rps);
}

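/* A full GT reset is only attempted when it will not clobber the display. */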
static bool reset_engines(struct intel_gt *gt)
{
	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		return false;

	return __intel_gt_reset(gt, ALL_ENGINES) == 0;
}

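/*
 * Bring the hardware back to a known state around suspend/resume:
 * un-wedge if necessary, sanitize the microcontrollers, and reset the
 * engines (always when @force is set, otherwise only if a full GT reset
 * is permitted), all under forcewake and a raw runtime-pm reference.
 */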
static void gt_sanitize(struct intel_gt *gt, bool force)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;

	GT_TRACE(gt, "force:%s", yesno(force));

	/* Use a raw wakeref to avoid calling intel_display_power_get early */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in, and so it is worth trying to use the device once more.
	 */
	if (intel_gt_is_wedged(gt))
		intel_gt_unset_wedged(gt);

	intel_uc_sanitize(&gt->uc);

	for_each_engine(engine, gt, id)
		if (engine->reset.prepare)
			engine->reset.prepare(engine);

	intel_uc_reset_prepare(&gt->uc);

	if (reset_engines(gt) || force) {
		for_each_engine(engine, gt, id)
			__intel_engine_reset(engine, false);
	}

	for_each_engine(engine, gt, id)
		if (engine->reset.finish)
			engine->reset.finish(engine);

	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

void intel_gt_pm_fini(struct intel_gt *gt)
{
	intel_rps_fini(&gt->rps);
	intel_rc6_fini(&gt->rc6);
	intel_wakeref_fini(&gt->wakeref);
}

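/*
 * Bring the GT back to life after system resume: sanitize the hardware,
 * reinitialise it, restart every engine's kernel context, and re-enable
 * RPS, the LLC, RC6 and the microcontrollers. On any failure the GT is
 * declared wedged.
 */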
int intel_gt_resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	err = intel_gt_has_init_error(gt);
	if (err)
		return err;

	GT_TRACE(gt, "\n");

	/*
	 * After resume, we may need to poke into the pinned kernel
	 * contexts to paper over any damage caused by the sudden suspend.
	 * Only the kernel contexts should remain pinned over suspend,
	 * allowing us to fix up the user contexts on their first pin.
	 */
	intel_gt_pm_get(gt);

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	intel_rc6_sanitize(&gt->rc6);
	gt_sanitize(gt, true);
	if (intel_gt_is_wedged(gt)) {
		err = -EIO;
		goto out_fw;
	}

	/* Only when the HW is re-initialised can we replay the requests */
	err = intel_gt_init_hw(gt);
	if (err) {
		dev_err(gt->i915->drm.dev,
			"Failed to initialize GPU, declaring it wedged!\n");
		goto err_wedged;
	}

	intel_rps_enable(&gt->rps);
	intel_llc_enable(&gt->llc);

	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);

		engine->serial++; /* kernel context lost */
		err = engine->resume(engine);

		intel_engine_pm_put(engine);
		if (err) {
			dev_err(gt->i915->drm.dev,
				"Failed to restart %s (%d)\n",
				engine->name, err);
			goto err_wedged;
		}
	}

	intel_rc6_enable(&gt->rc6);

	intel_uc_resume(&gt->uc);

	user_forcewake(gt, false);

out_fw:
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_gt_pm_put(gt);
	return err;

err_wedged:
	intel_gt_set_wedged(gt);
	goto out_fw;
}

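/*
 * Wait for the GT to idle before suspending; if it does not idle within
 * I915_GEM_IDLE_TIMEOUT, wedge the GPU to forcibly cancel the outstanding
 * work, then wait for GT power management to reach idle.
 */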
static void wait_for_suspend(struct intel_gt *gt)
{
	if (!intel_gt_pm_is_awake(gt))
		return;

	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		/*
		 * Forcibly cancel outstanding work and leave
		 * the gpu quiet.
		 */
		intel_gt_set_wedged(gt);
		intel_gt_retire_requests(gt);
	}

	intel_gt_pm_wait_for_idle(gt);
}

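/*
 * Early suspend step: drop the user-held forcewake, wait for the GT to
 * idle, and suspend the microcontrollers.
 */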
void intel_gt_suspend_prepare(struct intel_gt *gt)
{
	user_forcewake(gt, true);
	wait_for_suspend(gt);

	intel_uc_suspend(&gt->uc);
}

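/*
 * Report the system suspend target so that s2idle (suspend-to-idle) can be
 * treated like runtime suspend below; compiled out in the NetBSD port.
 */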
#ifndef __NetBSD__		/* XXX i915 pm */
static suspend_state_t pm_suspend_target(void)
{
#if IS_ENABLED(CONFIG_SUSPEND) && IS_ENABLED(CONFIG_PM_SLEEP)
	return pm_suspend_target_state;
#else
	return PM_SUSPEND_TO_IDLE;
#endif
}
#endif

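/*
 * Late suspend step: ensure the GT is idle and, unless the system is only
 * entering suspend-to-idle, disable RPS, RC6 and the LLC and sanitize the
 * hardware before power is removed.
 */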
void intel_gt_suspend_late(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	/* We expect to be idle already, but also want to be independent */
	wait_for_suspend(gt);

	if (is_mock_gt(gt))
		return;

	GEM_BUG_ON(gt->awake);

#ifndef __NetBSD__
	/*
	 * On disabling the device, we want to turn off HW access to memory
	 * that we no longer own.
	 *
	 * However, not all suspend-states disable the device. S0 (s2idle)
	 * is effectively runtime-suspend, the device is left powered on
	 * but needs to be put into a low power state. We need to keep
	 * power management enabled, but we also retain system state and so
	 * it remains safe to keep on using our allocated memory.
	 */
	if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
		return;
#endif

	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
		intel_rps_disable(&gt->rps);
		intel_rc6_disable(&gt->rc6);
		intel_llc_disable(&gt->llc);
	}

	gt_sanitize(gt, false);

	GT_TRACE(gt, "\n");
}

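/* Runtime (autosuspend) entry point: only the microcontrollers need work. */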
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
	intel_uc_runtime_suspend(&gt->uc);

	GT_TRACE(gt, "\n");
}

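/*
 * Runtime resume entry point: reinitialise swizzling and resume the
 * microcontrollers.
 */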
int intel_gt_runtime_resume(struct intel_gt *gt)
{
	GT_TRACE(gt, "\n");
	intel_gt_init_swizzling(gt);

	return intel_uc_runtime_resume(&gt->uc);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_gt_pm.c"
#endif