/*	$NetBSD: intel_gt.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_gt.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");

#include "debugfs_gt.h"
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_uncore.h"
#include "intel_pm.h"

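/*
 * GT bring-up entry points.  A rough sketch of the expected order (the
 * exact call sites live elsewhere in the driver and may differ between
 * kernel versions):
 *
 *	intel_gt_init_early(gt, i915)    - early probe, before MMIO/GGTT setup
 *	intel_gt_init_hw_early(gt, ggtt) - once the GGTT has been probed
 *	intel_gt_init(gt)                - full GT bring-up
 *	intel_gt_driver_register(gt)     - expose RPS and debugfs entries
 *
 * Teardown runs through intel_gt_driver_unregister(), _remove(),
 * _release() and _late_release() near the end of this file.
 */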
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
	gt->i915 = i915;
	gt->uncore = &i915->uncore;

	spin_lock_init(&gt->irq_lock);

	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

	intel_gt_init_reset(gt);
	intel_gt_init_requests(gt);
	intel_gt_init_timelines(gt);
	intel_gt_pm_init_early(gt);

	intel_rps_init_early(&gt->rps);
	intel_uc_init_early(&gt->uc);
}

void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
{
	gt->ggtt = ggtt;
}

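/*
 * Quiesce a ring that the driver does not otherwise use by zeroing its
 * control registers: with RING_CTL cleared and HEAD == TAIL == 0 the
 * ring is disabled and empty.  See the comment in intel_gt_init_hw()
 * for why this matters on old hardware.
 */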
static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}

static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (IS_GEN(i915, 2)) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (IS_GEN(i915, 3)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}

int intel_gt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

	if (IS_HASWELL(i915))
		intel_uncore_write(uncore,
				   MI_PREDICATE_RESULT_2,
				   IS_HSW_GT3(i915) ?
				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(gt, "init");

	intel_gt_init_swizzling(gt);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (i.e. head != tail) after resume, which
	 * will prevent C3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(gt);

	ret = i915_ppgtt_init_hw(gt);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(&gt->uc);
	if (ret) {
		i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init(gt);

out:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	return ret;
}

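/*
 * Small wrappers around intel_uncore_rmw() used by the error register
 * handling below: rmw_set() ORs bits into a register and rmw_clear()
 * masks them out.
 */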
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	intel_uncore_rmw(uncore, reg, 0, 0);
}

static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (!IS_GEN(i915, 2))
		clear_register(uncore, PGTBL_ER);

	if (INTEL_GEN(i915) < 4)
		clear_register(uncore, IPEIR(RENDER_RING_BASE));
	else
		clear_register(uncore, IPEIR_I965);

	clear_register(uncore, EIR);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * Some errors might have become stuck;
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		rmw_set(uncore, EMR, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (INTEL_GEN(i915) >= 12) {
		rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 8) {
		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, gt, engine_mask, id)
			gen8_clear_engine_error_register(engine);
	}
}

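/*
 * Report, but do not clear, any fault latched in the per-engine fault
 * registers.  Clearing is handled separately by
 * intel_gt_clear_error_registers().
 */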
static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, gt, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault & PAGE_MASK,
					 fault & RING_FAULT_GTTSEL_MASK ?
					 "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault),
					 RING_FAULT_FAULT_TYPE(fault));
		}
	}
}

static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
	u32 fault;

	if (INTEL_GEN(gt->i915) >= 12) {
		fault_reg = GEN12_RING_FAULT_REG;
		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
	} else {
		fault_reg = GEN8_RING_FAULT_REG;
		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
	}

	fault = intel_uncore_read(uncore, fault_reg);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

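		/*
		 * Reassemble the faulting address from the two TLB data
		 * registers: DATA0 carries the page frame (shifted into
		 * bits 43:12) and the FAULT_VA_HIGH_BITS field of DATA1
		 * carries the remaining high bits (bit 44 and up).
		 */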
		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		DRM_DEBUG_DRIVER("Unexpected fault\n"
				 "\tAddr: 0x%08x_%08x\n"
				 "\tAddress space: %s\n"
				 "\tEngine ID: %d\n"
				 "\tSource ID: %d\n"
				 "\tType: %d\n",
				 upper_32_bits(fault_addr),
				 lower_32_bits(fault_addr),
				 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
				 GEN8_RING_FAULT_ENGINE_ID(fault),
				 RING_FAULT_SRCID(fault),
				 RING_FAULT_FAULT_TYPE(fault));
	}
}

void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (INTEL_GEN(i915) >= 8)
		gen8_check_faults(gt);
	else if (INTEL_GEN(i915) >= 6)
		gen6_check_faults(gt);
	else
		return;

	intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

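	/*
	 * The uncached register read below forces any posted GGTT writes
	 * to land (see the comment above).  RING_HEAD of the render ring
	 * appears to be chosen simply as a register that exists on every
	 * generation.  The read is taken under the uncore lock and only
	 * if the device is already awake, so the flush never wakes the
	 * hardware by itself.
	 */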
	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
		unsigned long flags;

		spin_lock_irqsave(&uncore->lock, flags);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irqrestore(&uncore->lock, flags);
	}
}

void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (INTEL_GEN(gt->i915) < 6)
		intel_gtt_chipset_flush();
}

void intel_gt_driver_register(struct intel_gt *gt)
{
	intel_rps_driver_register(&gt->rps);

	debugfs_gt_register(gt);
}

static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

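	/*
	 * Prefer stolen memory for the scratch page, falling back to an
	 * internal object if the stolen region is unavailable or full.
	 */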
	obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	gt->scratch = i915_vma_make_unshrinkable(vma);

	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}

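/*
 * The GT-wide kernel address space (gt->vm): a full PPGTT where the
 * hardware supports more than aliasing PPGTT, otherwise a reference to
 * the global GTT itself.
 */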
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
		return &i915_ppgtt_create(gt)->vm;
	else
		return i915_vm_get(&gt->ggtt->vm);
}

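/*
 * Taking and immediately releasing the context's timeline lock
 * serialises against request retirement that may be in progress on
 * another CPU (see the caller below).
 */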
static int __intel_context_flush_retire(struct intel_context *ce)
{
	struct intel_timeline *tl;

	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl))
		return PTR_ERR(tl);

	intel_context_timeline_unlock(tl);
	return 0;
}

static int __engines_record_defaults(struct intel_gt *gt)
{
	struct i915_request *requests[I915_NUM_ENGINES] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the GPU during very early sanitisation, the current
	 * register state on the GPU should reflect its default values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	for_each_engine(engine, gt, id) {
		struct intel_renderstate so;
		struct intel_context *ce;
		struct i915_request *rq;

		/* We must be able to switch to something! */
		GEM_BUG_ON(!engine->kernel_context);

		err = intel_renderstate_init(&so, engine);
		if (err)
			goto out;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out;
		}

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			intel_context_put(ce);
			goto out;
		}

		err = intel_engine_emit_ctx_wa(rq);
		if (err)
			goto err_rq;

		err = intel_renderstate_emit(&so, rq);
		if (err)
			goto err_rq;

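		/*
		 * Note that the error paths above fall through to this
		 * label rather than bailing out: the request has already
		 * been created, so it must still be submitted and the
		 * renderstate released before err is acted upon.
		 */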
err_rq:
		requests[id] = i915_request_get(rq);
		i915_request_add(rq);
		intel_renderstate_fini(&so);
		if (err)
			goto out;
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		err = -EIO;
		goto out;
	}

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct i915_request *rq;
		struct i915_vma *state;
		void *vaddr;

		rq = requests[id];
		if (!rq)
			continue;

		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
		state = rq->context->state;
		if (!state)
			continue;

		/* Serialise with retirement on another CPU */
		GEM_BUG_ON(!i915_request_completed(rq));
		err = __intel_context_flush_retire(rq->context);
		if (err)
			goto out;

		/* We want to be able to unbind the state from the GGTT */
		GEM_BUG_ON(intel_context_is_pinned(rq->context));

		/*
		 * As we will hold a reference to the logical state, it will
		 * not be torn down with the context, and importantly the
		 * object will hold onto its vma (making it possible for a
		 * stray GTT write to corrupt our defaults). Unmap the vma
		 * from the GTT to prevent such accidents and reclaim the
		 * space.
		 */
		err = i915_vma_unbind(state);
		if (err)
			goto out;

		i915_gem_object_lock(state->obj);
		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
		i915_gem_object_unlock(state->obj);
		if (err)
			goto out;

		i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);

		/* Check we can acquire the image of the context state */
		vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto out;
		}

		rq->engine->default_state = i915_gem_object_get(state->obj);
		i915_gem_object_unpin_map(state->obj);
	}

out:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	if (err)
		intel_gt_set_wedged(gt);

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct intel_context *ce;
		struct i915_request *rq;

		rq = requests[id];
		if (!rq)
			continue;

		ce = rq->context;
		i915_request_put(rq);
		intel_context_put(ce);
	}
	return err;
}

static int __engines_verify_workarounds(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, gt, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	return err;
}

static void __intel_gt_disable(struct intel_gt *gt)
{
	intel_gt_set_wedged_on_init(gt);

	intel_gt_suspend_prepare(gt);
	intel_gt_suspend_late(gt);

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}

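/*
 * Full GT bring-up: scratch page, GT power management, the kernel
 * address space, the engines and the microcontrollers (uc), then a
 * resume of the hardware followed by recording the default context
 * state and verifying the workarounds.  On failure the completed steps
 * are unwound in reverse order and the GT is marked as wedged.
 */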
int intel_gt_init(struct intel_gt *gt)
{
	int err;

	err = i915_inject_probe_error(gt->i915, -ENODEV);
	if (err)
		return err;

	/*
	 * This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
	if (err)
		goto out_fw;

	intel_gt_pm_init(gt);

	gt->vm = kernel_vm(gt);
	if (!gt->vm) {
		err = -ENOMEM;
		goto err_pm;
	}

	err = intel_engines_init(gt);
	if (err)
		goto err_engines;

	intel_uc_init(&gt->uc);

	err = intel_gt_resume(gt);
	if (err)
		goto err_uc_init;

	err = __engines_record_defaults(gt);
	if (err)
		goto err_gt;

	err = __engines_verify_workarounds(gt);
	if (err)
		goto err_gt;

	err = i915_inject_probe_error(gt->i915, -EIO);
	if (err)
		goto err_gt;

	goto out_fw;
err_gt:
	__intel_gt_disable(gt);
	intel_uc_fini_hw(&gt->uc);
err_uc_init:
	intel_uc_fini(&gt->uc);
err_engines:
	intel_engines_release(gt);
	i915_vm_put(fetch_and_zero(&gt->vm));
err_pm:
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
out_fw:
	if (err)
		intel_gt_set_wedged_on_init(gt);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	return err;
}

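/*
 * The remaining entry points tear the GT down in stages.  In the
 * upstream driver they are expected to run roughly in the order
 * intel_gt_driver_unregister(), intel_gt_driver_remove(),
 * intel_gt_driver_release() and finally intel_gt_driver_late_release(),
 * mirroring bring-up in reverse.
 */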
void intel_gt_driver_remove(struct intel_gt *gt)
{
	__intel_gt_disable(gt);

	intel_uc_fini_hw(&gt->uc);
	intel_uc_fini(&gt->uc);

	intel_engines_release(gt);
}

void intel_gt_driver_unregister(struct intel_gt *gt)
{
	intel_rps_driver_unregister(&gt->rps);
}

void intel_gt_driver_release(struct intel_gt *gt)
{
	struct i915_address_space *vm;

	vm = fetch_and_zero(&gt->vm);
	if (vm) /* FIXME being called twice on error paths :( */
		i915_vm_put(vm);

	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
}

void intel_gt_driver_late_release(struct intel_gt *gt)
{
	intel_uc_driver_late_release(&gt->uc);
	intel_gt_fini_requests(gt);
	intel_gt_fini_reset(gt);
	intel_gt_fini_timelines(gt);
	intel_engines_free(gt);
}