1 1.3 riastrad /* $NetBSD: intel_gt.c,v 1.3 2021/12/19 11:39:55 riastradh Exp $ */ 2 1.1 riastrad 3 1.1 riastrad // SPDX-License-Identifier: MIT 4 1.1 riastrad /* 5 1.1 riastrad * Copyright 2019 Intel Corporation 6 1.1 riastrad */ 7 1.1 riastrad 8 1.1 riastrad #include <sys/cdefs.h> 9 1.3 riastrad __KERNEL_RCSID(0, "$NetBSD: intel_gt.c,v 1.3 2021/12/19 11:39:55 riastradh Exp $"); 10 1.1 riastrad 11 1.3 riastrad #include <linux/kernel.h> 12 1.3 riastrad 13 1.3 riastrad #if IS_ENABLED(CONFIG_DEBUGFS) 14 1.1 riastrad #include "debugfs_gt.h" 15 1.3 riastrad #endif 16 1.1 riastrad #include "i915_drv.h" 17 1.1 riastrad #include "intel_context.h" 18 1.1 riastrad #include "intel_gt.h" 19 1.1 riastrad #include "intel_gt_pm.h" 20 1.1 riastrad #include "intel_gt_requests.h" 21 1.1 riastrad #include "intel_mocs.h" 22 1.1 riastrad #include "intel_rc6.h" 23 1.1 riastrad #include "intel_renderstate.h" 24 1.1 riastrad #include "intel_rps.h" 25 1.1 riastrad #include "intel_uncore.h" 26 1.1 riastrad #include "intel_pm.h" 27 1.1 riastrad 28 1.1 riastrad void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) 29 1.1 riastrad { 30 1.1 riastrad gt->i915 = i915; 31 1.1 riastrad gt->uncore = &i915->uncore; 32 1.1 riastrad 33 1.1 riastrad spin_lock_init(>->irq_lock); 34 1.1 riastrad 35 1.1 riastrad INIT_LIST_HEAD(>->closed_vma); 36 1.1 riastrad spin_lock_init(>->closed_lock); 37 1.1 riastrad 38 1.1 riastrad intel_gt_init_reset(gt); 39 1.1 riastrad intel_gt_init_requests(gt); 40 1.1 riastrad intel_gt_init_timelines(gt); 41 1.1 riastrad intel_gt_pm_init_early(gt); 42 1.1 riastrad 43 1.1 riastrad intel_rps_init_early(>->rps); 44 1.1 riastrad intel_uc_init_early(>->uc); 45 1.1 riastrad } 46 1.1 riastrad 47 1.1 riastrad void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt) 48 1.1 riastrad { 49 1.1 riastrad gt->ggtt = ggtt; 50 1.1 riastrad } 51 1.1 riastrad 52 1.1 riastrad static void init_unused_ring(struct intel_gt *gt, u32 base) 53 1.1 riastrad { 54 1.1 riastrad struct intel_uncore *uncore = gt->uncore; 55 1.1 riastrad 56 1.1 riastrad intel_uncore_write(uncore, RING_CTL(base), 0); 57 1.1 riastrad intel_uncore_write(uncore, RING_HEAD(base), 0); 58 1.1 riastrad intel_uncore_write(uncore, RING_TAIL(base), 0); 59 1.1 riastrad intel_uncore_write(uncore, RING_START(base), 0); 60 1.1 riastrad } 61 1.1 riastrad 62 1.1 riastrad static void init_unused_rings(struct intel_gt *gt) 63 1.1 riastrad { 64 1.1 riastrad struct drm_i915_private *i915 = gt->i915; 65 1.1 riastrad 66 1.1 riastrad if (IS_I830(i915)) { 67 1.1 riastrad init_unused_ring(gt, PRB1_BASE); 68 1.1 riastrad init_unused_ring(gt, SRB0_BASE); 69 1.1 riastrad init_unused_ring(gt, SRB1_BASE); 70 1.1 riastrad init_unused_ring(gt, SRB2_BASE); 71 1.1 riastrad init_unused_ring(gt, SRB3_BASE); 72 1.1 riastrad } else if (IS_GEN(i915, 2)) { 73 1.1 riastrad init_unused_ring(gt, SRB0_BASE); 74 1.1 riastrad init_unused_ring(gt, SRB1_BASE); 75 1.1 riastrad } else if (IS_GEN(i915, 3)) { 76 1.1 riastrad init_unused_ring(gt, PRB1_BASE); 77 1.1 riastrad init_unused_ring(gt, PRB2_BASE); 78 1.1 riastrad } 79 1.1 riastrad } 80 1.1 riastrad 81 1.1 riastrad int intel_gt_init_hw(struct intel_gt *gt) 82 1.1 riastrad { 83 1.1 riastrad struct drm_i915_private *i915 = gt->i915; 84 1.1 riastrad struct intel_uncore *uncore = gt->uncore; 85 1.1 riastrad int ret; 86 1.1 riastrad 87 1.1 riastrad gt->last_init_time = ktime_get(); 88 1.1 riastrad 89 1.1 riastrad /* Double layer security blanket, see i915_gem_init() */ 90 1.1 riastrad intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 91 1.1 riastrad 92 1.1 riastrad if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9) 93 1.1 riastrad intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf)); 94 1.1 riastrad 95 1.1 riastrad if (IS_HASWELL(i915)) 96 1.1 riastrad intel_uncore_write(uncore, 97 1.1 riastrad MI_PREDICATE_RESULT_2, 98 1.1 riastrad IS_HSW_GT3(i915) ? 99 1.1 riastrad LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); 100 1.1 riastrad 101 1.1 riastrad /* Apply the GT workarounds... */ 102 1.1 riastrad intel_gt_apply_workarounds(gt); 103 1.1 riastrad /* ...and determine whether they are sticking. */ 104 1.1 riastrad intel_gt_verify_workarounds(gt, "init"); 105 1.1 riastrad 106 1.1 riastrad intel_gt_init_swizzling(gt); 107 1.1 riastrad 108 1.1 riastrad /* 109 1.1 riastrad * At least 830 can leave some of the unused rings 110 1.1 riastrad * "active" (ie. head != tail) after resume which 111 1.1 riastrad * will prevent c3 entry. Makes sure all unused rings 112 1.1 riastrad * are totally idle. 113 1.1 riastrad */ 114 1.1 riastrad init_unused_rings(gt); 115 1.1 riastrad 116 1.1 riastrad ret = i915_ppgtt_init_hw(gt); 117 1.1 riastrad if (ret) { 118 1.1 riastrad DRM_ERROR("Enabling PPGTT failed (%d)\n", ret); 119 1.1 riastrad goto out; 120 1.1 riastrad } 121 1.1 riastrad 122 1.1 riastrad /* We can't enable contexts until all firmware is loaded */ 123 1.1 riastrad ret = intel_uc_init_hw(>->uc); 124 1.1 riastrad if (ret) { 125 1.1 riastrad i915_probe_error(i915, "Enabling uc failed (%d)\n", ret); 126 1.1 riastrad goto out; 127 1.1 riastrad } 128 1.1 riastrad 129 1.1 riastrad intel_mocs_init(gt); 130 1.1 riastrad 131 1.1 riastrad out: 132 1.1 riastrad intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); 133 1.1 riastrad return ret; 134 1.1 riastrad } 135 1.1 riastrad 136 1.1 riastrad static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set) 137 1.1 riastrad { 138 1.1 riastrad intel_uncore_rmw(uncore, reg, 0, set); 139 1.1 riastrad } 140 1.1 riastrad 141 1.1 riastrad static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr) 142 1.1 riastrad { 143 1.1 riastrad intel_uncore_rmw(uncore, reg, clr, 0); 144 1.1 riastrad } 145 1.1 riastrad 146 1.1 riastrad static void clear_register(struct intel_uncore *uncore, i915_reg_t reg) 147 1.1 riastrad { 148 1.1 riastrad intel_uncore_rmw(uncore, reg, 0, 0); 149 1.1 riastrad } 150 1.1 riastrad 151 1.1 riastrad static void gen8_clear_engine_error_register(struct intel_engine_cs *engine) 152 1.1 riastrad { 153 1.1 riastrad GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0); 154 1.1 riastrad GEN6_RING_FAULT_REG_POSTING_READ(engine); 155 1.1 riastrad } 156 1.1 riastrad 157 1.1 riastrad void 158 1.1 riastrad intel_gt_clear_error_registers(struct intel_gt *gt, 159 1.1 riastrad intel_engine_mask_t engine_mask) 160 1.1 riastrad { 161 1.1 riastrad struct drm_i915_private *i915 = gt->i915; 162 1.1 riastrad struct intel_uncore *uncore = gt->uncore; 163 1.1 riastrad u32 eir; 164 1.1 riastrad 165 1.1 riastrad if (!IS_GEN(i915, 2)) 166 1.1 riastrad clear_register(uncore, PGTBL_ER); 167 1.1 riastrad 168 1.1 riastrad if (INTEL_GEN(i915) < 4) 169 1.1 riastrad clear_register(uncore, IPEIR(RENDER_RING_BASE)); 170 1.1 riastrad else 171 1.1 riastrad clear_register(uncore, IPEIR_I965); 172 1.1 riastrad 173 1.1 riastrad clear_register(uncore, EIR); 174 1.1 riastrad eir = intel_uncore_read(uncore, EIR); 175 1.1 riastrad if (eir) { 176 1.1 riastrad /* 177 1.1 riastrad * some errors might have become stuck, 178 1.1 riastrad * mask them. 179 1.1 riastrad */ 180 1.1 riastrad DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 181 1.1 riastrad rmw_set(uncore, EMR, eir); 182 1.1 riastrad intel_uncore_write(uncore, GEN2_IIR, 183 1.1 riastrad I915_MASTER_ERROR_INTERRUPT); 184 1.1 riastrad } 185 1.1 riastrad 186 1.1 riastrad if (INTEL_GEN(i915) >= 12) { 187 1.1 riastrad rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID); 188 1.1 riastrad intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG); 189 1.1 riastrad } else if (INTEL_GEN(i915) >= 8) { 190 1.1 riastrad rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID); 191 1.1 riastrad intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG); 192 1.1 riastrad } else if (INTEL_GEN(i915) >= 6) { 193 1.1 riastrad struct intel_engine_cs *engine; 194 1.1 riastrad enum intel_engine_id id; 195 1.1 riastrad 196 1.1 riastrad for_each_engine_masked(engine, gt, engine_mask, id) 197 1.1 riastrad gen8_clear_engine_error_register(engine); 198 1.1 riastrad } 199 1.1 riastrad } 200 1.1 riastrad 201 1.1 riastrad static void gen6_check_faults(struct intel_gt *gt) 202 1.1 riastrad { 203 1.1 riastrad struct intel_engine_cs *engine; 204 1.1 riastrad enum intel_engine_id id; 205 1.1 riastrad u32 fault; 206 1.1 riastrad 207 1.1 riastrad for_each_engine(engine, gt, id) { 208 1.1 riastrad fault = GEN6_RING_FAULT_REG_READ(engine); 209 1.1 riastrad if (fault & RING_FAULT_VALID) { 210 1.1 riastrad DRM_DEBUG_DRIVER("Unexpected fault\n" 211 1.3 riastrad "\tAddr: 0x%08"PRIx32"\n" 212 1.1 riastrad "\tAddress space: %s\n" 213 1.1 riastrad "\tSource ID: %d\n" 214 1.1 riastrad "\tType: %d\n", 215 1.1 riastrad fault & PAGE_MASK, 216 1.1 riastrad fault & RING_FAULT_GTTSEL_MASK ? 217 1.1 riastrad "GGTT" : "PPGTT", 218 1.1 riastrad RING_FAULT_SRCID(fault), 219 1.1 riastrad RING_FAULT_FAULT_TYPE(fault)); 220 1.1 riastrad } 221 1.1 riastrad } 222 1.1 riastrad } 223 1.1 riastrad 224 1.1 riastrad static void gen8_check_faults(struct intel_gt *gt) 225 1.1 riastrad { 226 1.1 riastrad struct intel_uncore *uncore = gt->uncore; 227 1.1 riastrad i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg; 228 1.1 riastrad u32 fault; 229 1.1 riastrad 230 1.1 riastrad if (INTEL_GEN(gt->i915) >= 12) { 231 1.1 riastrad fault_reg = GEN12_RING_FAULT_REG; 232 1.1 riastrad fault_data0_reg = GEN12_FAULT_TLB_DATA0; 233 1.1 riastrad fault_data1_reg = GEN12_FAULT_TLB_DATA1; 234 1.1 riastrad } else { 235 1.1 riastrad fault_reg = GEN8_RING_FAULT_REG; 236 1.1 riastrad fault_data0_reg = GEN8_FAULT_TLB_DATA0; 237 1.1 riastrad fault_data1_reg = GEN8_FAULT_TLB_DATA1; 238 1.1 riastrad } 239 1.1 riastrad 240 1.1 riastrad fault = intel_uncore_read(uncore, fault_reg); 241 1.1 riastrad if (fault & RING_FAULT_VALID) { 242 1.1 riastrad u32 fault_data0, fault_data1; 243 1.1 riastrad u64 fault_addr; 244 1.1 riastrad 245 1.1 riastrad fault_data0 = intel_uncore_read(uncore, fault_data0_reg); 246 1.1 riastrad fault_data1 = intel_uncore_read(uncore, fault_data1_reg); 247 1.1 riastrad 248 1.1 riastrad fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | 249 1.1 riastrad ((u64)fault_data0 << 12); 250 1.1 riastrad 251 1.1 riastrad DRM_DEBUG_DRIVER("Unexpected fault\n" 252 1.1 riastrad "\tAddr: 0x%08x_%08x\n" 253 1.1 riastrad "\tAddress space: %s\n" 254 1.1 riastrad "\tEngine ID: %d\n" 255 1.1 riastrad "\tSource ID: %d\n" 256 1.1 riastrad "\tType: %d\n", 257 1.1 riastrad upper_32_bits(fault_addr), 258 1.1 riastrad lower_32_bits(fault_addr), 259 1.1 riastrad fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", 260 1.1 riastrad GEN8_RING_FAULT_ENGINE_ID(fault), 261 1.1 riastrad RING_FAULT_SRCID(fault), 262 1.1 riastrad RING_FAULT_FAULT_TYPE(fault)); 263 1.1 riastrad } 264 1.1 riastrad } 265 1.1 riastrad 266 1.1 riastrad void intel_gt_check_and_clear_faults(struct intel_gt *gt) 267 1.1 riastrad { 268 1.1 riastrad struct drm_i915_private *i915 = gt->i915; 269 1.1 riastrad 270 1.1 riastrad /* From GEN8 onwards we only have one 'All Engine Fault Register' */ 271 1.1 riastrad if (INTEL_GEN(i915) >= 8) 272 1.1 riastrad gen8_check_faults(gt); 273 1.1 riastrad else if (INTEL_GEN(i915) >= 6) 274 1.1 riastrad gen6_check_faults(gt); 275 1.1 riastrad else 276 1.1 riastrad return; 277 1.1 riastrad 278 1.1 riastrad intel_gt_clear_error_registers(gt, ALL_ENGINES); 279 1.1 riastrad } 280 1.1 riastrad 281 1.1 riastrad void intel_gt_flush_ggtt_writes(struct intel_gt *gt) 282 1.1 riastrad { 283 1.1 riastrad struct intel_uncore *uncore = gt->uncore; 284 1.1 riastrad intel_wakeref_t wakeref; 285 1.1 riastrad 286 1.1 riastrad /* 287 1.1 riastrad * No actual flushing is required for the GTT write domain for reads 288 1.1 riastrad * from the GTT domain. Writes to it "immediately" go to main memory 289 1.1 riastrad * as far as we know, so there's no chipset flush. It also doesn't 290 1.1 riastrad * land in the GPU render cache. 291 1.1 riastrad * 292 1.1 riastrad * However, we do have to enforce the order so that all writes through 293 1.1 riastrad * the GTT land before any writes to the device, such as updates to 294 1.1 riastrad * the GATT itself. 295 1.1 riastrad * 296 1.1 riastrad * We also have to wait a bit for the writes to land from the GTT. 297 1.1 riastrad * An uncached read (i.e. mmio) seems to be ideal for the round-trip 298 1.1 riastrad * timing. This issue has only been observed when switching quickly 299 1.1 riastrad * between GTT writes and CPU reads from inside the kernel on recent hw, 300 1.1 riastrad * and it appears to only affect discrete GTT blocks (i.e. on LLC 301 1.1 riastrad * system agents we cannot reproduce this behaviour, until Cannonlake 302 1.1 riastrad * that was!). 303 1.1 riastrad */ 304 1.1 riastrad 305 1.1 riastrad wmb(); 306 1.1 riastrad 307 1.1 riastrad if (INTEL_INFO(gt->i915)->has_coherent_ggtt) 308 1.1 riastrad return; 309 1.1 riastrad 310 1.1 riastrad intel_gt_chipset_flush(gt); 311 1.1 riastrad 312 1.1 riastrad with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) { 313 1.1 riastrad unsigned long flags; 314 1.1 riastrad 315 1.1 riastrad spin_lock_irqsave(&uncore->lock, flags); 316 1.1 riastrad intel_uncore_posting_read_fw(uncore, 317 1.1 riastrad RING_HEAD(RENDER_RING_BASE)); 318 1.1 riastrad spin_unlock_irqrestore(&uncore->lock, flags); 319 1.1 riastrad } 320 1.1 riastrad } 321 1.1 riastrad 322 1.1 riastrad void intel_gt_chipset_flush(struct intel_gt *gt) 323 1.1 riastrad { 324 1.1 riastrad wmb(); 325 1.1 riastrad if (INTEL_GEN(gt->i915) < 6) 326 1.1 riastrad intel_gtt_chipset_flush(); 327 1.1 riastrad } 328 1.1 riastrad 329 1.1 riastrad void intel_gt_driver_register(struct intel_gt *gt) 330 1.1 riastrad { 331 1.1 riastrad intel_rps_driver_register(>->rps); 332 1.1 riastrad 333 1.3 riastrad #if IS_ENABLED(CONFIG_DEBUGFS) 334 1.1 riastrad debugfs_gt_register(gt); 335 1.3 riastrad #endif 336 1.1 riastrad } 337 1.1 riastrad 338 1.1 riastrad static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) 339 1.1 riastrad { 340 1.1 riastrad struct drm_i915_private *i915 = gt->i915; 341 1.1 riastrad struct drm_i915_gem_object *obj; 342 1.1 riastrad struct i915_vma *vma; 343 1.1 riastrad int ret; 344 1.1 riastrad 345 1.1 riastrad obj = i915_gem_object_create_stolen(i915, size); 346 1.1 riastrad if (IS_ERR(obj)) 347 1.1 riastrad obj = i915_gem_object_create_internal(i915, size); 348 1.1 riastrad if (IS_ERR(obj)) { 349 1.1 riastrad DRM_ERROR("Failed to allocate scratch page\n"); 350 1.1 riastrad return PTR_ERR(obj); 351 1.1 riastrad } 352 1.1 riastrad 353 1.1 riastrad vma = i915_vma_instance(obj, >->ggtt->vm, NULL); 354 1.1 riastrad if (IS_ERR(vma)) { 355 1.1 riastrad ret = PTR_ERR(vma); 356 1.1 riastrad goto err_unref; 357 1.1 riastrad } 358 1.1 riastrad 359 1.1 riastrad ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); 360 1.1 riastrad if (ret) 361 1.1 riastrad goto err_unref; 362 1.1 riastrad 363 1.1 riastrad gt->scratch = i915_vma_make_unshrinkable(vma); 364 1.1 riastrad 365 1.1 riastrad return 0; 366 1.1 riastrad 367 1.1 riastrad err_unref: 368 1.1 riastrad i915_gem_object_put(obj); 369 1.1 riastrad return ret; 370 1.1 riastrad } 371 1.1 riastrad 372 1.1 riastrad static void intel_gt_fini_scratch(struct intel_gt *gt) 373 1.1 riastrad { 374 1.1 riastrad i915_vma_unpin_and_release(>->scratch, 0); 375 1.1 riastrad } 376 1.1 riastrad 377 1.1 riastrad static struct i915_address_space *kernel_vm(struct intel_gt *gt) 378 1.1 riastrad { 379 1.1 riastrad if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING) 380 1.1 riastrad return &i915_ppgtt_create(gt)->vm; 381 1.1 riastrad else 382 1.1 riastrad return i915_vm_get(>->ggtt->vm); 383 1.1 riastrad } 384 1.1 riastrad 385 1.1 riastrad static int __intel_context_flush_retire(struct intel_context *ce) 386 1.1 riastrad { 387 1.1 riastrad struct intel_timeline *tl; 388 1.1 riastrad 389 1.1 riastrad tl = intel_context_timeline_lock(ce); 390 1.1 riastrad if (IS_ERR(tl)) 391 1.1 riastrad return PTR_ERR(tl); 392 1.1 riastrad 393 1.1 riastrad intel_context_timeline_unlock(tl); 394 1.1 riastrad return 0; 395 1.1 riastrad } 396 1.1 riastrad 397 1.1 riastrad static int __engines_record_defaults(struct intel_gt *gt) 398 1.1 riastrad { 399 1.1 riastrad struct i915_request *requests[I915_NUM_ENGINES] = {}; 400 1.1 riastrad struct intel_engine_cs *engine; 401 1.1 riastrad enum intel_engine_id id; 402 1.1 riastrad int err = 0; 403 1.1 riastrad 404 1.1 riastrad /* 405 1.1 riastrad * As we reset the gpu during very early sanitisation, the current 406 1.1 riastrad * register state on the GPU should reflect its defaults values. 407 1.1 riastrad * We load a context onto the hw (with restore-inhibit), then switch 408 1.1 riastrad * over to a second context to save that default register state. We 409 1.1 riastrad * can then prime every new context with that state so they all start 410 1.1 riastrad * from the same default HW values. 411 1.1 riastrad */ 412 1.1 riastrad 413 1.1 riastrad for_each_engine(engine, gt, id) { 414 1.1 riastrad struct intel_renderstate so; 415 1.1 riastrad struct intel_context *ce; 416 1.1 riastrad struct i915_request *rq; 417 1.1 riastrad 418 1.1 riastrad /* We must be able to switch to something! */ 419 1.1 riastrad GEM_BUG_ON(!engine->kernel_context); 420 1.1 riastrad 421 1.1 riastrad err = intel_renderstate_init(&so, engine); 422 1.1 riastrad if (err) 423 1.1 riastrad goto out; 424 1.1 riastrad 425 1.1 riastrad ce = intel_context_create(engine); 426 1.1 riastrad if (IS_ERR(ce)) { 427 1.1 riastrad err = PTR_ERR(ce); 428 1.1 riastrad goto out; 429 1.1 riastrad } 430 1.1 riastrad 431 1.1 riastrad rq = intel_context_create_request(ce); 432 1.1 riastrad if (IS_ERR(rq)) { 433 1.1 riastrad err = PTR_ERR(rq); 434 1.1 riastrad intel_context_put(ce); 435 1.1 riastrad goto out; 436 1.1 riastrad } 437 1.1 riastrad 438 1.1 riastrad err = intel_engine_emit_ctx_wa(rq); 439 1.1 riastrad if (err) 440 1.1 riastrad goto err_rq; 441 1.1 riastrad 442 1.1 riastrad err = intel_renderstate_emit(&so, rq); 443 1.1 riastrad if (err) 444 1.1 riastrad goto err_rq; 445 1.1 riastrad 446 1.1 riastrad err_rq: 447 1.1 riastrad requests[id] = i915_request_get(rq); 448 1.1 riastrad i915_request_add(rq); 449 1.1 riastrad intel_renderstate_fini(&so); 450 1.1 riastrad if (err) 451 1.1 riastrad goto out; 452 1.1 riastrad } 453 1.1 riastrad 454 1.1 riastrad /* Flush the default context image to memory, and enable powersaving. */ 455 1.1 riastrad if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) { 456 1.1 riastrad err = -EIO; 457 1.1 riastrad goto out; 458 1.1 riastrad } 459 1.1 riastrad 460 1.1 riastrad for (id = 0; id < ARRAY_SIZE(requests); id++) { 461 1.1 riastrad struct i915_request *rq; 462 1.1 riastrad struct i915_vma *state; 463 1.1 riastrad void *vaddr; 464 1.1 riastrad 465 1.1 riastrad rq = requests[id]; 466 1.1 riastrad if (!rq) 467 1.1 riastrad continue; 468 1.1 riastrad 469 1.1 riastrad GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags)); 470 1.1 riastrad state = rq->context->state; 471 1.1 riastrad if (!state) 472 1.1 riastrad continue; 473 1.1 riastrad 474 1.1 riastrad /* Serialise with retirement on another CPU */ 475 1.1 riastrad GEM_BUG_ON(!i915_request_completed(rq)); 476 1.1 riastrad err = __intel_context_flush_retire(rq->context); 477 1.1 riastrad if (err) 478 1.1 riastrad goto out; 479 1.1 riastrad 480 1.1 riastrad /* We want to be able to unbind the state from the GGTT */ 481 1.1 riastrad GEM_BUG_ON(intel_context_is_pinned(rq->context)); 482 1.1 riastrad 483 1.1 riastrad /* 484 1.1 riastrad * As we will hold a reference to the logical state, it will 485 1.1 riastrad * not be torn down with the context, and importantly the 486 1.1 riastrad * object will hold onto its vma (making it possible for a 487 1.1 riastrad * stray GTT write to corrupt our defaults). Unmap the vma 488 1.1 riastrad * from the GTT to prevent such accidents and reclaim the 489 1.1 riastrad * space. 490 1.1 riastrad */ 491 1.1 riastrad err = i915_vma_unbind(state); 492 1.1 riastrad if (err) 493 1.1 riastrad goto out; 494 1.1 riastrad 495 1.1 riastrad i915_gem_object_lock(state->obj); 496 1.1 riastrad err = i915_gem_object_set_to_cpu_domain(state->obj, false); 497 1.1 riastrad i915_gem_object_unlock(state->obj); 498 1.1 riastrad if (err) 499 1.1 riastrad goto out; 500 1.1 riastrad 501 1.1 riastrad i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC); 502 1.1 riastrad 503 1.1 riastrad /* Check we can acquire the image of the context state */ 504 1.1 riastrad vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB); 505 1.1 riastrad if (IS_ERR(vaddr)) { 506 1.1 riastrad err = PTR_ERR(vaddr); 507 1.1 riastrad goto out; 508 1.1 riastrad } 509 1.1 riastrad 510 1.1 riastrad rq->engine->default_state = i915_gem_object_get(state->obj); 511 1.1 riastrad i915_gem_object_unpin_map(state->obj); 512 1.1 riastrad } 513 1.1 riastrad 514 1.1 riastrad out: 515 1.1 riastrad /* 516 1.1 riastrad * If we have to abandon now, we expect the engines to be idle 517 1.1 riastrad * and ready to be torn-down. The quickest way we can accomplish 518 1.1 riastrad * this is by declaring ourselves wedged. 519 1.1 riastrad */ 520 1.1 riastrad if (err) 521 1.1 riastrad intel_gt_set_wedged(gt); 522 1.1 riastrad 523 1.1 riastrad for (id = 0; id < ARRAY_SIZE(requests); id++) { 524 1.1 riastrad struct intel_context *ce; 525 1.1 riastrad struct i915_request *rq; 526 1.1 riastrad 527 1.1 riastrad rq = requests[id]; 528 1.1 riastrad if (!rq) 529 1.1 riastrad continue; 530 1.1 riastrad 531 1.1 riastrad ce = rq->context; 532 1.1 riastrad i915_request_put(rq); 533 1.1 riastrad intel_context_put(ce); 534 1.1 riastrad } 535 1.1 riastrad return err; 536 1.1 riastrad } 537 1.1 riastrad 538 1.1 riastrad static int __engines_verify_workarounds(struct intel_gt *gt) 539 1.1 riastrad { 540 1.1 riastrad struct intel_engine_cs *engine; 541 1.1 riastrad enum intel_engine_id id; 542 1.1 riastrad int err = 0; 543 1.1 riastrad 544 1.1 riastrad if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 545 1.1 riastrad return 0; 546 1.1 riastrad 547 1.1 riastrad for_each_engine(engine, gt, id) { 548 1.1 riastrad if (intel_engine_verify_workarounds(engine, "load")) 549 1.1 riastrad err = -EIO; 550 1.1 riastrad } 551 1.1 riastrad 552 1.1 riastrad return err; 553 1.1 riastrad } 554 1.1 riastrad 555 1.1 riastrad static void __intel_gt_disable(struct intel_gt *gt) 556 1.1 riastrad { 557 1.1 riastrad intel_gt_set_wedged_on_init(gt); 558 1.1 riastrad 559 1.1 riastrad intel_gt_suspend_prepare(gt); 560 1.1 riastrad intel_gt_suspend_late(gt); 561 1.1 riastrad 562 1.1 riastrad GEM_BUG_ON(intel_gt_pm_is_awake(gt)); 563 1.1 riastrad } 564 1.1 riastrad 565 1.1 riastrad int intel_gt_init(struct intel_gt *gt) 566 1.1 riastrad { 567 1.1 riastrad int err; 568 1.1 riastrad 569 1.1 riastrad err = i915_inject_probe_error(gt->i915, -ENODEV); 570 1.1 riastrad if (err) 571 1.1 riastrad return err; 572 1.1 riastrad 573 1.1 riastrad /* 574 1.1 riastrad * This is just a security blanket to placate dragons. 575 1.1 riastrad * On some systems, we very sporadically observe that the first TLBs 576 1.1 riastrad * used by the CS may be stale, despite us poking the TLB reset. If 577 1.1 riastrad * we hold the forcewake during initialisation these problems 578 1.1 riastrad * just magically go away. 579 1.1 riastrad */ 580 1.1 riastrad intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); 581 1.1 riastrad 582 1.1 riastrad err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K); 583 1.1 riastrad if (err) 584 1.1 riastrad goto out_fw; 585 1.1 riastrad 586 1.1 riastrad intel_gt_pm_init(gt); 587 1.1 riastrad 588 1.1 riastrad gt->vm = kernel_vm(gt); 589 1.1 riastrad if (!gt->vm) { 590 1.1 riastrad err = -ENOMEM; 591 1.1 riastrad goto err_pm; 592 1.1 riastrad } 593 1.1 riastrad 594 1.1 riastrad err = intel_engines_init(gt); 595 1.1 riastrad if (err) 596 1.1 riastrad goto err_engines; 597 1.1 riastrad 598 1.1 riastrad intel_uc_init(>->uc); 599 1.1 riastrad 600 1.1 riastrad err = intel_gt_resume(gt); 601 1.1 riastrad if (err) 602 1.1 riastrad goto err_uc_init; 603 1.1 riastrad 604 1.1 riastrad err = __engines_record_defaults(gt); 605 1.1 riastrad if (err) 606 1.1 riastrad goto err_gt; 607 1.1 riastrad 608 1.1 riastrad err = __engines_verify_workarounds(gt); 609 1.1 riastrad if (err) 610 1.1 riastrad goto err_gt; 611 1.1 riastrad 612 1.1 riastrad err = i915_inject_probe_error(gt->i915, -EIO); 613 1.1 riastrad if (err) 614 1.1 riastrad goto err_gt; 615 1.1 riastrad 616 1.1 riastrad goto out_fw; 617 1.1 riastrad err_gt: 618 1.1 riastrad __intel_gt_disable(gt); 619 1.1 riastrad intel_uc_fini_hw(>->uc); 620 1.1 riastrad err_uc_init: 621 1.1 riastrad intel_uc_fini(>->uc); 622 1.1 riastrad err_engines: 623 1.1 riastrad intel_engines_release(gt); 624 1.1 riastrad i915_vm_put(fetch_and_zero(>->vm)); 625 1.1 riastrad err_pm: 626 1.1 riastrad intel_gt_pm_fini(gt); 627 1.1 riastrad intel_gt_fini_scratch(gt); 628 1.1 riastrad out_fw: 629 1.1 riastrad if (err) 630 1.1 riastrad intel_gt_set_wedged_on_init(gt); 631 1.1 riastrad intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); 632 1.1 riastrad return err; 633 1.1 riastrad } 634 1.1 riastrad 635 1.1 riastrad void intel_gt_driver_remove(struct intel_gt *gt) 636 1.1 riastrad { 637 1.1 riastrad __intel_gt_disable(gt); 638 1.1 riastrad 639 1.1 riastrad intel_uc_fini_hw(>->uc); 640 1.1 riastrad intel_uc_fini(>->uc); 641 1.1 riastrad 642 1.1 riastrad intel_engines_release(gt); 643 1.1 riastrad } 644 1.1 riastrad 645 1.1 riastrad void intel_gt_driver_unregister(struct intel_gt *gt) 646 1.1 riastrad { 647 1.1 riastrad intel_rps_driver_unregister(>->rps); 648 1.1 riastrad } 649 1.1 riastrad 650 1.1 riastrad void intel_gt_driver_release(struct intel_gt *gt) 651 1.1 riastrad { 652 1.1 riastrad struct i915_address_space *vm; 653 1.1 riastrad 654 1.1 riastrad vm = fetch_and_zero(>->vm); 655 1.1 riastrad if (vm) /* FIXME being called twice on error paths :( */ 656 1.1 riastrad i915_vm_put(vm); 657 1.1 riastrad 658 1.1 riastrad intel_gt_pm_fini(gt); 659 1.1 riastrad intel_gt_fini_scratch(gt); 660 1.1 riastrad } 661 1.1 riastrad 662 1.1 riastrad void intel_gt_driver_late_release(struct intel_gt *gt) 663 1.1 riastrad { 664 1.1 riastrad intel_uc_driver_late_release(>->uc); 665 1.1 riastrad intel_gt_fini_requests(gt); 666 1.1 riastrad intel_gt_fini_reset(gt); 667 1.1 riastrad intel_gt_fini_timelines(gt); 668 1.1 riastrad intel_engines_free(gt); 669 1.1 riastrad } 670