/*	$NetBSD: intel_context.c,v 1.3 2021/12/19 11:38:04 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_context.c,v 1.3 2021/12/19 11:38:04 riastradh Exp $");

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

#include <linux/nbsd-namespace.h>

/*
 * Global slab cache for struct intel_context allocations, registered with
 * the i915_globals shrink/exit machinery (see the bottom of this file).
 */
static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

/* Allocate a zeroed intel_context from the global slab cache. */
static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

/* Return an intel_context to the global slab cache. */
void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

/*
 * Allocate and initialise a new context for @engine.
 *
 * Returns the new context, or ERR_PTR(-ENOMEM) on allocation failure.
 */
struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	return ce;
}

/*
 * Lazily allocate the backend state for @ce (via ce->ops->alloc), exactly
 * once; CONTEXT_ALLOC_BIT records completion.  Serialised by ce->pin_mutex.
 *
 * Returns 0 on success (including when already allocated), -EINTR if the
 * mutex wait was interrupted, or the error from ce->ops->alloc().
 */
int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

/*
 * Acquire the context's active tracker and, for non-barrier contexts,
 * preallocate the barrier tracking nodes on its engine.  On preallocation
 * failure the acquire is undone before returning the error.
 */
static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}

/* Flush the preallocated barriers, then drop the active reference. */
static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

/*
 * Pin the context for execution.
 *
 * Fast path: if pin_count is already non-zero, atomic_add_unless() bumps it
 * without taking the mutex-protected slow path.  Otherwise, under
 * ce->pin_mutex, the backend state is allocated (if needed), the active
 * tracker acquired, and ce->ops->pin() invoked; only then is pin_count made
 * non-zero, with a memory barrier so the pin is visible before the count.
 *
 * Returns 0 on success, -EINTR if interrupted waiting for the mutex, or the
 * first error from allocation/acquire/pin (with everything unwound).
 */
int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	err = i915_active_acquire(&ce->active);
	if (err)
		return err;

	if (mutex_lock_interruptible(&ce->pin_mutex)) {
		err = -EINTR;
		goto out_release;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto out_unlock;

		err = ce->ops->pin(ce);
		if (unlikely(err))
			goto err_active;

		CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
			 ce->ring->head, ce->ring->tail);

		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	goto out_unlock;

err_active:
	intel_context_active_release(ce);
out_unlock:
	mutex_unlock(&ce->pin_mutex);
out_release:
	i915_active_release(&ce->active);
	return err;
}

/*
 * Drop one pin on @ce; when the last pin is released, unpin the backend
 * state and release the active tracker (holding a temporary reference so
 * the context cannot be freed mid-teardown).
 */
void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra now so that it is not freed before we finish
	 * dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}

/*
 * Pin the context-state vma into the GGTT (high, above the engine's pin
 * bias), acquire its active tracker, and hide it from the shrinker.  The
 * backing object is marked dirty so its contents are preserved on swap-out.
 */
static int __context_pin_state(struct i915_vma *vma)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

/* Reverse of __context_pin_state(), in the opposite order. */
static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}

/* Acquire the ring vma's active tracker and pin the ring; unwinds on error. */
static int __ring_active(struct intel_ring *ring)
{
	int err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		return err;

	err = intel_ring_pin(ring);
	if (err)
		goto err_active;

	return 0;

err_active:
	i915_active_release(&ring->vma->active);
	return err;
}
/* Reverse of __ring_active(): unpin the ring, then release its tracker. */
static void __ring_retire(struct intel_ring *ring)
{
	intel_ring_unpin(ring);
	i915_active_release(&ring->vma->active);
}

/*
 * i915_active retire callback: the context has gone idle.  Marks the
 * context image valid, unpins the state/timeline/ring pinned by
 * __intel_context_active(), and drops the reference taken there.
 */
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire\n");

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);

	intel_context_put(ce);
}

/*
 * i915_active activate callback: the context is becoming busy.  Takes a
 * context reference and pins the ring, timeline, and (if present) the
 * context-state vma; each step is unwound in reverse order on failure.
 * Must mirror __intel_context_retire() exactly.
 */
static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);
	int err;

	CE_TRACE(ce, "active\n");

	intel_context_get(ce);

	err = __ring_active(ce->ring);
	if (err)
		goto err_put;

	err = intel_timeline_pin(ce->timeline);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
err_put:
	intel_context_put(ce);
	return err;
}
/*
 * Initialise a freshly allocated (zeroed) context for @engine: one kref,
 * the engine's context ops and sseu, a default 4K ring size request, a
 * reference on the engine's GT address space, empty signal lists, the pin
 * mutex, and the active tracker wired to the activate/retire callbacks
 * above.
 */
void
intel_context_init(struct intel_context *ce,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_4K);

	ce->vm = i915_vm_get(engine->gt->vm);

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}

/*
 * Tear down what intel_context_init() set up: drop the timeline (if any)
 * and vm references, and destroy the mutex and active tracker.
 */
void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

/* i915_globals shrink hook: trim the context slab cache. */
static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

/* i915_globals exit hook: destroy the context slab cache. */
static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

/*
 * Definition completing the tentative declaration at the top of the file,
 * installing the shrink/exit callbacks in the embedded i915_global base.
 */
static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

/*
 * Create the intel_context slab cache and register the global with the
 * i915_globals shrink/exit machinery.  Returns 0 or -ENOMEM.
 */
int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

/*
 * Default enter hook for engine contexts: wake the engine and enter the
 * timeline.  Paired with intel_context_exit_engine() below, in reverse
 * order.
 */
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

/* Default exit hook: leave the timeline, then release the engine wakeref. */
void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

/*
 * Prepare @rq (running on a *different* context) to remotely modify @ce:
 * order it after @ce's last request when their timelines differ, and hook
 * @ce's active tracker onto @rq so the context image and timeline stay
 * pinned until @rq retires.
 *
 * Returns 0 on success or a negative error from the fence/active helpers.
 */
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee context image and the timeline remains pinned until the
	 * modifying request is retired by setting the ce activity tracker.
	 *
	 * But we only need to take one pin on the account of it. Or in other
	 * words transfer the pinned ce object to tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}

/*
 * Convenience wrapper: pin @ce, create a request on it, unpin.  The
 * request itself keeps the context busy after the explicit pin is dropped.
 * Returns the request or an ERR_PTR.
 */
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif