/*	$NetBSD: intel_context.c,v 1.1 2021/12/18 20:15:32 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_context.c,v 1.1 2021/12/18 20:15:32 riastradh Exp $");

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

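/*
 * All intel_context objects come from a dedicated slab cache, registered
 * with the i915 globals machinery (see the bottom of this file) so that
 * it is shrunk and torn down together with the driver's other caches.
 */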
static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

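/*
 * intel_context_create() allocates and initialises a context for @engine.
 * A sketch of the typical caller-side lifecycle, with error handling
 * elided:
 *
 *	ce = intel_context_create(engine);
 *	err = intel_context_pin(ce);	// allocate and pin backend state
 *	... submit requests ...
 *	intel_context_unpin(ce);
 *	intel_context_put(ce);		// drop the creation reference
 */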
struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	return ce;
}

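/*
 * Lazily allocate the backend state for @ce, serialised by ce->pin_mutex.
 * CONTEXT_ALLOC_BIT is only set once ce->ops->alloc() has succeeded, so a
 * failed allocation is simply retried on the next attempt.
 */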
int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

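/*
 * Mark @ce as active and, unless it is itself a barrier context,
 * preallocate the idle barrier nodes it will need, so that releasing the
 * activity later does not have to allocate under memory pressure.
 */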
static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

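/*
 * Pin @ce for use by the GPU. Repinning an already-pinned context is just
 * an atomic increment of ce->pin_count; only the first pin takes
 * ce->pin_mutex and performs the heavyweight setup (backend state
 * allocation, activity acquisition, ce->ops->pin()). The memory barrier
 * before the final atomic_inc() orders that setup before the pin count
 * becomes visible to concurrent fast-path pinners.
 */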
int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	err = i915_active_acquire(&ce->active);
	if (err)
		return err;

	if (mutex_lock_interruptible(&ce->pin_mutex)) {
		err = -EINTR;
		goto out_release;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto out_unlock;

		err = ce->ops->pin(ce);
		if (unlikely(err))
			goto err_active;

		CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
			 ce->ring->head, ce->ring->tail);

		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	goto out_unlock;

err_active:
	intel_context_active_release(ce);
out_unlock:
	mutex_unlock(&ce->pin_mutex);
out_release:
	i915_active_release(&ce->active);
	return err;
}

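/*
 * Release one pin; only the last unpin (pin_count dropping to zero)
 * unwinds the backend pin and the activity reference.
 */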
void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra reference now so that it is not freed before we
	 * finish dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}

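/*
 * Pin the context state (register image) into the GGTT above the
 * backend's pin bias and hide it from the shrinker until it is unpinned
 * again by __context_unpin_state().
 */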
static int __context_pin_state(struct i915_vma *vma)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}

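/*
 * Helpers keeping the ring pinned, and its vma tracked as active, for as
 * long as the context is active; __ring_retire() unwinds __ring_active()
 * in the reverse order.
 */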
static int __ring_active(struct intel_ring *ring)
{
	int err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		return err;

	err = intel_ring_pin(ring);
	if (err)
		goto err_active;

	return 0;

err_active:
	i915_active_release(&ring->vma->active);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	intel_ring_unpin(ring);
	i915_active_release(&ring->vma->active);
}

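/*
 * Activity callbacks for ce->active: __intel_context_active() runs on the
 * idle -> active transition and pins the ring, the timeline and (if
 * present) the context state, while __intel_context_retire() unwinds
 * those pins once the context idles again and drops the self-reference
 * taken while active.
 */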
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire\n");

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);

	intel_context_put(ce);
}

static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);
	int err;

	CE_TRACE(ce, "active\n");

	intel_context_get(ce);

	err = __ring_active(ce->ring);
	if (err)
		goto err_put;

	err = intel_timeline_pin(ce->timeline);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
err_put:
	intel_context_put(ce);
	return err;
}

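/*
 * First-stage construction: bind @ce to @engine, take a reference on the
 * engine's address space and initialise the trackers. Note that ce->ring
 * only encodes the desired ring size (4K) at this point; the backend
 * replaces it with a real ring when the context is first pinned.
 */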
void
intel_context_init(struct intel_context *ce,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_4K);

	ce->vm = i915_vm_get(engine->gt->vm);

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}

void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

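/*
 * Bracket a context's busy period on its engine: entering takes an engine
 * wakeref and marks the timeline as in use; exiting reverses both.
 */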
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

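/*
 * Prepare @rq, which runs on some other context, to remotely modify @ce
 * (e.g. rewrite this context's image). The request is ordered after @ce's
 * current activity and then added to the activity tracker, so that @ce's
 * image and timeline stay pinned until the modification is retired.
 */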
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by adding the request to
	 * the ce activity tracker.
	 *
	 * We only need to take one pin on its account; in other words, the
	 * pinned ce object is transferred to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}

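/*
 * Convenience wrapper: pin @ce just long enough to construct a request on
 * it; once constructed, the request holds its own references on what it
 * needs, so the temporary pin can be dropped before returning.
 */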
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif
