/src/sys/external/bsd/drm2/dist/drm/i915/gt/
intel_context.h
    22  #define CE_TRACE(ce, fmt, ...) do { \
    23  const struct intel_context *ce__ = (ce); \
    29  void intel_context_init(struct intel_context *ce,
    31  void intel_context_fini(struct intel_context *ce);
    36  int intel_context_alloc_state(struct intel_context *ce);
    38  void intel_context_free(struct intel_context *ce);
    42   * @ce - the context
    48  static inline int intel_context_lock_pinned(struct intel_context *ce)
    49  __acquires(ce->pin_mutex)
    51  return mutex_lock_interruptible(&ce->pin_mutex)
    [all...]
intel_context.c
    35  void intel_context_free(struct intel_context *ce)
    37  kmem_cache_free(global.slab_ce, ce);
    43  struct intel_context *ce;  [local in function: intel_context_create]
    45  ce = intel_context_alloc();
    46  if (!ce)
    49  intel_context_init(ce, engine);
    50  return ce;
    53  int intel_context_alloc_state(struct intel_context *ce)
    57  if (mutex_lock_interruptible(&ce->pin_mutex))
    60  if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
    227  struct intel_context *ce = container_of(active, typeof(*ce), active);  [local in function: __intel_context_retire]
    243  struct intel_context *ce = container_of(active, typeof(*ce), active);  [local in function: __intel_context_active]
    [all...]
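The matches at lines 227 and 243 show the container_of() idiom: recovering the enclosing intel_context from a pointer to its embedded active member. A minimal, compilable user-space sketch of the idiom follows; the struct names are illustrative stand-ins, not the real i915 definitions.

    #include <stddef.h>
    #include <stdio.h>

    /* Classic container_of(): member pointer -> enclosing struct. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct i915_active_sketch {             /* stand-in for struct i915_active */
            int count;
    };

    struct intel_context_sketch {           /* stand-in for struct intel_context */
            int id;
            struct i915_active_sketch active;       /* embedded member */
    };

    static void
    retire_cb(struct i915_active_sketch *active)
    {
            /* Same shape as __intel_context_retire(): member -> container. */
            struct intel_context_sketch *ce =
                container_of(active, struct intel_context_sketch, active);

            printf("retiring context %d\n", ce->id);
    }

    int
    main(void)
    {
            struct intel_context_sketch ce = { .id = 7 };

            retire_cb(&ce.active);          /* prints "retiring context 7" */
            return 0;
    }

The callback receives only the embedded member; container_of() gets the full context back without storing a redundant back-pointer.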
intel_context_types.h
    30  int (*alloc)(struct intel_context *ce);
    32  int (*pin)(struct intel_context *ce);
    33  void (*unpin)(struct intel_context *ce);
    35  void (*enter)(struct intel_context *ce);
    36  void (*exit)(struct intel_context *ce);
    38  void (*reset)(struct intel_context *ce);
    47  #define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
    48  #define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2
    [all...]
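The inflight macros at lines 47-48 rely on pointer tagging: an aligned pointer has zero low-order bits, so a small count can be smuggled into them. The sketch below reimplements ptr_mask_bits()/ptr_unmask_bits() from first principles as an illustration; it is not the kernel's code.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Pack a small integer into the low n bits of an aligned pointer. */
    #define ptr_pack_bits(p, bits, n) \
            ((void *)((uintptr_t)(p) | ((uintptr_t)(bits) & (((uintptr_t)1 << (n)) - 1))))
    #define ptr_mask_bits(p, n) \
            ((void *)((uintptr_t)(p) & ~(((uintptr_t)1 << (n)) - 1)))
    #define ptr_unmask_bits(p, n) \
            ((unsigned)((uintptr_t)(p) & (((uintptr_t)1 << (n)) - 1)))

    int
    main(void)
    {
            int *engine = malloc(sizeof(*engine)); /* at least 4-byte aligned */
            void *inflight;

            assert(((uintptr_t)engine & 3) == 0);

            inflight = ptr_pack_bits(engine, 2, 2); /* pointer + 2-bit count */
            printf("ptr=%p count=%u\n",
                ptr_mask_bits(inflight, 2), ptr_unmask_bits(inflight, 2));

            free(engine);
            return 0;
    }

One word stores both the engine pointer and a small in-flight count, which is why the macros above take the bit width (2) as an argument.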
selftest_mocs.c
    208  struct intel_context *ce)
    218  rq = intel_context_create_request(ce);
    232  if (!err && ce->engine->class == RENDER_CLASS)
    244  err = check_mocs_table(ce->engine, &arg->table, &vaddr);
    245  if (!err && ce->engine->class == RENDER_CLASS)
    246  err = check_l3cc_table(ce->engine, &arg->table, &vaddr);
    295  struct intel_context *ce;  [local in function: live_mocs_clean]
    297  ce = intel_context_create(engine);
    298  if (IS_ERR(ce)) {
    299  err = PTR_ERR(ce);
    389  struct intel_context *ce;  [local in function: live_mocs_reset]
    [all...]
intel_engine_pm.c
    28  struct intel_context *ce;  [local in function: __engine_unpark]
    44  ce = engine->kernel_context;
    45  if (ce) {
    46  GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));
    49  if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
    50  struct drm_i915_gem_object *obj = ce->state->obj;
    61  ce->ops->reset(ce);
    73  static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
    78  mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_)
    148  struct intel_context *ce = engine->kernel_context;  [local in function: switch_to_kernel_context]
    [all...]
mock_engine.c
    133  static void mock_context_unpin(struct intel_context *ce)
    139  struct intel_context *ce = container_of(ref, typeof(*ce), ref);  [local in function: mock_context_destroy]
    141  GEM_BUG_ON(intel_context_is_pinned(ce));
    143  if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
    144  mock_ring_free(ce->ring);
    145  mock_timeline_unpin(ce->timeline);
    148  intel_context_fini(ce);
    149  intel_context_free(ce);
    152  static int mock_context_alloc(struct intel_context *ce)
    328  struct intel_context *ce;  [local in function: mock_engine_init]
    [all...]
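mock_engine.c supplies test doubles for the intel_context_ops hooks declared in intel_context_types.h above (alloc/pin/unpin/...). A compilable user-space sketch of that ops-table pattern, with stand-in names, follows.

    #include <stdio.h>

    struct ctx;

    struct ctx_ops {                        /* cf. struct intel_context_ops */
            int  (*alloc)(struct ctx *ce);
            int  (*pin)(struct ctx *ce);
            void (*unpin)(struct ctx *ce);
    };

    struct ctx {
            const struct ctx_ops *ops;      /* backend chosen at init time */
            int pinned;
    };

    /* A trivial "mock" backend, in the spirit of mock_context_alloc(). */
    static int  mock_alloc(struct ctx *ce) { (void)ce; return 0; }
    static int  mock_pin(struct ctx *ce)   { ce->pinned++; return 0; }
    static void mock_unpin(struct ctx *ce) { ce->pinned--; }

    static const struct ctx_ops mock_ops = {
            .alloc = mock_alloc,
            .pin   = mock_pin,
            .unpin = mock_unpin,
    };

    int
    main(void)
    {
            struct ctx ce = { .ops = &mock_ops };

            /* Callers go through the table, never a backend directly. */
            ce.ops->alloc(&ce);
            ce.ops->pin(&ce);
            printf("pinned=%d\n", ce.pinned);       /* pinned=1 */
            ce.ops->unpin(&ce);
            return 0;
    }

Because every caller dispatches through ce->ops, the selftests can swap in a mock backend without touching the core context code.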
intel_breadcrumbs.c
    98  check_signal_order(struct intel_context *ce, struct i915_request *rq)
    100  if (!list_is_last(&rq->signal_link, &ce->signals) &&
    105  if (!list_is_first(&rq->signal_link, &ce->signals) &&
    160  struct intel_context *ce, *cn;  [local in function: signal_irq_work]
    169  list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
    170  GEM_BUG_ON(list_empty(&ce->signals));
    172  list_for_each_safe(pos, next, &ce->signals) {
    176  GEM_BUG_ON(!check_signal_order(ce, rq));
    202  if (!list_is_first(pos, &ce->signals)) {
    204  __list_del_many(&ce->signals, pos)
    305  struct intel_context *ce = rq->context;  [local in function: i915_request_enable_breadcrumb]
    362  struct intel_context *ce = rq->context;  [local in function: i915_request_cancel_breadcrumb]
    377  struct intel_context *ce;  [local in function: intel_engine_print_breadcrumbs]
    [all...]
selftest_rc6.c
    78  static const u32 *__live_rc6_ctx(struct intel_context *ce)
    85  rq = intel_context_create_request(ce);
    101  *cs++ = ce->timeline->hwsp_offset + 8;
    161  struct intel_context *ce;  [local in function: live_rc6_ctx_wa]
    168  ce = intel_context_create(engine);
    169  if (IS_ERR(ce)) {
    170  err = PTR_ERR(ce);
    175  res = __live_rc6_ctx(ce);
    177  intel_context_put(ce);
/src/lib/libc/citrus/
citrus_stdenc.h
    65  _citrus_stdenc_init_state(struct _citrus_stdenc * __restrict ce,
    68  _DIAGASSERT(ce && ce->ce_ops && ce->ce_ops->eo_init_state);
    69  return (*ce->ce_ops->eo_init_state)(ce, ps);
    73  _citrus_stdenc_mbtocs(struct _citrus_stdenc * __restrict ce,
    79  _DIAGASSERT(ce && ce->ce_ops && ce->ce_ops->eo_mbtocs)
    [all...]
citrus_stdenc.c
    57  get_state_desc_default(struct _citrus_stdenc * __restrict ce,
    72  struct _citrus_stdenc *ce;  [local in function: _citrus_stdenc_open]
    83  ce = malloc(sizeof(*ce));
    84  if (ce==NULL) {
    88  ce->ce_ops = NULL;
    89  ce->ce_closure = NULL;
    90  ce->ce_module = NULL;
    91  ce->ce_traits = NULL;
    97  ce->ce_module = handle
    [all...]
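Lines 88-91 show the open-path idiom: after malloc, every resource field is nulled up front so a single error path can release only what was actually acquired. A compilable sketch of the same shape, with hypothetical field and function names (not the real citrus internals):

    #include <errno.h>
    #include <stdlib.h>

    struct enc_handle {
            void *eh_ops;
            void *eh_closure;
            void *eh_module;
    };

    static int
    enc_open(struct enc_handle **retp)
    {
            struct enc_handle *ce;
            int ret;

            ce = malloc(sizeof(*ce));
            if (ce == NULL)
                    return (errno);

            /* Null everything so the error path can free unconditionally. */
            ce->eh_ops = NULL;
            ce->eh_closure = NULL;
            ce->eh_module = NULL;

            ce->eh_module = malloc(16);     /* stand-in for module lookup */
            if (ce->eh_module == NULL) {
                    ret = ENOMEM;
                    goto err;
            }

            *retp = ce;
            return (0);
    err:
            free(ce->eh_module);            /* free(NULL) is a no-op */
            free(ce);
            return (ret);
    }

    int
    main(void)
    {
            struct enc_handle *ce;

            if (enc_open(&ce) == 0) {
                    free(ce->eh_module);
                    free(ce);
            }
            return 0;
    }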
citrus_stdenc_template.h
    64  _FUNCNAME(stdenc_init)(struct _citrus_stdenc * __restrict ce,
    85  ce->ce_closure = ei;
    87  et->et_mb_cur_max = _ENCODING_MB_CUR_MAX(_CE_TO_EI(ce));
    93  _FUNCNAME(stdenc_uninit)(struct _citrus_stdenc * __restrict ce)
    95  if (ce) {
    96  _FUNCNAME(encoding_module_uninit)(_CE_TO_EI(ce));
    97  free(ce->ce_closure);
    102  _FUNCNAME(stdenc_init_state)(struct _citrus_stdenc * __restrict ce,
    105  _FUNCNAME(init_state)(_CE_TO_EI(ce), _TO_STATE(ps));
    111  _FUNCNAME(stdenc_mbtocs)(struct _citrus_stdenc * __restrict ce,
    [all...]
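Tying the three citrus files together: citrus_stdenc.h wraps each operation in an inline that asserts the handle and dispatches through ce->ce_ops (line 69 above), while the template header generates the per-encoding implementations those ops point at. A compilable sketch of that guarded-dispatch side, everything a stand-in:

    #include <assert.h>
    #include <stdio.h>

    struct stdenc;

    struct stdenc_ops {
            int (*eo_init_state)(struct stdenc *ce, void *ps);
    };

    struct stdenc {
            const struct stdenc_ops *ce_ops;
    };

    /* What the template header would instantiate for one encoding. */
    static int
    utf8_init_state(struct stdenc *ce, void *ps)
    {
            (void)ce; (void)ps;
            printf("state initialized\n");
            return (0);
    }

    static const struct stdenc_ops utf8_ops = {
            .eo_init_state = utf8_init_state,
    };

    /* Mirror of the _DIAGASSERT-guarded wrapper in citrus_stdenc.h. */
    static inline int
    stdenc_init_state(struct stdenc *ce, void *ps)
    {
            assert(ce && ce->ce_ops && ce->ce_ops->eo_init_state);
            return ((*ce->ce_ops->eo_init_state)(ce, ps));
    }

    int
    main(void)
    {
            struct stdenc ce = { .ce_ops = &utf8_ops };
            char state[8];

            return (stdenc_init_state(&ce, state));
    }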
/src/sys/external/bsd/drm2/dist/drm/i915/gem/ |
i915_gem_object_blt.h
    20  struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
    24  struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
    29  void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma);
    32  struct intel_context *ce,
    37  struct intel_context *ce);
i915_gem_client_blt.h
    18  struct intel_context *ce,
i915_gem_object_blt.c
    20  struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
    24  struct drm_i915_private *i915 = ce->vm->i915;
    35  GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
    36  intel_engine_pm_get(ce->engine);
    41  pool = intel_engine_get_pool(ce->engine, size);
    86  intel_gt_chipset_flush(ce->vm->gt);
    90  batch = i915_vma_instance(pool->obj, ce->vm, NULL);
    106  intel_engine_pm_put(ce->engine);
    125  void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
    129  intel_engine_pm_put(ce->engine)
    [all...]
/src/sys/kern/ |
subr_callback.c
    62  callback_register(struct callback_head *ch, struct callback_entry *ce,
    67  ce->ce_func = fn;
    68  ce->ce_obj = obj;
    70  TAILQ_INSERT_TAIL(&ch->ch_q, ce, ce_q);
    76  callback_unregister(struct callback_head *ch, struct callback_entry *ce)
    82  if (__predict_false(ch->ch_next == ce)) {
    83  ch->ch_next = TAILQ_NEXT(ce, ce_q);
    85  TAILQ_REMOVE(&ch->ch_q, ce, ce_q);
    93  struct callback_entry *ce;  [local in function: callback_runone]
    99  ce = ch->ch_next
    [all...]
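The detail worth noting in callback_unregister() (lines 82-85) is the cursor fix-up: before removing an entry, the head's ch_next round-robin cursor is moved off it so later runs don't chase a freed entry. A compilable user-space sketch using the same <sys/queue.h> macros; locking and __predict_false() are omitted, and the names mirror but do not reproduce the kernel code.

    #include <stdio.h>
    #include <sys/queue.h>

    struct callback_entry {
            TAILQ_ENTRY(callback_entry) ce_q;
            int (*ce_func)(struct callback_entry *, void *, void *);
            void *ce_obj;
    };

    struct callback_head {
            TAILQ_HEAD(, callback_entry) ch_q;
            struct callback_entry *ch_next; /* round-robin cursor */
    };

    static void
    callback_register(struct callback_head *ch, struct callback_entry *ce,
        void *obj, int (*fn)(struct callback_entry *, void *, void *))
    {
            ce->ce_func = fn;
            ce->ce_obj = obj;
            TAILQ_INSERT_TAIL(&ch->ch_q, ce, ce_q);
    }

    static void
    callback_unregister(struct callback_head *ch, struct callback_entry *ce)
    {
            /* Don't leave the cursor dangling on the removed entry. */
            if (ch->ch_next == ce)
                    ch->ch_next = TAILQ_NEXT(ce, ce_q);
            TAILQ_REMOVE(&ch->ch_q, ce, ce_q);
    }

    static int
    say_hello(struct callback_entry *ce, void *obj, void *arg)
    {
            (void)ce; (void)arg;
            printf("%s\n", (const char *)obj);
            return 0;
    }

    int
    main(void)
    {
            struct callback_head ch;
            struct callback_entry e1, e2;

            TAILQ_INIT(&ch.ch_q);
            ch.ch_next = NULL;

            callback_register(&ch, &e1, "first", say_hello);
            callback_register(&ch, &e2, "second", say_hello);

            ch.ch_next = TAILQ_FIRST(&ch.ch_q);
            callback_unregister(&ch, &e1);  /* cursor slides to e2 */
            ch.ch_next->ce_func(ch.ch_next, ch.ch_next->ce_obj, NULL);
            return 0;
    }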
/src/sys/external/bsd/drm2/dist/drm/i915/selftests/ |
mock_request.h
    35  mock_request(struct intel_context *ce, unsigned long delay);
mock_request.c
    36  mock_request(struct intel_context *ce, unsigned long delay)
    41  request = intel_context_create_request(ce);
/src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/engine/ce/ |
priv.h
    6  #include <engine/ce.h>
nouveau_nvkm_engine_ce_gk104.c
    52  gk104_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
    54  struct nvkm_subdev *subdev = &ce->subdev;
    64  gk104_ce_intr(struct nvkm_engine *ce)
    66  const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x1000;
    67  struct nvkm_subdev *subdev = &ce->subdev;
    82  gk104_ce_intr_launcherr(ce, base);
nouveau_nvkm_engine_ce_gp100.c
    54  gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
    56  struct nvkm_subdev *subdev = &ce->subdev;
    65  gp100_ce_intr(struct nvkm_engine *ce)
    67  const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x80;
    68  struct nvkm_subdev *subdev = &ce->subdev;
    83  gp100_ce_intr_launcherr(ce, base);
nouveau_nvkm_engine_ce_gf100.c
    35  gf100_ce_init(struct nvkm_falcon *ce)
    37  struct nvkm_device *device = ce->engine.subdev.device;
    38  const int index = ce->engine.subdev.index - NVKM_ENGINE_CE0;
    39  nvkm_wr32(device, ce->addr + 0x084, index);
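Across these three files, each copy engine's registers are found by a fixed stride from a base, indexed by (subdev.index - NVKM_ENGINE_CE0): GK104 spaces instances 0x1000 apart, GP100 only 0x80. A small compilable sketch of the addressing arithmetic; the enum value below is an arbitrary stand-in, not the real NVKM constant.

    #include <stdio.h>

    enum { NVKM_ENGINE_CE0 = 10 };  /* illustrative stand-in value */

    /* Per-instance register base: instance number times hardware stride. */
    static unsigned
    ce_intr_base(int subdev_index, unsigned stride)
    {
            return (unsigned)(subdev_index - NVKM_ENGINE_CE0) * stride;
    }

    int
    main(void)
    {
            for (int i = 0; i < 3; i++)
                    printf("CE%d: gk104 base=0x%05x gp100 base=0x%04x\n",
                        i,
                        ce_intr_base(NVKM_ENGINE_CE0 + i, 0x1000),
                        ce_intr_base(NVKM_ENGINE_CE0 + i, 0x80));
            return 0;
    }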
/src/usr.bin/indent/ |
.indent.pro
    5  -ce  /* Place '} else' on the same line. */
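-ce is indent(1)'s "cuddle else" switch. A hypothetical illustration of the two layouts (a() and b() are placeholder declarations, not from the source):

    void a(void);
    void b(void);

    /* With -nce, indent(1) breaks the line before the else: */
    void
    f(int cond)
    {
            if (cond) {
                    a();
            }
            else {
                    b();
            }
    }

    /* With -ce, as this .indent.pro requests, the else is cuddled: */
    void
    g(int cond)
    {
            if (cond) {
                    a();
            } else {
                    b();
            }
    }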
/src/sys/external/bsd/drm2/dist/drm/i915/gem/selftests/ |
igt_gem_utils.c
    26  struct intel_context *ce;  [local in function: igt_request_alloc]
    34  ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
    35  if (IS_ERR(ce))
    36  return ERR_CAST(ce);
    38  rq = intel_context_create_request(ce);
    39  intel_context_put(ce);
    112  int igt_gpu_fill_dw(struct intel_context *ce,
    121  GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
    128  rq = intel_context_create_request(ce);
    135  if (INTEL_GEN(ce->vm->i915) <= 5
    [all...]
i915_gem_context.c
    191  struct intel_context *ce[2];  [member in struct: parallel_switch]
    206  for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) {
    209  rq = i915_request_create(arg->ce[n]);
    232  pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
    246  for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
    250  rq = i915_request_create(arg->ce[n]);
    273  pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
    290  struct intel_context *ce;  [local in function: live_parallel_switch]
    323  for_each_gem_engine(ce, engines, it) {
    324  err = intel_context_pin(ce);
    680  struct intel_context *ce;  [local in function: igt_ctx_exec]
    813  struct intel_context *ce;  [local in function: igt_shared_ctx_exec]
    1246  struct intel_context *ce;  [local in function: __igt_ctx_sseu]
    1356  struct intel_context *ce;  [local in function: igt_ctx_readonly]
    [all...]
igt_gem_utils.h
    30  int igt_gpu_fill_dw(struct intel_context *ce,