    Searched refs:indirect_ctx (Results 1 - 5 of 5) sorted by relevancy

  /src/sys/external/bsd/drm2/dist/drm/i915/gvt/
scheduler.c 360 if (!wa_ctx->indirect_ctx.obj)
363 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
364 i915_gem_object_put(wa_ctx->indirect_ctx.obj);
366 wa_ctx->indirect_ctx.obj = NULL;
367 wa_ctx->indirect_ctx.shadow_va = NULL;
436 if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
541 (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
548 (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
549 wa_ctx->indirect_ctx.size;
551 if (wa_ctx->indirect_ctx.size == 0
1469 u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx; local in function:intel_vgpu_create_workload
    [all...]
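
The scheduler.c hits at lines 360-367 outline the teardown of the shadowed indirect context. A minimal sketch of that sequence, assuming it lives in a small release helper (the function name here is illustrative, not confirmed by the listing):

/* Reconstruction of scheduler.c lines 360-367: if a shadow object
 * was created for the guest's indirect-ctx batch, drop its kernel
 * mapping and its reference, then clear the cached pointers so a
 * later pass cannot double-free or touch a stale mapping. */
static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	if (!wa_ctx->indirect_ctx.obj)
		return;

	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
	i915_gem_object_put(wa_ctx->indirect_ctx.obj);

	wa_ctx->indirect_ctx.obj = NULL;
	wa_ctx->indirect_ctx.shadow_va = NULL;
}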
scheduler.h 77 struct shadow_indirect_ctx indirect_ctx; member in struct:intel_shadow_wa_ctx
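
The scheduler.h hit names the containing type. From the members actually exercised in the scheduler.c and cmd_parser.c hits, the bookkeeping plausibly looks like the sketch below; any field not visible in a hit is an assumption, and the real header may declare more (a per_ctx counterpart is handled alongside it, cf. the locals at scheduler.c line 1469):

struct drm_i915_gem_object;	/* opaque here; from the i915 GEM layer */

/* Sketch inferred from usage in the hits above; not the verbatim
 * scheduler.h declaration. */
struct shadow_indirect_ctx {
	struct drm_i915_gem_object *obj;	/* shadow copy of the guest BB */
	unsigned long guest_gma;	/* guest graphics address of the BB */
	unsigned long shadow_gma;	/* graphics address of the shadow copy */
	void *shadow_va;		/* kernel mapping of the shadow copy */
	u32 size;			/* indirect-ctx batch size in bytes */
};

struct intel_shadow_wa_ctx {
	struct shadow_indirect_ctx indirect_ctx;
	/* ... */
};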
cmd_parser.c 2824 if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
2828 ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32);
2829 ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
2831 gma_head = wa_ctx->indirect_ctx.guest_gma;
2832 gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
2833 gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
2839 s.ring_start = wa_ctx->indirect_ctx.guest_gma;
2843 s.rb_va = wa_ctx->indirect_ctx.shadow_va;
2852 wa_ctx->indirect_ctx.guest_gma, ring_size);
2938 int ctx_size = wa_ctx->indirect_ctx.size
    [all...]
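
The cmd_parser.c hits at lines 2824-2833 set up a parser pass over the shadowed indirect context as if it were a small ring: the tail reserves three extra dwords appended after the guest batch, and the scanned size is padded to a cacheline before rounding up. The listing truncates the alignment unit and the round_up() granularity, so the I915_GTT_PAGE_SIZE and PAGE_SIZE below are assumptions, as is the wrapper function:

/* Sketch of the scan-window arithmetic from cmd_parser.c lines
 * 2824-2833.  The two commented arguments stand in for values the
 * listing cuts off. */
static int prepare_indirect_ctx_scan(struct intel_shadow_wa_ctx *wa_ctx)
{
	unsigned long gma_head, gma_tail, gma_bottom;
	u32 ring_tail, ring_size;

	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
				I915_GTT_PAGE_SIZE)))	/* assumed unit */
		return -EINVAL;

	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32);
	ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
			PAGE_SIZE);			/* assumed unit */
	gma_head = wa_ctx->indirect_ctx.guest_gma;
	gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
	gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;

	/* These bounds then seed the parser state (s.ring_start,
	 * s.rb_va, ...) as in lines 2839-2852. */
	return 0;
}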
  /src/sys/external/bsd/drm2/dist/drm/i915/gt/
intel_engine_types.h 101 } indirect_ctx, per_ctx; member in struct:i915_ctx_workarounds
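
The closing brace in this hit ends an inline struct definition whose type name, struct i915_wa_ctx_bb, appears in the intel_lrc.c hit at line 3349. Since intel_lrc.c reads .offset and .size at lines 4524-4529, a hedged reconstruction is:

struct i915_vma;	/* opaque here; the backing allocation type */

/* Sketch implied by the hits; fields beyond offset and size, and the
 * vma member, are assumptions. */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;	/* byte offset of this BB in the backing buffer */
		u32 size;	/* BB length in bytes */
	} indirect_ctx, per_ctx;

	struct i915_vma *vma;	/* assumed shared backing for both BBs */
};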
intel_lrc.c 3137 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
3349 struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
4524 if (wa_ctx->indirect_ctx.size) {
4528 (ggtt_offset + wa_ctx->indirect_ctx.offset) |
4529 (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
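
Lines 4524-4529 show how the indirect-ctx workaround batch is advertised to the hardware: its cacheline-aligned GGTT address is packed into one context-image dword together with its length in cachelines (which is why CACHELINE_BYTES divides the size). A sketch of that packing; the helper name and the register index are illustrative, and i915_ggtt_offset(wa_ctx->vma) is an assumed source for the ggtt_offset seen in the hit:

/* Sketch of intel_lrc.c lines 4524-4529.  Only the packing in the
 * regs[] assignment is taken from the hits; the rest is scaffolding. */
static void set_indirect_ctx_bb(u32 *regs, u32 indirect_ctx_reg,
		const struct i915_ctx_workarounds *wa_ctx)
{
	if (wa_ctx->indirect_ctx.size) {
		const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);

		regs[indirect_ctx_reg + 1] =
			(ggtt_offset + wa_ctx->indirect_ctx.offset) |
			(wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
	}
}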

Completed in 22 milliseconds