/src/sys/external/bsd/drm2/dist/drm/i915/gvt/ |
mmio_context.h |
    42  int ring_id;                                  member in struct:engine_mmio
    50  struct intel_vgpu *next, int ring_id);
|
execlist.c |
    47  #define execlist_ring_mmio(gvt, ring_id, offset) \
    48  (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
    62  static int ring_id_to_context_switch_event(unsigned int ring_id)
    64  if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
    67  return context_switch_events[ring_id];
   101  int ring_id = execlist->ring_id;              local in function:emulate_execlist_status
   103  ring_id, _EL_OFFSET_STATUS);
   136  int ring_id = execlist->ring_id;              local in function:emulate_csb_update
   269  int ring_id = execlist->ring_id;              local in function:get_next_execlist_slot
   387  int ring_id = workload->ring_id;              local in function:prepare_execlist_workload
   407  int ring_id = workload->ring_id;              local in function:complete_execlist_workload
   [all...]
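The execlist.c hits above boil down to two ring_id idioms: a per-ring MMIO address computed as the engine's mmio_base plus a register offset, and a bounds-checked table lookup from ring_id to a context-switch event. The following user-space C sketch mirrors both patterns; the base addresses, register offset, event numbers and the out-of-range return value are illustrative assumptions, not the driver's values.

/*
 * Sketch of the ring_id uses in execlist.c: the execlist_ring_mmio()
 * address computation (lines 47-48) and the bounds-checked event lookup
 * in ring_id_to_context_switch_event() (lines 62-67).
 */
#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a)  (sizeof(a) / sizeof((a)[0]))

/* Assumed per-ring MMIO bases and an execlist status register offset. */
static const uint32_t mmio_base[] = { 0x2000, 0x12000, 0x1c000, 0x22000 };
#define _EL_OFFSET_STATUS 0x234

/* Analog of execlist_ring_mmio(): per-ring base plus a register offset. */
#define execlist_ring_mmio(ring_id, offset) (mmio_base[(ring_id)] + (offset))

/* Placeholder context-switch event IDs, one per ring (invented values). */
static const int context_switch_events[] = { 40, 41, 42, 43 };

static int ring_id_to_context_switch_event(unsigned int ring_id)
{
	/* Refuse ring ids past the end of the table (WARN_ON in the driver). */
	if (ring_id >= ARRAY_SIZE(context_switch_events))
		return -1;

	return context_switch_events[ring_id];
}

int main(void)
{
	printf("ring 1 status reg: 0x%x\n",
	       (unsigned)execlist_ring_mmio(1, _EL_OFFSET_STATUS));
	printf("ring 1 event: %d\n", ring_id_to_context_switch_event(1));
	printf("ring 9 event: %d (out of range)\n",
	       ring_id_to_context_switch_event(9));
	return 0;
}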
mmio_context.c |
   171  int ring_id, i;                               local in function:load_render_mocs
   177  for (ring_id = 0; ring_id < cnt; ring_id++) {
   178  if (!HAS_ENGINE(dev_priv, ring_id))
   180  offset.reg = regs[ring_id];
   182  gen9_render_mocs.control_table[ring_id][i] =
   205  int ring_id = req->engine->id;                local in function:restore_context_mmio_for_inhibit
   206  int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
   222  if (mmio->ring_id != ring_id |
   [all...]
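Two patterns are visible in the mmio_context.c hits: load_render_mocs walks every possible ring while skipping engines the hardware lacks, and restore_context_mmio_for_inhibit skips save/restore list entries whose ring_id does not match the ring in use. A standalone sketch of both follows; the engine count, MOCS table size, register values and the has_engine()/mmio_read() helpers are invented stand-ins for I915_NUM_ENGINES, HAS_ENGINE() and the driver's MMIO accessors.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_ENGINES   5    /* assumption: stand-in for I915_NUM_ENGINES  */
#define MOCS_ENTRIES 64    /* assumption: size of one MOCS control table */

/* Hypothetical HAS_ENGINE() stand-in: engine 3 is absent on this SKU. */
static bool has_engine(int ring_id)
{
	return ring_id != 3;
}

/* Hypothetical MMIO read at a per-ring register offset. */
static uint32_t mmio_read(uint32_t offset)
{
	return offset ^ 0xdeadbeefu;
}

struct engine_mmio {
	int      ring_id;   /* which ring this save/restore entry belongs to */
	uint32_t reg;
};

static uint32_t mocs_control_table[NUM_ENGINES][MOCS_ENTRIES];

/* Analog of load_render_mocs: per-ring loop that skips missing engines. */
static void load_render_mocs_sketch(const uint32_t *regs)
{
	int ring_id, i;

	for (ring_id = 0; ring_id < NUM_ENGINES; ring_id++) {
		if (!has_engine(ring_id))
			continue;               /* skip engines this GPU lacks */
		for (i = 0; i < MOCS_ENTRIES; i++)
			mocs_control_table[ring_id][i] =
				mmio_read(regs[ring_id] + 4 * i);
	}
}

/* Only list entries tagged with the current ring are considered. */
static int count_ring_mmio(const struct engine_mmio *list, int n, int ring_id)
{
	int i, count = 0;

	for (i = 0; i < n; i++) {
		if (list[i].ring_id != ring_id)
			continue;
		count++;
	}
	return count;
}

int main(void)
{
	const uint32_t regs[NUM_ENGINES] = {
		0xc800, 0xc900, 0xca00, 0xcb00, 0xcc00
	};
	const struct engine_mmio list[] = {
		{ .ring_id = 0, .reg = 0x2244  },
		{ .ring_id = 1, .reg = 0x1c244 },
		{ .ring_id = 0, .reg = 0x22d0  },
	};

	load_render_mocs_sketch(regs);
	printf("ring 0 ctx mmio entries: %d\n", count_ring_mmio(list, 3, 0));
	return 0;
}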
sched_policy.c |
   452  int ring_id;                                  local in function:intel_vgpu_stop_schedule
   475  for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
   476  if (scheduler->engine_owner[ring_id] == vgpu) {
   477  intel_gvt_switch_mmio(vgpu, NULL, ring_id);
   478  scheduler->engine_owner[ring_id] = NULL;
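The sched_policy.c hits show intel_vgpu_stop_schedule handing engines back to the host: any ring still owned by the vGPU being stopped gets its MMIO context switched to the host (next == NULL) and its owner slot cleared. A minimal sketch of that loop, with simplified stand-ins for the scheduler, vGPU and intel_gvt_switch_mmio types:

#include <stdio.h>
#include <stddef.h>

#define NUM_ENGINES 5   /* assumption: stand-in for I915_NUM_ENGINES */

struct vgpu {
	int id;
};

struct scheduler {
	struct vgpu *engine_owner[NUM_ENGINES];  /* current owner per ring */
};

/* Stand-in for intel_gvt_switch_mmio(); "next == NULL" means the host. */
static void switch_mmio(struct vgpu *prev, struct vgpu *next, int ring_id)
{
	printf("ring %d: switch mmio vgpu%d -> %s\n", ring_id,
	       prev ? prev->id : -1, next ? "vgpu" : "host");
}

static void stop_schedule_sketch(struct scheduler *sched, struct vgpu *vgpu)
{
	int ring_id;

	for (ring_id = 0; ring_id < NUM_ENGINES; ring_id++) {
		if (sched->engine_owner[ring_id] == vgpu) {
			/* Hand the engine's MMIO state back to the host. */
			switch_mmio(vgpu, NULL, ring_id);
			sched->engine_owner[ring_id] = NULL;
		}
	}
}

int main(void)
{
	struct vgpu v = { .id = 1 };
	struct scheduler s = { .engine_owner = { &v, NULL, &v, NULL, NULL } };

	stop_schedule_sketch(&s, &v);
	return 0;
}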
|
scheduler.h |
    84  int ring_id;                                  member in struct:intel_vgpu_workload
   134  #define workload_q_head(vgpu, ring_id) \
   135  (&(vgpu->submission.workload_q_head[ring_id]))
   160  intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
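scheduler.h keys each vGPU's pending workloads by ring: workload_q_head(vgpu, ring_id) selects one queue head out of a per-engine array inside the submission state. The sketch below reproduces that indexing with a trivial singly linked list instead of the kernel's struct list_head; the field names and engine count are assumptions.

#include <stdio.h>
#include <stddef.h>

#define NUM_ENGINES 5   /* assumption: stand-in for I915_NUM_ENGINES */

struct workload {
	int              ring_id;  /* which engine this workload targets */
	struct workload *next;
};

struct vgpu_submission {
	struct workload *workload_q_head[NUM_ENGINES]; /* one queue per ring */
};

/* Analog of the workload_q_head() macro: pick the queue for one ring. */
#define workload_q_head(sub, ring_id) (&(sub)->workload_q_head[(ring_id)])

static void queue_workload(struct vgpu_submission *sub, struct workload *w)
{
	struct workload **head = workload_q_head(sub, w->ring_id);

	w->next = *head;        /* push at the head of that ring's queue */
	*head = w;
}

int main(void)
{
	struct vgpu_submission sub = { { NULL } };
	struct workload w = { .ring_id = 2, .next = NULL };

	queue_workload(&sub, &w);
	printf("ring 2 queue head targets ring %d\n",
	       (*workload_q_head(&sub, 2))->ring_id);
	return 0;
}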
|
execlist.h |
   175  int ring_id;                                  member in struct:intel_vgpu_execlist
   184  int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
|
/src/sys/external/bsd/drm2/dist/drm/amd/amdkfd/ |
cik_int.h |
    33  uint32_t ring_id;                             member in struct:cik_ih_ring_entry
|
kfd_int_process_v9.c |
   115  uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);    local in function:event_interrupt_wq_v9
   121  info.prot_valid = ring_id & 0x08;
   122  info.prot_read = ring_id & 0x10;
   123  info.prot_write = ring_id & 0x20;
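In kfd_int_process_v9.c the ring_id value recovered from the IH ring entry doubles as a flags word for memory-protection faults: bits 0x08, 0x10 and 0x20 map to prot_valid, prot_read and prot_write. The sketch below shows that decoding; the IH entry layout and the ring_id extraction helper are simplified assumptions standing in for SOC15_RING_ID_FROM_IH_ENTRY().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fault_info {
	bool prot_valid;
	bool prot_read;
	bool prot_write;
};

/* Assumed stand-in for SOC15_RING_ID_FROM_IH_ENTRY(): take the low byte. */
static uint16_t ring_id_from_ih_entry(const uint32_t *ih_ring_entry)
{
	return ih_ring_entry[0] & 0xff;
}

static struct fault_info decode_fault(const uint32_t *ih_ring_entry)
{
	uint16_t ring_id = ring_id_from_ih_entry(ih_ring_entry);
	struct fault_info info;

	/* Same bit masks as kfd_int_process_v9.c lines 121-123. */
	info.prot_valid = ring_id & 0x08;
	info.prot_read  = ring_id & 0x10;
	info.prot_write = ring_id & 0x20;
	return info;
}

int main(void)
{
	const uint32_t entry[4] = { 0x18, 0, 0, 0 }; /* valid + read fault */
	struct fault_info info = decode_fault(entry);

	printf("valid=%d read=%d write=%d\n",
	       info.prot_valid, info.prot_read, info.prot_write);
	return 0;
}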
|
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/ |
amdgpu_irq.h |
    51  unsigned ring_id;                             member in struct:amdgpu_iv_entry
|