/src/sys/external/bsd/drm2/dist/drm/i915/gvt/
mmio_context.h
    42  int ring_id;    member in struct:engine_mmio
    50  struct intel_vgpu *next, int ring_id);
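The mmio_context.h hits show that each save/restore register entry is tagged with the ring_id of the engine it belongs to, and that intel_gvt_switch_mmio() takes the previous and next vGPU plus a ring_id. A minimal sketch of that shape; only ring_id and the function parameters come from the listing, the other struct fields are illustrative placeholders, not the driver's real layout:

    #include <stdint.h>

    /* Sketch of a per-engine MMIO save/restore entry keyed by ring_id.
     * Field names other than ring_id are hypothetical. */
    struct engine_mmio {
        int      ring_id;   /* engine this register belongs to */
        uint32_t reg;       /* register offset */
        uint32_t mask;      /* bits to preserve across a switch */
        uint32_t value;     /* last saved value */
    };

    struct intel_vgpu;      /* opaque here */

    /* Parameters as in the listing: switch the per-engine MMIO state of
     * ring_id from the previous owner to the next one. */
    void intel_gvt_switch_mmio(struct intel_vgpu *pre,
        struct intel_vgpu *next, int ring_id);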
execlist.c
    47  #define execlist_ring_mmio(gvt, ring_id, offset) \
    48      (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
    62  static int ring_id_to_context_switch_event(unsigned int ring_id)
    64  if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
    67  return context_switch_events[ring_id];
   101  int ring_id = execlist->ring_id;    local in function:emulate_execlist_status
   103  ring_id, _EL_OFFSET_STATUS);
   136  int ring_id = execlist->ring_id;    local in function:emulate_csb_update
   269  int ring_id = execlist->ring_id;    local in function:get_next_execlist_slot
   387  int ring_id = workload->ring_id;    local in function:prepare_execlist_workload
   407  int ring_id = workload->ring_id;    local in function:complete_execlist_workload
   [all...]
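The execlist.c hits show two recurring patterns: a per-engine register is formed as the engine's mmio_base plus a fixed offset (the execlist_ring_mmio macro), and a ring_id is only used to index the context_switch_events[] table after a bounds check. A standalone sketch of the bounds-checked lookup, with made-up event values:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Made-up event codes standing in for the driver's
     * context_switch_events[] table. */
    static const int context_switch_events[] = { 10, 11, 12, 13, 14 };

    /* Same shape as ring_id_to_context_switch_event(): reject an
     * out-of-range ring_id before indexing the table (the driver WARNs
     * and returns an error there). */
    static int ring_id_to_context_switch_event(unsigned int ring_id)
    {
        if (ring_id >= ARRAY_SIZE(context_switch_events))
            return -1;
        return context_switch_events[ring_id];
    }

    int main(void)
    {
        printf("ring 2 -> event %d\n", ring_id_to_context_switch_event(2));
        printf("ring 9 -> event %d\n", ring_id_to_context_switch_event(9));
        return 0;
    }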
mmio_context.c
   171  int ring_id, i;    local in function:load_render_mocs
   177  for (ring_id = 0; ring_id < cnt; ring_id++) {
   178  if (!HAS_ENGINE(dev_priv, ring_id))
   180  offset.reg = regs[ring_id];
   182  gen9_render_mocs.control_table[ring_id][i] =
   205  int ring_id = req->engine->id;    local in function:restore_context_mmio_for_inhibit
   206  int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
   222  if (mmio->ring_id != ring_id
   [all...]
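load_render_mocs() above iterates ring_id over the candidate engines, skips engines the device does not have, and snapshots that engine's MOCS control registers into a table indexed by [ring_id][i]. A hedged sketch of that loop; HAS_ENGINE, the register bases, and the entry count are stubbed with assumed values:

    #include <stdint.h>
    #include <stdbool.h>

    #define NUM_ENGINES   5     /* assumption for the sketch */
    #define MOCS_ENTRIES 64     /* sketch value, not the hardware count */

    /* Stand-ins for the driver's helpers; in the driver these would be
     * HAS_ENGINE() and a real MMIO read. */
    static bool has_engine(int ring_id) { return ring_id != 3; }
    static uint32_t mmio_read(uint32_t reg) { return reg ^ 0xdeadbeefu; }

    static uint32_t control_table[NUM_ENGINES][MOCS_ENTRIES];

    /* Same shape as load_render_mocs(): walk every candidate ring_id,
     * skip engines the device doesn't have, and snapshot that engine's
     * MOCS control registers starting at a per-engine base register. */
    static void load_render_mocs(const uint32_t *regs, int cnt)
    {
        uint32_t offset;
        int ring_id, i;

        for (ring_id = 0; ring_id < cnt; ring_id++) {
            if (!has_engine(ring_id))
                continue;
            offset = regs[ring_id];
            for (i = 0; i < MOCS_ENTRIES; i++) {
                control_table[ring_id][i] = mmio_read(offset);
                offset += 4;    /* next 32-bit register */
            }
        }
    }

    int main(void)
    {
        /* Hypothetical per-engine MOCS base registers. */
        static const uint32_t regs[NUM_ENGINES] = {
            0xc800, 0xcc00, 0xd000, 0xd400, 0xd800 };

        load_render_mocs(regs, NUM_ENGINES);
        return 0;
    }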
sched_policy.c
   452  int ring_id;    local in function:intel_vgpu_stop_schedule
   475  for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
   476  if (scheduler->engine_owner[ring_id] == vgpu) {
   477  intel_gvt_switch_mmio(vgpu, NULL, ring_id);
   478  scheduler->engine_owner[ring_id] = NULL;
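intel_vgpu_stop_schedule() (sched_policy.c above) walks every engine and, for each one still owned by the vGPU being stopped, switches that engine's MMIO state back to the host (next == NULL) and clears the ownership slot. A sketch of that loop with the surrounding types reduced to stubs:

    #include <stddef.h>

    #define NUM_ENGINES 5    /* stand-in for I915_NUM_ENGINES */

    struct intel_vgpu;       /* opaque for the sketch */

    struct gvt_scheduler {
        /* which vGPU currently owns each engine's MMIO state */
        struct intel_vgpu *engine_owner[NUM_ENGINES];
    };

    /* Stand-in for intel_gvt_switch_mmio(); next == NULL means
     * "back to host". */
    static void switch_mmio(struct intel_vgpu *prev,
        struct intel_vgpu *next, int ring_id)
    {
        (void)prev; (void)next; (void)ring_id;
    }

    /* Mirrors the intel_vgpu_stop_schedule() loop above: every engine
     * still owned by the stopping vGPU is handed back to the host and
     * the ownership slot is cleared. */
    void stop_schedule(struct gvt_scheduler *scheduler,
        struct intel_vgpu *vgpu)
    {
        int ring_id;

        for (ring_id = 0; ring_id < NUM_ENGINES; ring_id++) {
            if (scheduler->engine_owner[ring_id] == vgpu) {
                switch_mmio(vgpu, NULL, ring_id);
                scheduler->engine_owner[ring_id] = NULL;
            }
        }
    }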
scheduler.h
    84  int ring_id;    member in struct:intel_vgpu_workload
   134  #define workload_q_head(vgpu, ring_id) \
   135      (&(vgpu->submission.workload_q_head[ring_id]))
   160  intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
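scheduler.h shows that every intel_vgpu_workload records the ring_id it targets and that the vGPU keeps one workload queue per ring, so workload_q_head(vgpu, ring_id) just indexes an array of list heads. A reduced sketch of that layout; the list type, the engine count, and the field set are stand-ins:

    #define NUM_ENGINES 5    /* stand-in for I915_NUM_ENGINES */

    /* Minimal list head, standing in for the kernel's struct list_head. */
    struct list_head { struct list_head *prev, *next; };

    /* Each workload records the ring it targets... */
    struct intel_vgpu_workload {
        int ring_id;              /* engine this workload is submitted to */
        struct list_head list;    /* link into the per-ring queue */
    };

    /* ...and the vGPU keeps one queue head per ring, so the macro from
     * the listing just indexes by ring_id. */
    struct intel_vgpu_submission {
        struct list_head workload_q_head[NUM_ENGINES];
    };

    struct intel_vgpu {
        struct intel_vgpu_submission submission;
    };

    #define workload_q_head(vgpu, ring_id) \
        (&(vgpu->submission.workload_q_head[ring_id]))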
execlist.h
   175  int ring_id;    member in struct:intel_vgpu_execlist
   184  int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
scheduler.c
   106  if (workload->ring_id != RCS0)
   136  int ring_id = workload->ring_id;    local in function:populate_shadow_context
   162  if (ring_id == RCS0) {
   183  gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
   186  context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
   190  if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
   218  static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
   221  u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
   239  enum intel_engine_id ring_id = req->engine->id;    local in function:shadow_context_status_change
   685  int ring_id = workload->ring_id;    local in function:dispatch_workload
   976  int ring_id;    member in struct:workload_thread_param
   983  int ring_id = p->ring_id;    local in function:workload_thread
   [all...]
handlers.c
   522  int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);    local in function:force_nonpriv_write
   527  if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
   529  vgpu->id, ring_id, offset, bytes);
   533  ring_base = dev_priv->engine[ring_id]->mmio_base;
  1486  int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);    local in function:hws_pga_write
  1498  if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
  1503  vgpu->hws_pga[ring_id] = value;
  1663  int ring_id;    local in function:mmio_read_from_hw
  1666  ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
  1690  int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);    local in function:elsp_mmio_write
  1717  int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);    local in function:ring_mode_mmio_write
   [all...]
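The handlers.c hits share one pattern: the trapped MMIO offset is mapped back to a ring_id with intel_gvt_render_mmio_to_ring_id() and the result is validated (ring_id < 0, or >= I915_NUM_ENGINES) before any engine state is touched. The sketch below illustrates the idea with a hypothetical offset-to-engine mapping; the real function works off the driver's engine tables, not these made-up bases:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_ENGINES 5

    /* Hypothetical per-engine MMIO bases and window size for the sketch. */
    static const uint32_t mmio_base[NUM_ENGINES] = {
        0x2000, 0x12000, 0x22000, 0x1a000, 0x1c000
    };
    #define ENGINE_MMIO_LEN 0x2000

    /* Same idea as intel_gvt_render_mmio_to_ring_id(): find which
     * engine's register window contains this offset, or -1 if none. */
    static int render_mmio_to_ring_id(uint32_t offset)
    {
        int ring_id;

        for (ring_id = 0; ring_id < NUM_ENGINES; ring_id++) {
            if (offset >= mmio_base[ring_id] &&
                offset < mmio_base[ring_id] + ENGINE_MMIO_LEN)
                return ring_id;
        }
        return -1;
    }

    int main(void)
    {
        uint32_t offset = 0x12080;    /* sample trapped register */
        int ring_id = render_mmio_to_ring_id(offset);

        /* Handlers such as hws_pga_write() bail out when the offset
         * does not map to a valid engine. */
        if (ring_id < 0 || ring_id >= NUM_ENGINES) {
            fprintf(stderr, "offset %#x: no engine\n", offset);
            return 1;
        }
        printf("offset %#x -> ring_id %d\n", offset, ring_id);
        return 0;
    }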
cmd_parser.c
   470  int ring_id;    member in struct:parser_exec_state
   643  static inline u32 get_opcode(u32 cmd, int ring_id)
   647  d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
   655  unsigned int opcode, int ring_id)
   660  if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
   667  u32 cmd, int ring_id)
   671  opcode = get_opcode(cmd, ring_id);
   675  return find_cmd_entry(gvt, opcode, ring_id);
   683  static inline void print_opcode(u32 cmd, int ring_id)
   688  d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
  2863  int ring_id = workload->ring_id;    local in function:shadow_workload_ring_buffer
   [all...]
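cmd_parser.c decodes commands per ring: ring_decode_info[ring_id][CMD_TYPE(cmd)] selects the decode table, and a command entry is only accepted when its rings bitmask contains BIT(ring_id). A sketch of that bitmask check with an invented two-entry table (opcodes and ring assignments are illustrative, not the driver's):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define BIT(n) (1U << (n))

    /* Invented command-table entry: the opcode a row describes and a
     * bitmask of the rings it is legal on. */
    struct cmd_info {
        unsigned int opcode;
        uint32_t rings;    /* BIT(ring_id) for each ring that may use it */
    };

    static const struct cmd_info cmd_table[] = {
        { .opcode = 0x11, .rings = BIT(0) },              /* ring 0 only */
        { .opcode = 0x26, .rings = BIT(0) | BIT(2) },     /* rings 0 and 2 */
    };

    /* Same shape as find_cmd_entry(): the opcode must match and the
     * entry must allow the ring the command stream came from. */
    static const struct cmd_info *find_cmd_entry(unsigned int opcode,
        int ring_id)
    {
        size_t i;

        for (i = 0; i < sizeof(cmd_table) / sizeof(cmd_table[0]); i++) {
            if (cmd_table[i].opcode == opcode &&
                (cmd_table[i].rings & BIT(ring_id)))
                return &cmd_table[i];
        }
        return NULL;    /* unknown opcode, or illegal on this ring */
    }

    int main(void)
    {
        printf("opcode 0x26 on ring 2: %s\n",
            find_cmd_entry(0x26, 2) ? "allowed" : "rejected");
        printf("opcode 0x26 on ring 1: %s\n",
            find_cmd_entry(0x26, 1) ? "allowed" : "rejected");
        return 0;
    }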
/src/sys/external/bsd/drm2/dist/drm/amd/amdkfd/
cik_int.h
    33  uint32_t ring_id;    member in struct:cik_ih_ring_entry
kfd_int_process_v9.c
   115  uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);    local in function:event_interrupt_wq_v9
   121  info.prot_valid = ring_id & 0x08;
   122  info.prot_read = ring_id & 0x10;
   123  info.prot_write = ring_id & 0x20;
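In kfd_int_process_v9.c the ring_id value pulled out of the IH entry also carries VM-fault flags: bit 0x08 sets prot_valid, 0x10 prot_read, and 0x20 prot_write, exactly as in the lines above. A tiny runnable decode of those bits (the sample input value is made up):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Decode the protection-fault flags that event_interrupt_wq_v9()
     * extracts from the IH entry's ring_id field. */
    struct vm_fault_info {
        bool prot_valid;
        bool prot_read;
        bool prot_write;
    };

    static struct vm_fault_info decode_fault_bits(uint16_t ring_id)
    {
        struct vm_fault_info info;

        info.prot_valid = ring_id & 0x08;
        info.prot_read = ring_id & 0x10;
        info.prot_write = ring_id & 0x20;
        return info;
    }

    int main(void)
    {
        struct vm_fault_info info = decode_fault_bits(0x28); /* sample */

        printf("valid=%d read=%d write=%d\n",
            info.prot_valid, info.prot_read, info.prot_write);
        return 0;
    }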
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_irq.h
    51  unsigned ring_id;    member in struct:amdgpu_iv_entry
amdgpu_gfx_v10_0.c
  1239  static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
  1246  ring = &adev->gfx.gfx_ring[ring_id];
  1255  if (!ring_id)
  1269  static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
  1274  struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
  1276  ring = &adev->gfx.compute_ring[ring_id];
  1285  ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
  1287  + (ring_id * GFX10_MEC_HPD_SIZE);
  1305  int i, j, k, r, ring_id = 0;    local in function:gfx_v10_0_sw_init
  1383  r = gfx_v10_0_gfx_ring_init(adev, ring_id,
   [all...]
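gfx_v10_0_compute_ring_init() derives everything for a compute ring from its ring_id: which compute_ring slot it occupies, its doorbell index ((mec_ring0 + ring_id) << 1 on gfx v9/v10; the gfx v7/v8 entries below use mec_ring0 + ring_id without the shift), and the offset of its EOP area inside the MEC HPD buffer (ring_id * GFX10_MEC_HPD_SIZE). A worked sketch of that arithmetic; the doorbell base, HPD base address, and HPD size used here are assumptions:

    #include <stdio.h>
    #include <stdint.h>

    /* Per-queue HPD/EOP size; sketch value standing in for
     * GFX10_MEC_HPD_SIZE. */
    #define MEC_HPD_SIZE 2048

    /* Same arithmetic as gfx_v10_0_compute_ring_init(): each compute
     * ring's doorbell slot and EOP area are derived from its ring_id.
     * mec_ring0 and mec_hpd_gpu_addr stand in for values the driver
     * reads from adev. */
    static void compute_ring_setup(int ring_id, uint32_t mec_ring0,
        uint64_t mec_hpd_gpu_addr)
    {
        uint32_t doorbell_index = (mec_ring0 + ring_id) << 1;
        uint64_t eop_gpu_addr = mec_hpd_gpu_addr +
            (uint64_t)ring_id * MEC_HPD_SIZE;

        printf("ring %d: doorbell %u, eop at 0x%llx\n", ring_id,
            (unsigned)doorbell_index, (unsigned long long)eop_gpu_addr);
    }

    int main(void)
    {
        int ring_id;

        /* Hypothetical doorbell base and HPD buffer address. */
        for (ring_id = 0; ring_id < 4; ring_id++)
            compute_ring_setup(ring_id, 0x10, 0x100000000ull);
        return 0;
    }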
amdgpu_gfx_v7_0.c
  3081  static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
  3086  struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
  4405  static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
  4410  struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
  4419  ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
  4440  int i, j, k, r, ring_id;    local in function:gfx_v7_0_sw_init
  4506  ring_id = 0;
  4514  ring_id,
  4519  ring_id++;
  4881  me_id = (entry->ring_id & 0x0c) >> 2;
   [all...]
amdgpu_gfx_v8_0.c
  1888  static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
  1893  struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
  1895  ring = &adev->gfx.compute_ring[ring_id];
  1904  ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
  1906  + (ring_id * GFX8_MEC_HPD_SIZE);
  1927  int i, j, k, r, ring_id;    local in function:gfx_v8_0_sw_init
  2026  ring_id = 0;
  2034  ring_id,
  2039  ring_id++;
  6709  me_id = (entry->ring_id & 0x0c) >> 2;
   [all...]
amdgpu_gfx_v9_0.c
  2132  static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
  2137  struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
  2139  ring = &adev->gfx.compute_ring[ring_id];
  2148  ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
  2150  + (ring_id * GFX9_MEC_HPD_SIZE);
  2169  int i, j, k, r, ring_id;    local in function:gfx_v9_0_sw_init
  2259  ring_id = 0;
  2267  ring_id,
  2272  ring_id++;
  5571  me_id = (entry->ring_id & 0x0c) >> 2;
   [all...]
/src/sys/external/bsd/drm2/dist/drm/radeon/
radeon_si.c
  6259  u32 src_id, src_data, ring_id;    local in function:si_irq_process
  6291  ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
  6410  switch (ring_id) {
radeon_cik.c
  7572  u32 src_id, src_data, ring_id;    local in function:cik_irq_process
  7606  ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
  8001  me_id = (ring_id & 0x60) >> 5;
  8002  pipe_id = (ring_id & 0x18) >> 3;
  8003  queue_id = (ring_id & 0x7) >> 0;
  8020  me_id = (ring_id & 0x60) >> 5;
  8021  pipe_id = (ring_id & 0x18) >> 3;
  8022  queue_id = (ring_id & 0x7) >> 0;
  8043  me_id = (ring_id & 0x60) >> 5;
  8044  pipe_id = (ring_id & 0x18) >> 3;
   [all...]
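cik_irq_process() reads ring_id out of the third dword of the IH ring entry and, for compute-related sources, splits it into ME, pipe, and queue with the masks shown above (0x60 >> 5, 0x18 >> 3, 0x7). A runnable decode of one sample value (the amdgpu gfx handlers listed earlier use a different layout, 0x0c >> 2 for the ME field):

    #include <stdio.h>
    #include <stdint.h>

    /* Decode the CIK interrupt ring_id byte into the compute engine
     * (ME), pipe, and queue it refers to, using the same masks as
     * cik_irq_process(). */
    static void decode_ring_id(uint32_t ring_id)
    {
        uint32_t me_id    = (ring_id & 0x60) >> 5;
        uint32_t pipe_id  = (ring_id & 0x18) >> 3;
        uint32_t queue_id = (ring_id & 0x7) >> 0;

        printf("ring_id 0x%02x -> me %u, pipe %u, queue %u\n",
            (unsigned)ring_id, (unsigned)me_id, (unsigned)pipe_id,
            (unsigned)queue_id);
    }

    int main(void)
    {
        decode_ring_id(0x33);    /* sample value: me 1, pipe 2, queue 3 */
        return 0;
    }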