/src/sys/external/bsd/drm2/dist/drm/i915/gvt/
mmio_context.h
    42  int ring_id;  member in struct:engine_mmio
    50  struct intel_vgpu *next, int ring_id);
|
execlist.c
    47  #define execlist_ring_mmio(gvt, ring_id, offset) \
    48  (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
    62  static int ring_id_to_context_switch_event(unsigned int ring_id)
    64  if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
    67  return context_switch_events[ring_id];
   101  int ring_id = execlist->ring_id;  local in function:emulate_execlist_status
   103  ring_id, _EL_OFFSET_STATUS);
   136  int ring_id = execlist->ring_id;  local in function:emulate_csb_update
   269  int ring_id = execlist->ring_id;  local in function:get_next_execlist_slot
   387  int ring_id = workload->ring_id;  local in function:prepare_execlist_workload
   407  int ring_id = workload->ring_id;  local in function:complete_execlist_workload
   [all...]
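
The execlist.c hits show two recurring idioms: a macro that adds a per-engine mmio_base to a register offset, and a bounds-checked table lookup mapping a ring_id to its context-switch event. A minimal compilable sketch of the lookup pattern, with placeholder event values rather than the real GVT event IDs:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* one hypothetical event ID per ring; the real table lives in execlist.c */
    static const int context_switch_events[] = { 10, 11, 12, 13, 14 };

    static int ring_id_to_context_switch_event(unsigned int ring_id)
    {
        /* the kernel WARNs here; a plain error return stands in for that */
        if (ring_id >= ARRAY_SIZE(context_switch_events))
            return -1;
        return context_switch_events[ring_id];
    }

    int main(void)
    {
        printf("ring 2 -> event %d\n", ring_id_to_context_switch_event(2));
        return 0;
    }
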
scheduler.c
   106  if (workload->ring_id != RCS0)
   136  int ring_id = workload->ring_id;  local in function:populate_shadow_context
   162  if (ring_id == RCS0) {
   183  gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
   186  context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
   190  if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
   218  static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
   221  u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
   239  enum intel_engine_id ring_id = req->engine->id  local in function:shadow_context_status_change
   685  int ring_id = workload->ring_id;  local in function:dispatch_workload
   976  int ring_id;  member in struct:workload_thread_param
   983  int ring_id = p->ring_id;  local in function:workload_thread
   [all...]
mmio_context.c
   171  int ring_id, i;  local in function:load_render_mocs
   177  for (ring_id = 0; ring_id < cnt; ring_id++) {
   178  if (!HAS_ENGINE(dev_priv, ring_id))
   180  offset.reg = regs[ring_id];
   182  gen9_render_mocs.control_table[ring_id][i] =
   205  int ring_id = req->engine->id;  local in function:restore_context_mmio_for_inhibit
   206  int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
   222  if (mmio->ring_id != ring_id
   [all...]
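
load_render_mocs shows the usual guarded engine walk: iterate every candidate ring index, skip engines the device does not have, and fill a per-ring table from consecutive registers. A compilable sketch of that shape, with a hypothetical has_engine() mask and a stubbed register read standing in for HAS_ENGINE() and the real MMIO accessors:

    #include <stdint.h>
    #include <stdbool.h>

    #define NUM_RINGS    5
    #define MOCS_ENTRIES 64

    static uint32_t control_table[NUM_RINGS][MOCS_ENTRIES];

    /* stand-in for HAS_ENGINE(): one bit per ring in a device engine mask */
    static bool has_engine(uint32_t engine_mask, int ring_id)
    {
        return engine_mask & (1u << ring_id);
    }

    /* stubbed MMIO read; the driver reads the real MOCS control registers */
    static uint32_t mmio_read(uint32_t reg)
    {
        return reg;
    }

    void load_render_mocs(uint32_t engine_mask, const uint32_t regs[NUM_RINGS])
    {
        for (int ring_id = 0; ring_id < NUM_RINGS; ring_id++) {
            if (!has_engine(engine_mask, ring_id))
                continue;
            uint32_t reg = regs[ring_id];
            for (int i = 0; i < MOCS_ENTRIES; i++) {
                control_table[ring_id][i] = mmio_read(reg);
                reg += 4;   /* consecutive dword registers */
            }
        }
    }
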
scheduler.h
    84  int ring_id;  member in struct:intel_vgpu_workload
   134  #define workload_q_head(vgpu, ring_id) \
   135  (&(vgpu->submission.workload_q_head[ring_id]))
   160  intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
|
trace.h
   118  TP_PROTO(int id, char *type, int ring_id, int root_entry_type,
   121  TP_ARGS(id, type, ring_id, root_entry_type, gma, gpa),
   130  id, type, ring_id, root_entry_type, gma, gpa);
   231  TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va,
   235  TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type,
   240  __field(u8, ring_id)
   252  __entry->ring_id = ring_id;
   265  __entry->ring_id,
|
interrupt.h
   234  int gvt_ring_id_to_pipe_control_notify_event(int ring_id);
   235  int gvt_ring_id_to_mi_flush_dw_event(int ring_id);
   236  int gvt_ring_id_to_mi_user_interrupt_event(int ring_id);
|
execlist.h
   175  int ring_id;  member in struct:intel_vgpu_execlist
   184  int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
|
sched_policy.c
   452  int ring_id;  local in function:intel_vgpu_stop_schedule
   475  for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
   476  if (scheduler->engine_owner[ring_id] == vgpu) {
   477  intel_gvt_switch_mmio(vgpu, NULL, ring_id);
   478  scheduler->engine_owner[ring_id] = NULL;
|
cmd_parser.c
   470  int ring_id;  member in struct:parser_exec_state
   643  static inline u32 get_opcode(u32 cmd, int ring_id)
   647  d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
   655  unsigned int opcode, int ring_id)
   660  if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
   667  u32 cmd, int ring_id)
   671  opcode = get_opcode(cmd, ring_id);
   675  return find_cmd_entry(gvt, opcode, ring_id);
   683  static inline void print_opcode(u32 cmd, int ring_id)
   688  d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]
  2863  int ring_id = workload->ring_id;  local in function:shadow_workload_ring_buffer
   [all...]
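
The cmd_parser.c hits sketch a two-level decode: get_opcode() picks a per-ring decode table, and find_cmd_entry() accepts an entry only if its rings bitmask contains BIT(ring_id). A self-contained sketch of that bitmask filter; the table contents and opcodes are illustrative, not the real GVT command tables:

    #include <stdint.h>
    #include <stddef.h>

    #define BIT(n) (1u << (n))

    struct cmd_info {
        uint32_t opcode;
        uint32_t rings;     /* bitmask of rings the command is valid on */
    };

    /* illustrative entries; the real tables live in cmd_parser.c */
    static const struct cmd_info cmd_table[] = {
        { 0x23, BIT(0) },                   /* render ring only */
        { 0x26, BIT(0) | BIT(1) | BIT(2) }, /* valid on several rings */
    };

    const struct cmd_info *find_cmd_entry(uint32_t opcode, int ring_id)
    {
        for (size_t i = 0; i < sizeof(cmd_table) / sizeof(cmd_table[0]); i++) {
            if (cmd_table[i].opcode == opcode &&
                (cmd_table[i].rings & BIT(ring_id)))
                return &cmd_table[i];
        }
        return NULL;    /* unknown command on this ring */
    }
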
handlers.c
   522  int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);  local in function:force_nonpriv_write
   527  if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
   529  vgpu->id, ring_id, offset, bytes);
   533  ring_base = dev_priv->engine[ring_id]->mmio_base;
  1486  int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);  local in function:hws_pga_write
  1498  if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
  1503  vgpu->hws_pga[ring_id] = value;
  1663  int ring_id;  local in function:mmio_read_from_hw
  1666  ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset)
  1690  int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);  local in function:elsp_mmio_write
  1717  int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);  local in function:ring_mode_mmio_write
   [all...]
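
Several handlers first translate the written register offset back to a ring_id via intel_gvt_render_mmio_to_ring_id() and reject the access when the result is negative or out of range. A sketch of what such a reverse mapping can look like, assuming hypothetical per-engine MMIO bases and a fixed window size (the real driver derives these from the engine list):

    #include <stdint.h>

    #define NUM_ENGINES 5

    /* hypothetical per-engine MMIO bases */
    static const uint32_t mmio_base[NUM_ENGINES] = {
        0x02000, 0x12000, 0x1c000, 0x1d000, 0x22000
    };

    int render_mmio_to_ring_id(uint32_t offset)
    {
        for (int ring_id = 0; ring_id < NUM_ENGINES; ring_id++) {
            /* assumed fixed 2 KiB register window per engine */
            if (offset >= mmio_base[ring_id] &&
                offset < mmio_base[ring_id] + 0x800)
                return ring_id;
        }
        return -1;  /* callers must reject this, as the handlers do */
    }
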
/src/sys/external/bsd/drm2/dist/drm/amd/amdkfd/
cik_int.h
    33  uint32_t ring_id;  member in struct:cik_ih_ring_entry
|
cik_event_interrupt.c
    60  tmp_ihre->ring_id &= 0x000000ff;
    61  tmp_ihre->ring_id |= vmid << 8;
    62  tmp_ihre->ring_id |= pasid << 16;
    70  vmid = (ihre->ring_id & 0x0000ff00) >> 8;
    76  pasid = (ihre->ring_id & 0xffff0000) >> 16;
    97  unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;
    98  unsigned int pasid = (ihre->ring_id & 0xffff0000) >> 16;
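
The CIK hits pack three fields into the 32-bit ring_id word: the ring id proper in bits 0-7, the VMID in bits 8-15, and the PASID in bits 16-31. A round-trip of that packing, checked with asserts:

    #include <stdint.h>
    #include <assert.h>

    static uint32_t pack_ring_id(uint32_t ring_id, uint32_t vmid, uint32_t pasid)
    {
        uint32_t w = ring_id & 0x000000ff;  /* keep only the ring id byte */
        w |= vmid << 8;
        w |= pasid << 16;
        return w;
    }

    int main(void)
    {
        uint32_t w = pack_ring_id(0x21, 0x05, 0x1234);

        assert(((w & 0x0000ff00) >> 8) == 0x05);    /* vmid */
        assert(((w & 0xffff0000) >> 16) == 0x1234); /* pasid */
        assert((w & 0xff) == 0x21);                 /* ring id */
        return 0;
    }
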
|
kfd_int_process_v9.c
   115  uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);  local in function:event_interrupt_wq_v9
   121  info.prot_valid = ring_id & 0x08;
   122  info.prot_read = ring_id & 0x10;
   123  info.prot_write = ring_id & 0x20;
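
On GFX9 the fault-status flags travel inside ring_id itself: bit 3 marks a valid protection fault, bit 4 a read fault, bit 5 a write fault. A minimal decode of those bits (the struct here is illustrative):

    #include <stdint.h>
    #include <stdbool.h>

    struct fault_info {     /* illustrative stand-in for the kfd fault info */
        bool prot_valid, prot_read, prot_write;
    };

    struct fault_info decode_fault(uint16_t ring_id)
    {
        struct fault_info info;

        info.prot_valid = ring_id & 0x08;   /* bit 3 */
        info.prot_read  = ring_id & 0x10;   /* bit 4 */
        info.prot_write = ring_id & 0x20;   /* bit 5 */
        return info;
    }
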
|
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_irq.h
    51  unsigned ring_id;  member in struct:amdgpu_iv_entry
|
amdgpu_trace.h
    85  __field(unsigned, ring_id)
    97  __entry->ring_id = iv->ring_id;
   111  __entry->ring_id, __entry->vmid,
|
amdgpu_gfx_v10_0.c
  1239  static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
  1246  ring = &adev->gfx.gfx_ring[ring_id];
  1255  if (!ring_id)
  1269  static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
  1274  struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
  1276  ring = &adev->gfx.compute_ring[ring_id];
  1285  ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
  1287  + (ring_id * GFX10_MEC_HPD_SIZE);
  1305  int i, j, k, r, ring_id = 0;  local in function:gfx_v10_0_sw_init
  1383  r = gfx_v10_0_gfx_ring_init(adev, ring_id,
   [all...]
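
gfx_v10_0_compute_ring_init() derives both a doorbell slot and a hardware-queue descriptor offset from ring_id: the doorbell index is (mec_ring0 + ring_id) shifted left once because 64-bit doorbells occupy two dword slots, and each ring gets its own GFX10_MEC_HPD_SIZE chunk of the MEC buffer. A sketch of that math with made-up base values; the HPD size below is illustrative, see the driver for the real constant:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative value; the driver defines the real GFX10_MEC_HPD_SIZE */
    #define GFX10_MEC_HPD_SIZE 0x800

    int main(void)
    {
        uint32_t mec_ring0 = 0xf0;            /* hypothetical doorbell base */
        uint64_t mec_hpd_gpu_addr = 0x100000; /* hypothetical MEC buffer base */

        for (int ring_id = 0; ring_id < 4; ring_id++) {
            /* << 1: doorbell slots are counted in dwords, doorbells are 64-bit */
            uint32_t doorbell = (mec_ring0 + ring_id) << 1;
            uint64_t hpd = mec_hpd_gpu_addr +
                (uint64_t)ring_id * GFX10_MEC_HPD_SIZE;
            printf("compute ring %d: doorbell 0x%x, hpd 0x%llx\n",
                ring_id, doorbell, (unsigned long long)hpd);
        }
        return 0;
    }
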
amdgpu_cik_ih.c
   262  entry->ring_id = dw[2] & 0xff;
|
amdgpu_sdma_v2_4.c
  1062  instance_id = (entry->ring_id & 0x3) >> 0;
  1063  queue_id = (entry->ring_id & 0xc) >> 2;
  1103  instance_id = (entry->ring_id & 0x3) >> 0;
  1104  queue_id = (entry->ring_id & 0xc) >> 2;
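
The SDMA v2.4 interrupt handler splits ring_id into two 2-bit fields: bits 0-1 select the SDMA instance and bits 2-3 the queue within it. A quick check of that decode:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint32_t ring_id = 0x6;                       /* example IV-entry value */
        uint32_t instance_id = (ring_id & 0x3) >> 0;  /* bits 0-1 */
        uint32_t queue_id    = (ring_id & 0xc) >> 2;  /* bits 2-3 */

        assert(instance_id == 2 && queue_id == 1);
        return 0;
    }
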
|
amdgpu_si_ih.c
   144  entry->ring_id = dw[2] & 0xff;
|
amdgpu_vega10_ih.c
   461  entry->ring_id = (dw[0] >> 16) & 0xff;
   550  switch (entry->ring_id) {
|
amdgpu_cz_ih.c
   241  entry->ring_id = dw[2] & 0xff;
|
amdgpu_iceland_ih.c
   241  entry->ring_id = dw[2] & 0xff;
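
Across the IH blocks above, the older ASICs (CIK, SI, CZ, Iceland) take ring_id from the low byte of the third dword of the IV entry, while Vega10 moves it to bits 16-23 of the first dword. Both decodes side by side:

    #include <stdint.h>

    /* CIK/SI/CZ/Iceland: ring_id lives in the low byte of dword 2 */
    uint32_t ring_id_legacy(const uint32_t dw[4])
    {
        return dw[2] & 0xff;
    }

    /* Vega10: ring_id moves to bits 16-23 of dword 0 */
    uint32_t ring_id_vega10(const uint32_t dw[8])
    {
        return (dw[0] >> 16) & 0xff;
    }
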
|
/src/sys/dev/pci/ixgbe/
ixgbe_netmap.c
   211  struct tx_ring *txr = &sc->tx_rings[kring->ring_id];
   349  nic_i = IXGBE_READ_REG(&sc->hw, IXGBE_TDH(kring->ring_id));
   395  struct rx_ring *rxr = &sc->rx_rings[kring->ring_id];
|
/src/sys/dev/pci/
xmm7360.c
   699  static void xmm7360_td_ring_activate(struct xmm_dev *xmm, u8 ring_id)
   701  struct td_ring *ring = &xmm->td_ring[ring_id];
   704  xmm->cp->s_rptr[ring_id] = xmm->cp->s_wptr[ring_id] = 0;
   706  ret = xmm7360_cmd_ring_execute(xmm, CMD_RING_OPEN, ring_id, ring->depth, ring->tds_phys, 0x60);
   710  static void xmm7360_td_ring_create(struct xmm_dev *xmm, u8 ring_id, u8 depth, u16 page_size)
   712  struct td_ring *ring = &xmm->td_ring[ring_id];
   732  xmm7360_td_ring_activate(xmm, ring_id);
   735  static void xmm7360_td_ring_deactivate(struct xmm_dev *xmm, u8 ring_id)
   737  xmm7360_cmd_ring_execute(xmm, CMD_RING_CLOSE, ring_id, 0, 0, 0)
   [all...]
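
The xmm7360 hits outline a transfer-descriptor ring lifecycle: create sets up the ring, activate zeroes the shared read/write pointers and issues CMD_RING_OPEN, and deactivate issues CMD_RING_CLOSE. A structural sketch under assumed types; cmd_ring_execute() below is a stub standing in for xmm7360_cmd_ring_execute(), and the command numbers are placeholders:

    #include <stdint.h>

    #define NUM_RINGS 16

    struct td_ring {
        uint8_t  depth;
        uint64_t tds_phys;  /* DMA address of the descriptor array */
    };

    struct xmm_dev {
        struct td_ring td_ring[NUM_RINGS];
        /* pointers shared with the device (xmm->cp in the driver) */
        uint32_t s_rptr[NUM_RINGS], s_wptr[NUM_RINGS];
    };

    enum { CMD_RING_OPEN = 1, CMD_RING_CLOSE = 2 };  /* placeholder values */

    static int cmd_ring_execute(struct xmm_dev *xmm, int cmd, uint8_t ring_id,
        uint8_t depth, uint64_t addr, uint16_t flags)
    {
        (void)xmm; (void)cmd; (void)ring_id;
        (void)depth; (void)addr; (void)flags;
        return 0;   /* real code pokes the modem's command ring */
    }

    static void td_ring_activate(struct xmm_dev *xmm, uint8_t ring_id)
    {
        struct td_ring *ring = &xmm->td_ring[ring_id];

        /* both pointers start at zero before the ring is opened */
        xmm->s_rptr[ring_id] = xmm->s_wptr[ring_id] = 0;
        cmd_ring_execute(xmm, CMD_RING_OPEN, ring_id,
            ring->depth, ring->tds_phys, 0x60);
    }

    static void td_ring_deactivate(struct xmm_dev *xmm, uint8_t ring_id)
    {
        cmd_ring_execute(xmm, CMD_RING_CLOSE, ring_id, 0, 0, 0);
    }
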