/src/sys/external/bsd/drm2/dist/drm/i915/gvt/

vgpu.c
      1 /* $NetBSD: vgpu.c,v 1.3 2021/12/19 11:06:55 riastradh Exp $ */
     37 __KERNEL_RCSID(0, "$NetBSD: vgpu.c,v 1.3 2021/12/19 11:06:55 riastradh Exp $");
     43 void populate_pvinfo_page(struct intel_vgpu *vgpu)
     46 vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
     47 vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
     48 vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
     49 vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
     50 vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
     52 vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT
    320 struct intel_vgpu *vgpu; local in function:intel_gvt_create_idle_vgpu
    367 struct intel_vgpu *vgpu; local in function:__intel_gvt_create_vgpu
    481 struct intel_vgpu *vgpu; local in function:intel_gvt_create_vgpu
        [all...]

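populate_pvinfo_page() is the host half of the GVT-g handshake: it stamps the
PVINFO page with VGT_MAGIC, interface version 1.0, the vGPU id, and the
capability bits. The guest side of the handshake just reads those values back.
A minimal sketch of that guest-side check, assuming the PVINFO page sits at
offset VGT_PVINFO_PAGE inside the GTTMMIO BAR and that magic is its first
field (both assumptions for illustration):

    #define VGT_PVINFO_PAGE 0x78000 /* assumed offset of the PVINFO page */

    /* Sketch only: mirrors the values written at lines 46-48 above. */
    static bool guest_detects_vgpu(void __iomem *gttmmio)
    {
            u64 magic = readq(gttmmio + VGT_PVINFO_PAGE); /* vgtif_reg(magic) */

            return magic == VGT_MAGIC; /* host wrote VGT_MAGIC at line 46 */
    }
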
sched_policy.h
     45 int (*init_vgpu)(struct intel_vgpu *vgpu);
     46 void (*clean_vgpu)(struct intel_vgpu *vgpu);
     47 void (*start_schedule)(struct intel_vgpu *vgpu);
     48 void (*stop_schedule)(struct intel_vgpu *vgpu);
     57 int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu);
     59 void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu);
     61 void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
     63 void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);

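Lines 45-48 are the per-vGPU hooks of the scheduling-policy interface, and
lines 57-63 are the thin wrappers the rest of gvt calls to dispatch through
them. A sketch of how a policy backend (such as the time-based scheduler in
sched_policy.c) fills in the table; the ops struct name and the empty hook
bodies are assumptions for illustration:

    /* Hook bodies elided; each would manage the policy's per-vGPU data. */
    static int tbs_init_vgpu(struct intel_vgpu *vgpu) { return 0; }
    static void tbs_clean_vgpu(struct intel_vgpu *vgpu) { }
    static void tbs_start_schedule(struct intel_vgpu *vgpu) { }
    static void tbs_stop_schedule(struct intel_vgpu *vgpu) { }

    /* Struct name assumed to match the upstream ops-table layout. */
    static const struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
            .init_vgpu      = tbs_init_vgpu,
            .clean_vgpu     = tbs_clean_vgpu,
            .start_schedule = tbs_start_schedule,
            .stop_schedule  = tbs_stop_schedule,
    };
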
mpt.h
     76 * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
     82 static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
     88 return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
     92 * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
     98 static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
    104 intel_gvt_host.mpt->detach_vgpu(vgpu);
    113 * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
    118 static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
    120 unsigned long offset = vgpu->gvt->device_info.msi_cap_offset
        [all...]

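The attach/detach wrappers bracket a vGPU's lifetime: attach asks the
hypervisor backend (kvmgt, for instance) to set up the vGPU and store its
handle in vgpu->handle (line 88); detach releases it. A sketch of the
expected pairing, where setup_vgpu_resources() is a hypothetical stand-in
for the rest of the creation path:

    static int example_create_vgpu(struct intel_vgpu *vgpu)
    {
            int ret;

            /* fills vgpu->handle via mpt->attach_vgpu (line 88) */
            ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
            if (ret)
                    return ret;

            ret = setup_vgpu_resources(vgpu); /* hypothetical helper */
            if (ret) {
                    intel_gvt_hypervisor_detach_vgpu(vgpu);
                    return ret;
            }
            return 0;
    }
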
page_track.c
     33 * @vgpu: a vGPU
     40 struct intel_vgpu *vgpu, unsigned long gfn)
     42 return radix_tree_lookup(&vgpu->page_track_tree, gfn);
     47 * @vgpu: a vGPU
     55 int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn,
     61 track = intel_vgpu_find_page_track(vgpu, gfn);
     72 ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);
     83 * @vgpu: a vGP
        [all...]

cfg_space.c
     64 * @vgpu: target vgpu
     73 static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
     76 u8 *cfg_base = vgpu_cfg_space(vgpu);
    102 * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
    103 * @vgpu: target vgpu
    111 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
    117 if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
    120 memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes)
    374 vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = local in function:intel_vgpu_init_cfg_space
    376 vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = local in function:intel_vgpu_init_cfg_space
    387 vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO local in function:intel_vgpu_init_cfg_space
        [all...]

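intel_vgpu_emulate_cfg_read() is the entry point a hypervisor backend calls
when the guest reads PCI config space: it bounds-checks the access against
cfg_space_size (line 117) and copies out of the vGPU's virtual config space
(line 120). A sketch of a caller, assuming the truncated signature at line
111 ends in (..., void *p_data, unsigned int bytes):

    static ssize_t forward_cfg_read(struct intel_vgpu *vgpu,
                                    unsigned int offset,
                                    void *buf, unsigned int bytes)
    {
            int ret;

            ret = intel_vgpu_emulate_cfg_read(vgpu, offset, buf, bytes);
            if (ret)
                    return ret; /* out-of-range access (line 117) */
            return bytes;
    }
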
mmio.c
     46 * @vgpu: a vGPU
     52 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
     54 u64 gttmmio_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
     65 static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa,
     72 if (!vgpu || !p_data)
     75 gvt = vgpu->gvt;
     76 mutex_lock(&vgpu->vgpu_lock);
     77 offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
     80 intel_vgpu_default_mmio_read(vgpu, offset, p_data
        [all...]

page_track.h
     47 struct intel_vgpu *vgpu, unsigned long gfn);
     49 int intel_vgpu_register_page_track(struct intel_vgpu *vgpu,
     52 void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
     55 int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
     56 int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
     58 int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,

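Together with the page_track.c excerpt above, this is the write-protection
API used to shadow guest page tables: register a handler for a guest frame
number (backed by the radix tree at lines 42/72 of page_track.c), then enable
tracking so guest writes trap into the handler. A sketch of the typical
usage; the handler's (gpa, data, bytes) signature and the trailing
handler/priv arguments of the register call are assumptions based on the
truncated prototypes:

    static int my_pt_write_handler(struct intel_vgpu *vgpu, u64 gpa,
                                   void *data, int bytes)
    {
            /* re-shadow the guest PTEs that were just written */
            return 0;
    }

    static int track_guest_page_table(struct intel_vgpu *vgpu,
                                      unsigned long gfn)
    {
            int ret;

            ret = intel_vgpu_register_page_track(vgpu, gfn,
                                                 my_pt_write_handler, NULL);
            if (ret)
                    return ret;
            /* write-protect the page so guest writes reach the handler */
            return intel_vgpu_enable_page_track(vgpu, gfn);
    }
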
aperture_gm.c
     46 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
     48 struct intel_gvt *gvt = vgpu->gvt;
     56 node = &vgpu->gm.high_gm_node;
     57 size = vgpu_hidden_sz(vgpu);
     62 node = &vgpu->gm.low_gm_node;
     63 size = vgpu_aperture_sz(vgpu);
     84 static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
     86 struct intel_gvt *gvt = vgpu->gvt;
     90 ret = alloc_gm(vgpu, false);
     94 ret = alloc_gm(vgpu, true)
        [all...]

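alloc_gm() carves one vGPU's slice out of either the low (CPU-visible
aperture) or high (hidden) graphics memory, differing only in which drm_mm
node and size it uses (lines 56-63). alloc_vgpu_gm() calls it twice, low
then high. A sketch of that two-step allocation with unwinding on partial
failure; free_gm() is a hypothetical name for the matching release helper:

    static int alloc_vgpu_gm_sketch(struct intel_vgpu *vgpu)
    {
            int ret;

            ret = alloc_gm(vgpu, false); /* aperture (low) GM, line 90 */
            if (ret)
                    return ret;

            ret = alloc_gm(vgpu, true); /* hidden (high) GM, line 94 */
            if (ret) {
                    free_gm(vgpu, false); /* hypothetical unwind helper */
                    return ret;
            }
            return 0;
    }
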
edid.c
     54 static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
     56 struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
     73 if (intel_vgpu_has_monitor_on_port(vgpu, edid->port)) {
     75 intel_vgpu_port(vgpu, edid->port)->edid;
    131 static void reset_gmbus_controller(struct intel_vgpu *vgpu)
    133 vgpu_vreg_t(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
    134 if (!vgpu->display.i2c_edid.edid_available)
    135 vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
    136 vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
    140 static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
        [all...]

display.c
     43 static int get_edp_pipe(struct intel_vgpu *vgpu)
     45 u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP);
     63 static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
     65 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
     67 if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
     70 if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
     75 int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
     77 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
     82 if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
     85 if (edp_pipe_is_enabled(vgpu) &
    369 struct intel_vgpu *vgpu; local in function:intel_gvt_check_vblank_emulation
    443 struct intel_vgpu *vgpu; local in function:intel_gvt_emulate_vblank
        [all...]

gvt.h
     80 /* GM resources owned by a vGPU */
     90 /* Fences owned by a vGPU */
    113 #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
    127 #define vgpu_opregion(vgpu) (&(vgpu->opregion))
    146 int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
    147 void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
    148 void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
    174 unsigned long handle; /* vGPU handle used by hypervisor MPT modules *
        [all...]

dmabuf.h
     54 * struct intel_vgpu_dmabuf_obj- Intel vGPU device buffer object
     57 struct intel_vgpu *vgpu; member in struct:intel_vgpu_dmabuf_obj
     65 int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args);
     66 int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id);
     67 void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu);

sched_policy.c
     42 static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
     47 for_each_engine(engine, vgpu->gvt->dev_priv, i) {
     48 if (!list_empty(workload_q_head(vgpu, i)))
     55 /* We give 2 seconds higher prio for vGPU during start */
     60 struct intel_vgpu *vgpu; member in struct:vgpu_sched_data
     80 static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
     85 if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
     88 vgpu_data = vgpu->sched_data
    184 struct intel_vgpu *vgpu = NULL; local in function:find_busy_vgpu
    221 struct intel_vgpu *vgpu = NULL; local in function:tbs_sched_func
        [all...]

kvmgt.c
     71 size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
     73 void (*release)(struct intel_vgpu *vgpu,
     98 struct intel_vgpu *vgpu; member in struct:kvmgt_guest_info
    107 struct intel_vgpu *vgpu; member in struct:gvt_dma
    125 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
    137 ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
    143 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
    160 ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
    188 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
    192 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn
    573 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; local in function:kvmgt_get_vfio_device
    587 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; local in function:kvmgt_set_opregion
    615 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; local in function:kvmgt_set_edid
    653 struct intel_vgpu *vgpu = NULL; local in function:intel_vgpu_create
    692 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); local in function:intel_vgpu_remove
    704 struct intel_vgpu *vgpu = container_of(nb, local in function:intel_vgpu_iommu_notifier
    735 struct intel_vgpu *vgpu = container_of(nb, local in function:intel_vgpu_group_notifier
    752 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); local in function:intel_vgpu_open
    849 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); local in function:intel_vgpu_release
    856 struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu, local in function:intel_vgpu_release_work
    940 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); local in function:intel_vgpu_rw
    988 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); local in function:gtt_entry
   1157 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); local in function:intel_vgpu_mmap
   1283 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); local in function:intel_vgpu_ioctl
   1565 struct intel_vgpu *vgpu = (struct intel_vgpu *) local in function:vgpu_id_show
   1749 struct intel_vgpu *vgpu; local in function:kvmgt_guest_init
   1812 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; local in function:kvmgt_detach_vgpu
   1829 struct intel_vgpu *vgpu; local in function:kvmgt_inject_msi
   1876 struct intel_vgpu *vgpu; local in function:kvmgt_dma_map_guest_page
        [all...]

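kvmgt pins guest memory through VFIO one 4 KiB page at a time: line 160 shows
the pin call and line 188 the rollback after a partial failure. A sketch of
that loop, using the dev-based vfio_pin_pages() API visible above; the prot
flags and surrounding declarations are filled in by assumption:

    static int pin_guest_range(struct intel_vgpu *vgpu, unsigned long gfn,
                               unsigned long size)
    {
            unsigned long npages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
            unsigned long cur_gfn, pfn;
            int npage, ret;

            for (npage = 0; npage < npages; npage++) {
                    cur_gfn = gfn + npage;
                    /* vfio_pin_pages() returns the number of pages pinned */
                    ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
                                         IOMMU_READ | IOMMU_WRITE, &pfn);
                    if (ret != 1) {
                            /* unpin what was pinned so far (line 188) */
                            gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
                            return ret >= 0 ? -EFAULT : ret;
                    }
            }
            return 0;
    }
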
display.h
     48 #define intel_vgpu_port(vgpu, port) \
     49 (&(vgpu->display.ports[port]))
     51 #define intel_vgpu_has_monitor_on_port(vgpu, port) \
     52 (intel_vgpu_port(vgpu, port)->edid && \
     53 intel_vgpu_port(vgpu, port)->edid->data_valid)
     55 #define intel_vgpu_port_is_dp(vgpu, port) \
     56 ((intel_vgpu_port(vgpu, port)->type == GVT_DP_A) || \
     57 (intel_vgpu_port(vgpu, port)->type == GVT_DP_B) || \
     58 (intel_vgpu_port(vgpu, port)->type == GVT_DP_C) || \
     59 (intel_vgpu_port(vgpu, port)->type == GVT_DP_D)
        [all...]

mmio.h
     85 int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
     86 void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
     87 void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
     89 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
     91 int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
     93 int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
     96 int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
     98 int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    104 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
    107 int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset
        [all...]

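Note the two address domains in this API: the emulate_mmio_read/write entry
points (lines 91-93) take a guest physical address and translate it
internally via intel_vgpu_gpa_to_mmio_offset() (see mmio.c above), while the
default handlers (lines 96-98) already work on register offsets. A sketch of
a hypervisor backend dispatching a trapped access, assuming the truncated
signatures end in (..., void *p_data, unsigned int bytes):

    static int dispatch_guest_mmio(struct intel_vgpu *vgpu, u64 pa,
                                   void *p_data, unsigned int bytes,
                                   bool is_write)
    {
            /* pa is a guest physical address inside the GTTMMIO BAR */
            if (is_write)
                    return intel_vgpu_emulate_mmio_write(vgpu, pa,
                                                         p_data, bytes);
            return intel_vgpu_emulate_mmio_read(vgpu, pa, p_data, bytes);
    }
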
scheduler.h
     83 struct intel_vgpu *vgpu; member in struct:intel_vgpu_workload
    134 #define workload_q_head(vgpu, ring_id) \
    135 (&(vgpu->submission.workload_q_head[ring_id]))
    143 void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
    145 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
    147 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
    150 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
    152 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
    160 intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
    165 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
        [all...]

gtt.c
     59 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
     62 return vgpu_gmadr_is_valid(vgpu, addr);
     64 if (vgpu_gmadr_is_aperture(vgpu, addr) &&
     65 vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
     67 else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
     68 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
     77 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
     79 if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
     83 if (vgpu_gmadr_is_aperture(vgpu, g_addr))
     84 *h_addr = vgpu_aperture_gmadr_base(vgpu)
    982 struct intel_vgpu *vgpu = spt->vgpu; local in function:ppgtt_invalidate_pte
    999 struct intel_vgpu *vgpu = spt->vgpu; local in function:ppgtt_invalidate_spt
   1305 struct intel_vgpu *vgpu = spt->vgpu; local in function:ppgtt_populate_spt
   1349 struct intel_vgpu *vgpu = spt->vgpu; local in function:ppgtt_handle_guest_entry_removal
   1394 struct intel_vgpu *vgpu = spt->vgpu; local in function:ppgtt_handle_guest_entry_add
   1600 struct intel_vgpu *vgpu = spt->vgpu; local in function:ppgtt_handle_guest_write_page_table
   1718 struct intel_vgpu *vgpu = spt->vgpu; local in function:ppgtt_handle_guest_write_page_table_bytes
   1781 struct intel_vgpu *vgpu = mm->vgpu; local in function:invalidate_ppgtt_mm
   1811 struct intel_vgpu *vgpu = mm->vgpu; local in function:shadow_ppgtt_mm
   2047 struct intel_vgpu *vgpu = mm->vgpu; local in function:ppgtt_get_next_level_entry
   2075 struct intel_vgpu *vgpu = mm->vgpu; local in function:intel_vgpu_gma_to_gpa
        [all...]

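intel_gvt_ggtt_validate_range() accepts a range only if it lies entirely in
one window, either all aperture or all hidden graphics memory (lines 64-68);
intel_gvt_ggtt_gmadr_g2h() then rebases a guest graphics address into the
host's window. Both signatures are shown in full, so composing them is
straightforward; a sketch:

    /* Validate a guest graphics-memory range, then translate its base
     * address to the host address space. */
    static int guest_range_to_host(struct intel_vgpu *vgpu, u64 g_addr,
                                   u32 size, u64 *h_addr)
    {
            /* whole range must be in one window (lines 64-68) */
            if (!intel_gvt_ggtt_validate_range(vgpu, g_addr, size))
                    return -EINVAL;

            return intel_gvt_ggtt_gmadr_g2h(vgpu, g_addr, h_addr);
    }
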
gtt.h
     56 struct intel_vgpu *vgpu);
     62 struct intel_vgpu *vgpu);
    146 struct intel_vgpu *vgpu; member in struct:intel_vgpu_mm
    173 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
    210 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
    211 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
    212 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
    213 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
    216 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
    219 struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
    236 struct intel_vgpu *vgpu; member in struct:intel_vgpu_ppgtt_spt
        [all...]

fb_decoder.c
    151 static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
    154 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    156 u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
    187 static int get_active_pipe(struct intel_vgpu *vgpu)
    192 if (pipe_is_enabled(vgpu, i))
    200 * @vgpu: input vgpu
    207 int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
    211 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    214 pipe = get_active_pipe(vgpu);
        [all...]

dmabuf.c
     44 static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
     50 if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
     56 static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
     59 intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
     66 struct intel_vgpu *vgpu; local in function:vgpu_gem_get_pages
     78 vgpu = fb_info->obj->vgpu;
     79 if (WARN_ON(!vgpu))
     97 if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
    116 vgpu_unpin_dma_address(vgpu, dma_addr)
    134 struct intel_vgpu *vgpu = obj->vgpu; local in function:vgpu_gem_put_pages
    150 struct intel_vgpu *vgpu = obj->vgpu; local in function:dmabuf_gem_object_free
    191 struct intel_vgpu *vgpu = obj->vgpu; local in function:vgpu_gem_release
        [all...]

debugfs.c
     34 struct intel_vgpu *vgpu; member in struct:mmio_diff_param
     72 vreg = vgpu_vreg(param->vgpu, offset);
     92 struct intel_vgpu *vgpu = s->private; local in function:vgpu_mmio_diff_show
     93 struct intel_gvt *gvt = vgpu->gvt;
     95 .vgpu = vgpu,
    117 seq_printf(s, "%-8s %-8s %-8s %-8s\n", "Offset", "HW", "vGPU", "Diff");
    135 struct intel_vgpu *vgpu = (struct intel_vgpu *)data; local in function:vgpu_scan_nonprivbb_get
    136 *val = vgpu->scan_nonprivbb;
    141 * set/unset bit engine_id of vgpu->scan_nonprivbb to turn on/off scannin
    149 struct intel_vgpu *vgpu = (struct intel_vgpu *)data; local in function:vgpu_scan_nonprivbb_set
        [all...]

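scan_nonprivbb is a per-vGPU bitmask (one bit per engine) controlling command
scanning of non-privileged batch buffers, exposed through the simple get/set
pair at lines 135-149. A sketch of how such a pair is typically wired into
debugfs with the standard attribute macro; the file name and mode here
follow the upstream convention but are written from memory:

    DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
                            vgpu_scan_nonprivbb_get,
                            vgpu_scan_nonprivbb_set, "0x%llx\n");

    static void vgpu_debugfs_add_scan_file(struct intel_vgpu *vgpu,
                                           struct dentry *parent)
    {
            debugfs_create_file("scan_nonprivbb", 0644, parent, vgpu,
                                &vgpu_scan_nonprivbb_fops);
    }
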
execlist.c
     99 struct intel_vgpu *vgpu = execlist->vgpu; local in function:emulate_execlist_status
    102 u32 status_reg = execlist_ring_mmio(vgpu->gvt,
    105 status.ldw = vgpu_vreg(vgpu, status_reg);
    106 status.udw = vgpu_vreg(vgpu, status_reg + 4);
    124 vgpu_vreg(vgpu, status_reg) = status.ldw;
    125 vgpu_vreg(vgpu, status_reg + 4) = status.udw;
    127 gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
    128 vgpu->id, status_reg, status.ldw, status.udw);
    135 struct intel_vgpu *vgpu = execlist->vgpu local in function:emulate_csb_update
    195 struct intel_vgpu *vgpu = execlist->vgpu; local in function:emulate_execlist_ctx_schedule_out
    268 struct intel_vgpu *vgpu = execlist->vgpu; local in function:get_next_execlist_slot
    294 struct intel_vgpu *vgpu = execlist->vgpu; local in function:emulate_execlist_schedule_in
    384 struct intel_vgpu *vgpu = workload->vgpu; local in function:prepare_execlist_workload
    406 struct intel_vgpu *vgpu = workload->vgpu; local in function:complete_execlist_workload
        [all...]

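The execlist status register is 64 bits wide but is emulated as two 32-bit
vregs: lines 105-106 read the low and upper dwords separately and lines
124-125 write them back the same way. A sketch of that access pattern; the
union layout is an assumption matching what the field names imply:

    union execlist_status_sketch {
            struct {
                    u32 ldw; /* low dword, at status_reg */
                    u32 udw; /* upper dword, at status_reg + 4 */
            };
            u64 qw;
    };

    static u64 read_execlist_status(struct intel_vgpu *vgpu, u32 status_reg)
    {
            union execlist_status_sketch status;

            status.ldw = vgpu_vreg(vgpu, status_reg);     /* line 105 */
            status.udw = vgpu_vreg(vgpu, status_reg + 4); /* line 106 */
            return status.qw;
    }
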
interrupt.c
     56 static void update_upstream_irq(struct intel_vgpu *vgpu,
    168 * @vgpu: a vGPU
    180 int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
    183 struct intel_gvt *gvt = vgpu->gvt;
    187 trace_write_ir(vgpu->id, "IMR", reg, imr, vgpu_vreg(vgpu, reg),
    188 (vgpu_vreg(vgpu, reg) ^ imr));
    190 vgpu_vreg(vgpu, reg) = imr;
    192 ops->check_pending_irq(vgpu);
        [all...]

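The IMR handler traces which mask bits flipped (the old ^ new at line 188),
latches the guest's new mask into the vreg, and then re-evaluates pending
interrupts, since unmasking may make a queued interrupt deliverable. A
sketch of that shape; the irq ops lookup is an assumption based on the
check_pending_irq call at line 192:

    static int imr_write_sketch(struct intel_vgpu *vgpu, unsigned int reg,
                                u32 imr)
    {
            /* assumed: ops comes from the gvt-wide irq state */
            struct intel_gvt_irq_ops *ops = vgpu->gvt->irq.ops;

            vgpu_vreg(vgpu, reg) = imr;   /* line 190: latch new mask */
            ops->check_pending_irq(vgpu); /* line 192: unmasked bits may fire */
            return 0;
    }
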
/src/sys/external/bsd/drm2/dist/drm/i915/

i915_vgpu.h
     39 return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
     45 return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;

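On the guest side, the capability bits that populate_pvinfo_page() advertised
through vgt_caps (line 52 of the vgpu.c entry) surface as simple predicates
on dev_priv->vgpu.caps. A sketch of gating a guest feature on one of them;
intel_vgpu_has_huge_gtt() is assumed to be the accessor wrapping the
VGT_CAPS_HUGE_GTT test at line 45:

    static bool can_use_64k_gtt_pages(struct drm_i915_private *dev_priv)
    {
            /* only safe when host GVT-g advertised huge-GTT support */
            return intel_vgpu_has_huge_gtt(dev_priv);
    }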