Lines Matching defs:vgpu
92 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
134 struct intel_vgpu *vgpu = workload->vgpu;
135 struct intel_gvt *gvt = vgpu->gvt;
150 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
153 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
170 intel_gvt_hypervisor_read_gpa(vgpu,
195 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
205 intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
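
The matches above (populate_shadow_context) show the guest's logical ring context being copied into the shadow context: each page of the LRC image is translated from a GGTT graphics address to a guest-physical address, then read through the hypervisor abstraction. A minimal sketch of that copy loop, using only the helpers visible above (the page count, dst bookkeeping, and error handling are illustrative assumptions):

    unsigned long context_gpa;
    int i;

    for (i = 0; i < ctx_page_cnt; i++) {    /* ctx_page_cnt: assumed */
            /* Translate page i of the guest LRC image (a GGTT graphics
             * address derived from the context descriptor's LRCA) into
             * a guest-physical address. */
            context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                    (u32)((workload->ctx_desc.lrca + i) << PAGE_SHIFT));
            if (context_gpa == INTEL_GVT_INVALID_ADDR)
                    return -EFAULT;

            /* Pull one page of guest context into the shadow copy. */
            intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, PAGE_SIZE);
            dst += PAGE_SIZE;
    }
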
218 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
220 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
225 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
227 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
229 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
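
save_ring_hw_state snapshots a few hardware ring registers into the vGPU's virtual register file so the guest later reads coherent values. The three identical-looking matches imply three registers are captured; a sketch assuming the set is INSTDONE/ACTHD/ACTHD_UDW (the exact registers are not visible in this listing):

    static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
    {
            struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
            u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
            i915_reg_t reg;

            reg = RING_INSTDONE(ring_base);         /* assumed register set */
            vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
            reg = RING_ACTHD(ring_base);
            vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
            reg = RING_ACTHD_UDW(ring_base);
            vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
    }
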
247 /* Switch ring from vGPU to host. */
264 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
265 /* Switch ring from host to vGPU or vGPU to vGPU. */
267 workload->vgpu, ring_id);
268 scheduler->engine_owner[ring_id] = workload->vgpu;
270 gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
271 ring_id, workload->vgpu->id);
276 save_ring_hw_state(workload->vgpu, ring_id);
280 save_ring_hw_state(workload->vgpu, ring_id);
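
The scheduler tracks which vGPU currently owns each engine's render MMIO state and only pays for a register save/restore when ownership actually changes. Condensed from the matches above (the surrounding context-status notifier and forcewake handling are omitted):

    if (workload->vgpu != scheduler->engine_owner[ring_id]) {
            /* Switch ring from host to vGPU or vGPU to vGPU. */
            intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                  workload->vgpu, ring_id);
            scheduler->engine_owner[ring_id] = workload->vgpu;
    } else
            gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
                          ring_id, workload->vgpu->id);

    save_ring_hw_state(workload->vgpu, ring_id);
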
309 struct intel_vgpu *vgpu = workload->vgpu;
316 intel_vgpu_restore_inhibit_context(vgpu, req);
392 struct intel_vgpu *vgpu = workload->vgpu;
393 struct intel_vgpu_submission *s = &vgpu->submission;
419 struct intel_vgpu *vgpu = workload->vgpu;
420 struct intel_vgpu_submission *s = &vgpu->submission;
423 lockdep_assert_held(&vgpu->vgpu_lock);
453 struct intel_gvt *gvt = workload->vgpu->gvt;
575 struct intel_vgpu *vgpu = workload->vgpu;
576 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
580 vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
614 struct intel_vgpu *vgpu = workload->vgpu;
615 struct intel_vgpu_submission *s = &vgpu->submission;
621 gvt_vgpu_err("failed to pin vgpu mm\n");
635 ret = intel_vgpu_sync_oos_pages(workload->vgpu);
637 gvt_vgpu_err("failed to sync vgpu oos pages\n");
641 ret = intel_vgpu_flush_post_shadow(workload->vgpu);
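
prepare_workload must bring the guest's address space into a consistent state before anything reaches hardware: pin the shadow mm, sync any out-of-sync (OOS) guest page-table pages, then flush post-shadow PPGTT updates. A sketch of that ordering with conventional unwind labels (the label names and intermediate steps are assumptions):

    ret = intel_vgpu_pin_mm(workload->shadow_mm);
    if (ret) {
            gvt_vgpu_err("failed to pin vgpu mm\n");
            return ret;
    }

    ret = intel_vgpu_sync_oos_pages(workload->vgpu);
    if (ret) {
            gvt_vgpu_err("failed to sync vgpu oos pages\n");
            goto err_unpin_mm;
    }

    ret = intel_vgpu_flush_post_shadow(workload->vgpu);
    if (ret)
            goto err_unpin_mm;

    return 0;

err_unpin_mm:
    intel_vgpu_unpin_mm(workload->shadow_mm);
    return ret;
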
683 struct intel_vgpu *vgpu = workload->vgpu;
691 mutex_lock(&vgpu->vgpu_lock);
726 mutex_unlock(&vgpu->vgpu_lock);
739 * no current vgpu / will be scheduled out / no workload
743 gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
771 * schedule out a vgpu.
781 atomic_inc(&workload->vgpu->submission.running_workload_num);
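
pick_next_workload bails out early in exactly the cases the comment above lists: no vGPU currently owns the engine, the current vGPU is about to be scheduled out, or its queue is empty. Only after those guards does it take a workload and bump the running count. A condensed sketch (locking omitted):

    if (!scheduler->current_vgpu) {
            gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
            goto out;
    }

    if (scheduler->need_reschedule) {
            gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
            goto out;
    }

    if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
            goto out;

    workload = list_first_entry(workload_q_head(scheduler->current_vgpu,
                    ring_id), struct intel_vgpu_workload, list);
    atomic_inc(&workload->vgpu->submission.running_workload_num);
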
790 struct intel_vgpu *vgpu = workload->vgpu;
791 struct intel_gvt *gvt = vgpu->gvt;
820 vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
821 vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
832 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
842 intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
848 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
855 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
863 intel_gvt_hypervisor_write_gpa(vgpu,
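
On completion the copy direction reverses: update_guest_context writes the final ring head/tail into the vGPU's virtual registers and pushes each shadow context page back to guest memory through the hypervisor write helper. A sketch of the write-back (page count and src bookkeeping are illustrative):

    vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
    vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;

    for (i = 0; i < ctx_page_cnt; i++) {    /* ctx_page_cnt: assumed */
            context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                    (u32)((workload->ctx_desc.lrca + i) << PAGE_SHIFT));
            if (context_gpa == INTEL_GVT_INVALID_ADDR)
                    return;

            /* Copy one shadow page back into the guest's context image. */
            intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, PAGE_SIZE);
            src += PAGE_SIZE;
    }
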
873 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
876 struct intel_vgpu_submission *s = &vgpu->submission;
877 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
898 struct intel_vgpu *vgpu = workload->vgpu;
899 struct intel_vgpu_submission *s = &vgpu->submission;
903 mutex_lock(&vgpu->vgpu_lock);
927 !(vgpu->resetting_eng & BIT(ring_id))) {
932 intel_vgpu_trigger_virtual_event(vgpu, event);
945 if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
949 * So this error is actually a vGPU hang to the guest.
950 * According to this we should emulate a vGPU hang. If
959 intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
971 mutex_unlock(&vgpu->vgpu_lock);
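
complete_current_workload separates a clean completion from a failure or an in-flight engine reset. On success it injects the context-switch interrupt the guest is waiting for; on failure it emulates a vGPU hang by discarding everything the guest already queued on that engine, much as real hardware would after a hang. A condensed sketch (event selection and lite-restore handling omitted):

    if (!workload->status &&
        !(vgpu->resetting_eng & BIT(ring_id))) {
            /* Clean completion: inject the context-switch event. */
            intel_vgpu_trigger_virtual_event(vgpu, event);
    }

    if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
            /* Emulate a vGPU hang: clean up all workloads the guest
             * has already submitted on this engine. */
            intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
    }
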
986 struct intel_vgpu *vgpu = NULL;
1010 gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
1012 workload->vgpu->id);
1023 * Update the vReg of the vGPU which submitted this
1024 * workload. The vGPU may use these registers for checking
1033 vgpu = workload->vgpu;
1054 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1059 void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
1061 struct intel_vgpu_submission *s = &vgpu->submission;
1062 struct intel_gvt *gvt = vgpu->gvt;
1066 gvt_dbg_sched("wait vgpu idle\n");
1153 * intel_vgpu_clean_submission - free submission-related resource for vGPU
1154 * @vgpu: a vGPU
1156 * This function is called when a vGPU is being destroyed.
1159 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
1161 struct intel_vgpu_submission *s = &vgpu->submission;
1165 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
1168 for_each_engine(engine, vgpu->gvt->dev_priv, id)
1176 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
1177 * @vgpu: a vGPU
1180 * This function is called when a vGPU is being reset.
1183 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
1186 struct intel_vgpu_submission *s = &vgpu->submission;
1191 intel_vgpu_clean_workloads(vgpu, engine_mask);
1192 s->ops->reset(vgpu, engine_mask);
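
Reset is a strict subset of destroy: pending workloads on the affected engines are discarded and the active backend's reset hook runs, but the submission ops themselves stay selected. A sketch assuming an active flag guards vGPUs that never enabled submission:

    void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
                                     unsigned long engine_mask)
    {
            struct intel_vgpu_submission *s = &vgpu->submission;

            if (!s->active)         /* s->active: assumed guard */
                    return;

            intel_vgpu_clean_workloads(vgpu, engine_mask);
            s->ops->reset(vgpu, engine_mask);
    }
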
1214 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
1215 * @vgpu: a vGPU
1217 * This function is called when a vGPU is being created.
1223 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
1225 struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
1226 struct intel_vgpu_submission *s = &vgpu->submission;
1303 * @vgpu: a vGPU
1305 * @interface: expected vGPU virtual submission interface
1313 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
1317 struct intel_vgpu_submission *s = &vgpu->submission;
1331 s->ops->clean(vgpu, engine_mask);
1337 gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
1341 ret = ops[interface]->init(vgpu, engine_mask);
1349 gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
1350 vgpu->id, s->ops->name);
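
intel_vgpu_select_submission_ops swaps the virtual submission backend: clean up the currently active interface, treat interface 0 as "remove ops", otherwise init and record the requested one. A sketch consistent with the matches above (the ops[] table and its bounds checking are simplified assumptions):

    if (s->active)
            s->ops->clean(vgpu, engine_mask);

    if (interface == 0) {
            s->ops = NULL;
            s->virtual_submission_interface = 0;
            s->active = false;
            gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
            return 0;
    }

    ret = ops[interface]->init(vgpu, engine_mask);
    if (ret)
            return ret;

    s->ops = ops[interface];
    s->virtual_submission_interface = interface;
    s->active = true;
    gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
                 vgpu->id, s->ops->name);
    return 0;
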
1356 * intel_vgpu_destroy_workload - destroy a vGPU workload
1359 * This function is called when destroying a vGPU workload.
1364 struct intel_vgpu_submission *s = &workload->vgpu->submission;
1376 alloc_workload(struct intel_vgpu *vgpu)
1378 struct intel_vgpu_submission *s = &vgpu->submission;
1392 workload->vgpu = vgpu;
1400 static void read_guest_pdps(struct intel_vgpu *vgpu,
1409 intel_gvt_hypervisor_read_gpa(vgpu,
1417 struct intel_vgpu *vgpu = workload->vgpu;
1433 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
1435 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
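
For PPGTT-backed workloads the shadow mm is keyed by the guest's page-directory pointers, which live inside the ring context image. read_guest_pdps pulls those eight PDP dwords out of guest memory so intel_vgpu_get_ppgtt_mm can find or create the matching shadow mm. A sketch (the stride and the reversed fill order mirror the hardware PDP register layout and are assumptions here):

    static void read_guest_pdps(struct intel_vgpu *vgpu,
                                u64 ring_context_gpa, u32 pdps[8])
    {
            u64 gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
            int i;

            /* Eight 4-byte PDP entries, 8 bytes apart, stored
             * highest-first in the context image (assumed). */
            for (i = 0; i < 8; i++)
                    intel_gvt_hypervisor_read_gpa(vgpu,
                            gpa + i * 8, &pdps[7 - i], 4);
    }
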
1447 * intel_vgpu_create_workload - create a vGPU workload
1448 * @vgpu: a vGPU
1452 * This function is called when creating a vGPU workload.
1460 intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1463 struct intel_vgpu_submission *s = &vgpu->submission;
1464 struct list_head *q = workload_q_head(vgpu, ring_id);
1467 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1473 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
1480 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1483 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1510 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1512 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1514 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1517 if (!intel_gvt_ggtt_validate_range(vgpu, start,
1523 workload = alloc_workload(vgpu);
1537 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1539 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1549 if (!intel_gvt_ggtt_validate_range(vgpu,
1563 if (!intel_gvt_ggtt_validate_range(vgpu,
1586 if (list_empty(workload_q_head(vgpu, ring_id))) {
1594 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
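
Workload creation is where guest input gets validated: the context descriptor's LRCA must translate through the GGTT, and the ring buffer read from the guest context must land in a valid GGTT range, otherwise the vGPU is pushed into failsafe mode rather than trusted. A sketch of the front half using the helpers visible above (field names follow the execlist ring-context layout; the ring-buffer size computation from the guest's RING_CTL value is elided):

    /* The ring context is the second page of the LRC image, hence +1. */
    ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                    (u32)((desc->lrca + 1) << PAGE_SHIFT));
    if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
            gvt_vgpu_err("invalid guest context descriptor\n");
            return ERR_PTR(-EINVAL);
    }

    intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                    RING_CTX_OFF(ring_header.val), &head, 4);
    intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                    RING_CTX_OFF(ring_tail.val), &tail, 4);
    intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                    RING_CTX_OFF(rb_start.val), &start, 4);

    /* rb_size: derived from the guest RING_CTL value (elided). */
    if (!intel_gvt_ggtt_validate_range(vgpu, start, rb_size)) {
            gvt_vgpu_err("context contains invalid rb at: 0x%x\n", start);
            return ERR_PTR(-EINVAL);
    }
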
1603 * intel_vgpu_queue_workload - Queue a vGPU workload
1609 workload_q_head(workload->vgpu, workload->ring_id));
1610 intel_gvt_kick_schedule(workload->vgpu->gvt);
1611 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
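
Queueing is deliberately thin: append to the per-ring queue, kick the GVT scheduler, and wake the per-ring workload thread. The matches above give nearly the whole body; reconstructed (the list_add_tail line is inferred from the queue-head lookup):

    void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
    {
            list_add_tail(&workload->list,
                          workload_q_head(workload->vgpu, workload->ring_id));
            intel_gvt_kick_schedule(workload->vgpu->gvt);
            wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
    }
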