Lines matching defs:workload (each entry is the source line number followed by that line's text; non-matching lines are omitted, so adjacent fragments are not contiguous in the file)
64 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
67 workload->req->context->state->obj;
71 if (WARN_ON(!workload->shadow_mm))
74 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
80 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
89 static void sr_oa_regs(struct intel_vgpu_workload *workload,
92 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
106 if (workload->ring_id != RCS0)
110 workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
112 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
115 workload->flex_mmio[i] = reg_state[state_offset + 1];
120 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
122 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
127 reg_state[state_offset + 1] = workload->flex_mmio[i];
132 static int populate_shadow_context(struct intel_vgpu_workload *workload)
134 struct intel_vgpu *vgpu = workload->vgpu;
136 int ring_id = workload->ring_id;
138 workload->req->context->state->obj;
148 sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
150 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
153 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
171 workload->ring_context_gpa +
177 sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
183 gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
184 workload->ctx_desc.lrca);
196 (u32)((workload->ctx_desc.lrca + i) <<
240 struct intel_vgpu_workload *workload;
257 workload = scheduler->current_workload[ring_id];
258 if (unlikely(!workload))
264 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
267 workload->vgpu, ring_id);
268 scheduler->engine_owner[ring_id] = workload->vgpu;
271 ring_id, workload->vgpu->id);
273 atomic_set(&workload->shadow_ctx_active, 1);
276 save_ring_hw_state(workload->vgpu, ring_id);
277 atomic_set(&workload->shadow_ctx_active, 0);
280 save_ring_hw_state(workload->vgpu, ring_id);
286 wake_up(&workload->shadow_ctx_status_wq);
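The fragments above come from the context status notifier: it records the engine owner, flips shadow_ctx_active around the hardware's use of the shadow context, saves ring state, and wakes shadow_ctx_status_wq; the completion path further down (lines 911-912) sleeps on that queue until the flag drops. A minimal sketch of this atomic-flag-plus-waitqueue handshake, using hypothetical demo_* names and only the standard <linux/atomic.h> and <linux/wait.h> primitives (the fields would be initialised the way lines 1388-1389 show, with init_waitqueue_head() and atomic_set()):

#include <linux/atomic.h>
#include <linux/wait.h>

/* Sketch of the handshake pattern only; the real notifier also tracks
 * engine ownership and saves ring HW state. */
struct demo_workload {
        atomic_t shadow_ctx_active;             /* 1 while the HW runs the shadow context */
        wait_queue_head_t shadow_ctx_status_wq;
};

/* notifier side: called when the shadow context is scheduled in or out */
static void demo_ctx_status_change(struct demo_workload *w, bool scheduled_in)
{
        atomic_set(&w->shadow_ctx_active, scheduled_in ? 1 : 0);
        wake_up(&w->shadow_ctx_status_wq);
}

/* completion side: block until the context has been switched out */
static void demo_wait_ctx_switched_out(struct demo_workload *w)
{
        wait_event(w->shadow_ctx_status_wq,
                   !atomic_read(&w->shadow_ctx_active));
}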
292 struct intel_vgpu_workload *workload)
301 desc |= workload->ctx_desc.addressing_mode <<
307 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
309 struct intel_vgpu *vgpu = workload->vgpu;
310 struct i915_request *req = workload->req;
337 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
340 workload->rb_len);
344 shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
347 workload->shadow_ring_buffer_va = cs;
350 workload->rb_len);
352 cs += workload->rb_len / sizeof(u32);
353 intel_ring_advance(workload->req, cs);
370 static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
373 struct intel_vgpu_mm *mm = workload->shadow_mm;
390 intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
392 struct intel_vgpu *vgpu = workload->vgpu;
396 if (workload->req)
399 rq = i915_request_create(s->shadow[workload->ring_id]);
405 workload->req = i915_request_get(rq);
410 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
412 * @workload: an abstract entity for each execlist submission.
414  * This function is called before the workload is submitted to i915, to make
415 * sure the content of the workload is valid.
417 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
419 struct intel_vgpu *vgpu = workload->vgpu;
425 if (workload->shadow)
428 if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
429 shadow_context_descriptor_update(s->shadow[workload->ring_id],
430 workload);
432 ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
436 if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
437 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
442 workload->shadow = true;
445 release_shadow_wa_ctx(&workload->wa_ctx);
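The kernel-doc above (lines 410-415) describes intel_gvt_scan_and_shadow_workload() as the validation step that runs before the workload is handed to i915; the fragments that follow it show what that amounts to: refresh the per-ring shadow context descriptor once, scan and shadow the guest ring buffer, and, on RCS0, scan and shadow the indirect-context workaround batch. A rough skeleton of that order, reassembled from the matching lines only and assuming the gvt-internal headers; the early returns stand in for error labels that are not visible here:

/* Skeleton reassembled from lines 425-445; locking and omitted checks elided. */
static int scan_and_shadow_sketch(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu_submission *s = &workload->vgpu->submission;
        int ret;

        if (workload->shadow)                   /* 425: already shadowed */
                return 0;

        /* 428-430: update the shadow context descriptor once per ring */
        if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
                shadow_context_descriptor_update(s->shadow[workload->ring_id],
                                                 workload);

        ret = intel_gvt_scan_and_shadow_ringbuffer(workload);  /* 432 */
        if (ret)
                return ret;

        /* 436-437: RCS-only indirect-context workaround batch */
        if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret) {
                        release_shadow_wa_ctx(&workload->wa_ctx);  /* 445 */
                        return ret;
                }
        }

        workload->shadow = true;                /* 442 */
        return 0;
}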
449 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
451 static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
453 struct intel_gvt *gvt = workload->vgpu->gvt;
458 list_for_each_entry(bb, &workload->shadow_bb, list) {
467 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
513 workload->req,
524 release_shadow_batch_buffer(workload);
530 struct intel_vgpu_workload *workload =
532 struct i915_request *rq = workload->req;
573 static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
575 struct intel_vgpu *vgpu = workload->vgpu;
579 ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
580 vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
583 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
587 if (list_empty(&workload->shadow_bb))
590 bb = list_first_entry(&workload->shadow_bb,
593 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
612 static int prepare_workload(struct intel_vgpu_workload *workload)
614 struct intel_vgpu *vgpu = workload->vgpu;
616 int ring = workload->ring_id;
619 ret = intel_vgpu_pin_mm(workload->shadow_mm);
625 if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
626 !workload->shadow_mm->ppgtt_mm.shadowed) {
627 gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
631 update_shadow_pdps(workload);
633 set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
635 ret = intel_vgpu_sync_oos_pages(workload->vgpu);
641 ret = intel_vgpu_flush_post_shadow(workload->vgpu);
647 ret = copy_workload_to_ring_buffer(workload);
653 ret = prepare_shadow_batch_buffer(workload);
659 ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
665 if (workload->prepare) {
666 ret = workload->prepare(workload);
673 release_shadow_wa_ctx(&workload->wa_ctx);
675 release_shadow_batch_buffer(workload);
677 intel_vgpu_unpin_mm(workload->shadow_mm);
681 static int dispatch_workload(struct intel_vgpu_workload *workload)
683 struct intel_vgpu *vgpu = workload->vgpu;
685 int ring_id = workload->ring_id;
688 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
689 ring_id, workload);
693 ret = intel_gvt_workload_req_alloc(workload);
697 ret = intel_gvt_scan_and_shadow_workload(workload);
701 ret = populate_shadow_context(workload);
703 release_shadow_wa_ctx(&workload->wa_ctx);
707 ret = prepare_workload(workload);
713 rq = fetch_and_zero(&workload->req);
717 if (!IS_ERR_OR_NULL(workload->req)) {
718 gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
719 ring_id, workload->req);
720 i915_request_add(workload->req);
721 workload->dispatched = true;
725 workload->status = ret;
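Read in order, the dispatch_workload() fragments above (lines 681-725) trace the submission pipeline: allocate the i915 request, scan and shadow the workload, populate the shadow context, prepare the workload, then hand the request to i915 and record the result. A simplified skeleton of that flow, assuming the gvt-internal headers; the real function's locking, tracing and exact error unwinding are left out, and the goto/label structure here is an assumption:

static int dispatch_sketch(struct intel_vgpu_workload *workload)
{
        int ret;

        ret = intel_gvt_workload_req_alloc(workload);           /* 693 */
        if (ret)
                goto out;

        ret = intel_gvt_scan_and_shadow_workload(workload);     /* 697 */
        if (ret)
                goto out;

        ret = populate_shadow_context(workload);                /* 701 */
        if (ret) {
                release_shadow_wa_ctx(&workload->wa_ctx);       /* 703 */
                goto out;
        }

        ret = prepare_workload(workload);                       /* 707 */
out:
        if (ret && workload->req)
                /* drop the half-built request on failure (cf. line 713) */
                i915_request_put(fetch_and_zero(&workload->req));

        if (!IS_ERR_OR_NULL(workload->req)) {                   /* 717 */
                i915_request_add(workload->req);                /* 720 */
                workload->dispatched = true;                    /* 721 */
        }
        if (ret)
                workload->status = ret;                         /* 725 */
        return ret;
}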
734 struct intel_vgpu_workload *workload = NULL;
739 * no current vgpu / will be scheduled out / no workload
757  * still have current workload, maybe the workload dispatcher
761 workload = scheduler->current_workload[ring_id];
762 gvt_dbg_sched("ring id %d still have current workload %p\n",
763 ring_id, workload);
768  * pick a workload as the current workload
769  * once the current workload is set, schedule policy routines
770  * will wait until the current workload is finished when trying to
777 workload = scheduler->current_workload[ring_id];
779 gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
781 atomic_inc(&workload->vgpu->submission.running_workload_num);
784 return workload;
787 static void update_guest_context(struct intel_vgpu_workload *workload)
789 struct i915_request *rq = workload->req;
790 struct intel_vgpu *vgpu = workload->vgpu;
803 gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
804 workload->ctx_desc.lrca);
806 head = workload->rb_head;
807 tail = workload->rb_tail;
808 wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
819 ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
833 (u32)((workload->ctx_desc.lrca + i) <<
848 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
849 RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
855 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
864 workload->ring_context_gpa +
896 struct intel_vgpu_workload *workload =
898 struct intel_vgpu *vgpu = workload->vgpu;
900 struct i915_request *rq = workload->req;
906  /* For a workload with a request, we need to wait for the context
908  * For a workload without a request, directly complete the workload.
911 wait_event(workload->shadow_ctx_status_wq,
912 !atomic_read(&workload->shadow_ctx_active));
915 * be set to -EIO. Use -EIO to set workload status so
919 if (likely(workload->status == -EINPROGRESS)) {
920 if (workload->req->fence.error == -EIO)
921 workload->status = -EIO;
923 workload->status = 0;
926 if (!workload->status &&
928 update_guest_context(workload);
930 for_each_set_bit(event, workload->pending_events,
935 i915_request_put(fetch_and_zero(&workload->req));
938 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
939 ring_id, workload, workload->status);
943 list_del_init(&workload->list);
945 if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
946  /* if workload->status is not successful, it means the HW GPU
957  * the workload cleanup here doesn't have any impact.
962 workload->complete(workload);
985 struct intel_vgpu_workload *workload = NULL;
994 gvt_dbg_core("workload thread for ring %d started\n", ring_id);
999 workload = pick_next_workload(gvt, ring_id);
1000 if (workload)
1007 if (!workload)
1010 gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
1011 workload->ring_id, workload,
1012 workload->vgpu->id);
1016 gvt_dbg_sched("ring id %d will dispatch workload %p\n",
1017 workload->ring_id, workload);
1024 * workload. The vGPU may use these registers for checking
1026 * in this workload.
1028 update_vreg_in_ctx(workload);
1030 ret = dispatch_workload(workload);
1033 vgpu = workload->vgpu;
1034 gvt_vgpu_err("fail to dispatch workload, skip\n");
1038 gvt_dbg_sched("ring id %d wait workload %p\n",
1039 workload->ring_id, workload);
1040 i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
1043 gvt_dbg_sched("will complete workload %p, status: %d\n",
1044 workload, workload->status);
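The comments and debug messages above belong to the per-ring scheduler thread: it picks the next workload, mirrors the guest's ring registers into the vGPU vregs (see the comment at lines 1024-1026), dispatches, waits for the i915 request, and then completes the workload. A condensed sketch of that loop; the thread parameter struct and the completion helper's name are inferred from context rather than shown in the fragments:

struct thread_param_sketch {            /* assumption: how gvt + ring_id reach the thread */
        struct intel_gvt *gvt;
        int ring_id;
};

static int workload_thread_sketch(void *priv)
{
        struct thread_param_sketch *p = priv;
        struct intel_vgpu_workload *workload;

        while (!kthread_should_stop()) {
                workload = pick_next_workload(p->gvt, p->ring_id);      /* 999 */
                if (!workload)
                        continue;       /* 1007: nothing runnable, go back to waiting */

                update_vreg_in_ctx(workload);           /* 1028 */

                if (dispatch_workload(workload) == 0)   /* 1030 */
                        i915_request_wait(workload->req, 0,
                                          MAX_SCHEDULE_TIMEOUT);        /* 1040 */

                /* completion: status logging at 1043-1044 and the
                 * complete-side fragments at 896-962 */
                complete_current_workload(p->gvt, p->ring_id);
        }
        return 0;
}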
1079 gvt_dbg_core("clean workload scheduler\n");
1097 gvt_dbg_core("init workload scheduler\n");
1114 "gvt workload %d", i);
1116 gvt_err("fail to create workload thread\n");
1356 * intel_vgpu_destroy_workload - destroy a vGPU workload
1357 * @workload: workload to destroy
1359  * This function is called when destroying a vGPU workload.
1362 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
1364 struct intel_vgpu_submission *s = &workload->vgpu->submission;
1366 release_shadow_batch_buffer(workload);
1367 release_shadow_wa_ctx(&workload->wa_ctx);
1369 if (workload->shadow_mm)
1370 intel_vgpu_mm_put(workload->shadow_mm);
1372 kmem_cache_free(s->workloads, workload);
1379 struct intel_vgpu_workload *workload;
1381 workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
1382 if (!workload)
1385 INIT_LIST_HEAD(&workload->list);
1386 INIT_LIST_HEAD(&workload->shadow_bb);
1388 init_waitqueue_head(&workload->shadow_ctx_status_wq);
1389 atomic_set(&workload->shadow_ctx_active, 0);
1391 workload->status = -EINPROGRESS;
1392 workload->vgpu = vgpu;
1394 return workload;
1413 static int prepare_mm(struct intel_vgpu_workload *workload)
1415 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
1417 struct intel_vgpu *vgpu = workload->vgpu;
1433 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
1435 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
1439 workload->shadow_mm = mm;
1447 * intel_vgpu_create_workload - create a vGPU workload
1452 * This function is called when creating a vGPU workload.
1466 struct intel_vgpu_workload *workload = NULL;
1494 gvt_dbg_el("ring id %d cur workload == last\n",
1507 gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
1523 workload = alloc_workload(vgpu);
1524 if (IS_ERR(workload))
1525 return workload;
1527 workload->ring_id = ring_id;
1528 workload->ctx_desc = *desc;
1529 workload->ring_context_gpa = ring_context_gpa;
1530 workload->rb_head = head;
1531 workload->guest_rb_head = guest_head;
1532 workload->rb_tail = tail;
1533 workload->rb_start = start;
1534 workload->rb_ctl = ctl;
1542 workload->wa_ctx.indirect_ctx.guest_gma =
1544 workload->wa_ctx.indirect_ctx.size =
1548 if (workload->wa_ctx.indirect_ctx.size != 0) {
1550 workload->wa_ctx.indirect_ctx.guest_gma,
1551 workload->wa_ctx.indirect_ctx.size)) {
1553 workload->wa_ctx.indirect_ctx.guest_gma);
1554 kmem_cache_free(s->workloads, workload);
1559 workload->wa_ctx.per_ctx.guest_gma =
1561 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
1562 if (workload->wa_ctx.per_ctx.valid) {
1564 workload->wa_ctx.per_ctx.guest_gma,
1567 workload->wa_ctx.per_ctx.guest_gma);
1568 kmem_cache_free(s->workloads, workload);
1574 gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
1575 workload, ring_id, head, tail, start, ctl);
1577 ret = prepare_mm(workload);
1579 kmem_cache_free(s->workloads, workload);
1583 /* Only scan and shadow the first workload in the queue
1588 ret = intel_gvt_scan_and_shadow_workload(workload);
1595 intel_vgpu_destroy_workload(workload);
1599 return workload;
1603  * intel_vgpu_queue_workload - Queue a vGPU workload
1604  * @workload: the workload to queue
1606 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
1608 list_add_tail(&workload->list,
1609 workload_q_head(workload->vgpu, workload->ring_id));
1610 intel_gvt_kick_schedule(workload->vgpu->gvt);
1611 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
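Taken together, the exported entry points in this listing give the workload lifecycle: intel_vgpu_create_workload() allocates the workload, fills it from the guest ring context and shadows the first one in the queue (1447-1599), intel_vgpu_queue_workload() adds it to the per-ring queue and kicks the scheduler (1606-1611), the per-ring thread dispatches and completes it, and intel_vgpu_destroy_workload() drops the shadow resources (1362-1372). A minimal caller-side sketch; the create call's full signature is not part of the fragments above, so treat it as an assumption:

/* Hypothetical caller; in GVT-g the descriptor actually arrives via the
 * guest's execlist (ELSP) submission path. */
static int submit_guest_ctx_sketch(struct intel_vgpu *vgpu, int ring_id,
                                   struct execlist_ctx_descriptor_format *desc)
{
        struct intel_vgpu_workload *workload;

        /* assumed signature: vgpu + ring + guest context descriptor */
        workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
        if (IS_ERR(workload))
                return PTR_ERR(workload);

        /* enqueue and wake the ring's scheduler thread (1608-1611); when the
         * workload finishes, workload->complete() runs (962) and teardown
         * ends in intel_vgpu_destroy_workload() (1362) */
        intel_vgpu_queue_workload(workload);
        return 0;
}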