1 1.1 riastrad /* $NetBSD: scheduler.h,v 1.2 2021/12/18 23:45:31 riastradh Exp $ */ 2 1.1 riastrad 3 1.1 riastrad /* 4 1.1 riastrad * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. 5 1.1 riastrad * 6 1.1 riastrad * Permission is hereby granted, free of charge, to any person obtaining a 7 1.1 riastrad * copy of this software and associated documentation files (the "Software"), 8 1.1 riastrad * to deal in the Software without restriction, including without limitation 9 1.1 riastrad * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 1.1 riastrad * and/or sell copies of the Software, and to permit persons to whom the 11 1.1 riastrad * Software is furnished to do so, subject to the following conditions: 12 1.1 riastrad * 13 1.1 riastrad * The above copyright notice and this permission notice (including the next 14 1.1 riastrad * paragraph) shall be included in all copies or substantial portions of the 15 1.1 riastrad * Software. 16 1.1 riastrad * 17 1.1 riastrad * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 1.1 riastrad * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 1.1 riastrad * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 1.1 riastrad * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 1.1 riastrad * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 1.1 riastrad * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 1.1 riastrad * SOFTWARE. 
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang (at) intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao (at) intel.com>
 *    Tina Zhang <tina.zhang (at) intel.com>
 *    Chanbin Du <changbin.du (at) intel.com>
 *    Min He <min.he (at) intel.com>
 *    Bing Niu <bing.niu (at) intel.com>
 *    Zhenyu Wang <zhenyuw (at) linux.intel.com>
 *
 */

#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_

/*
 * Global workload-scheduler state shared across all vGPUs of a GVT
 * device: which vGPU's workloads are running now, which vGPU is
 * selected to run next, and the per-engine worker-thread machinery.
 */
struct intel_gvt_workload_scheduler {
	struct intel_vgpu *current_vgpu;	/* vGPU whose workloads run now */
	struct intel_vgpu *next_vgpu;		/* vGPU scheduled to run next */
	struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
	bool need_reschedule;			/* a vGPU switch is pending */

	/* protects engine_owner[] below */
	spinlock_t mmio_context_lock;
	/* can be null when owner is host */
	struct intel_vgpu *engine_owner[I915_NUM_ENGINES];

	wait_queue_head_t workload_complete_wq;
	struct task_struct *thread[I915_NUM_ENGINES];	/* per-engine worker threads */
	wait_queue_head_t waitq[I915_NUM_ENGINES];	/* per-engine wait queues */

	void *sched_data;				/* policy-private state */
	struct intel_gvt_sched_policy_ops *sched_ops;	/* scheduling-policy hooks */
};

/*
 * Field masks for the INDIRECT_CTX register image: the buffer address
 * occupies the upper bits, the size the low 6 bits.
 */
#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
#define INDIRECT_CTX_SIZE_MASK 0x3f

/* Shadow copy of a guest's indirect (workaround) context buffer. */
struct shadow_indirect_ctx {
	struct drm_i915_gem_object *obj;	/* backing shadow GEM object */
	unsigned long guest_gma;		/* guest graphics memory address */
	unsigned long shadow_gma;		/* address of the shadow copy */
	void *shadow_va;			/* CPU mapping of the shadow copy */
	u32 size;				/* buffer size */
};

/* Address mask for the per-context workaround batch buffer pointer. */
#define PER_CTX_ADDR_MASK 0xfffff000

/* Shadow copy of a guest's per-context workaround batch buffer. */
struct shadow_per_ctx {
	unsigned long guest_gma;		/* guest graphics memory address */
	unsigned long shadow_gma;		/* address of the shadow copy */
	unsigned valid;				/* nonzero once the shadow is usable */
};

/* Workaround-context pair (indirect + per-context) shadowed for a workload. */
struct intel_shadow_wa_ctx {
	struct shadow_indirect_ctx indirect_ctx;
	struct shadow_per_ctx per_ctx;

};

/*
 * One guest submission captured from a vGPU's execlist, shadowed so it
 * can be dispatched as a host i915 request.
 */
struct intel_vgpu_workload {
	struct intel_vgpu *vgpu;		/* owning vGPU */
	int ring_id;				/* target engine */
	struct i915_request *req;		/* host request backing this workload */
	/* if this workload has been dispatched to i915? */
	bool dispatched;
	bool shadow;      /* if workload has done shadow of guest request */
	int status;				/* completion status */

	struct intel_vgpu_mm *shadow_mm;	/* shadow page tables for this workload */

	/* different submission model may need different handler */
	int (*prepare)(struct intel_vgpu_workload *);
	int (*complete)(struct intel_vgpu_workload *);
	struct list_head list;			/* entry in the per-engine workload queue */

	DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
	void *shadow_ring_buffer_va;		/* CPU mapping of the shadowed ring buffer */

	/* execlist context information */
	struct execlist_ctx_descriptor_format ctx_desc;
	struct execlist_ring_context *ring_context;
	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
	unsigned long guest_rb_head;		/* ring head as the guest last wrote it */
	bool restore_inhibit;
	struct intel_vgpu_elsp_dwords elsp_dwords;
	bool emulate_schedule_in;
	atomic_t shadow_ctx_active;
	wait_queue_head_t shadow_ctx_status_wq;
	u64 ring_context_gpa;			/* guest physical addr of the ring context */

	/* shadow batch buffer */
	struct list_head shadow_bb;
	struct intel_shadow_wa_ctx wa_ctx;

	/* oa registers */
	u32 oactxctrl;
	u32 flex_mmio[7];
};

/*
 * A shadowed guest batch buffer attached to a workload
 * (presumably linked on workload->shadow_bb — confirm against callers).
 */
struct intel_vgpu_shadow_bb {
	struct list_head list;			/* list linkage */
	struct drm_i915_gem_object *obj;	/* shadow GEM object */
	struct i915_vma *vma;			/* mapping of obj into the GTT */
	void *va;				/* CPU mapping of obj */
	u32 *bb_start_cmd_va;			/* location of the start command to patch */
	unsigned int clflush;
	bool accessing;
	unsigned long bb_offset;		/* offset of the batch within obj */
	bool ppgtt;				/* batch addressed via ppgtt (vs. ggtt) */
};

/* Head of a vGPU's pending-workload queue for the given engine. */
#define workload_q_head(vgpu, ring_id) \
	(&(vgpu->submission.workload_q_head[ring_id]))

/* Queue a prepared workload for execution. */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);

/* Set up / tear down the device-wide workload scheduler. */
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);

/* Block until the vGPU has no in-flight workloads. */
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);

/* Per-vGPU submission state lifecycle. */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);

void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 intel_engine_mask_t engine_mask);

void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);

/* Bind the submission interface (e.g. execlist) for the given engines. */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     intel_engine_mask_t engine_mask,
				     unsigned int interface);

extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;

/* Allocate a workload for the given engine from a guest context descriptor. */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
			   struct execlist_ctx_descriptor_format *desc);

void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);

/* Drop all queued (not yet dispatched) workloads on the given engines. */
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				intel_engine_mask_t engine_mask);

#endif