/*	$NetBSD: scheduler.h,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/

/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_

struct intel_gvt_workload_scheduler {
	struct intel_vgpu *current_vgpu;
	struct intel_vgpu *next_vgpu;
	struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
	bool need_reschedule;

	spinlock_t mmio_context_lock;
	/* can be null when owner is host */
	struct intel_vgpu *engine_owner[I915_NUM_ENGINES];

	wait_queue_head_t workload_complete_wq;
	struct task_struct *thread[I915_NUM_ENGINES];
	wait_queue_head_t waitq[I915_NUM_ENGINES];

	void *sched_data;
	struct intel_gvt_sched_policy_ops *sched_ops;
};
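
/*
 * Usage sketch (an assumption, not taken from this file): sched_ops is the
 * pluggable scheduling-policy vtable and sched_data holds that policy's
 * private state, so attaching a hypothetical policy might look roughly like
 *
 *	scheduler->sched_ops = &example_policy_ops;	(example name only)
 *	scheduler->sched_data = example_policy_state;
 *
 * with the policy then deciding which vgpu becomes next_vgpu.
 */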

#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
#define INDIRECT_CTX_SIZE_MASK 0x3f
struct shadow_indirect_ctx {
	struct drm_i915_gem_object *obj;
	unsigned long guest_gma;
	unsigned long shadow_gma;
	void *shadow_va;
	u32 size;
};

#define PER_CTX_ADDR_MASK 0xfffff000
struct shadow_per_ctx {
	unsigned long guest_gma;
	unsigned long shadow_gma;
	unsigned valid;
};
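
/*
 * Decode sketch (an assumption based on the masks above, not code from this
 * file): the indirect-context value packs a 64-byte-aligned graphics memory
 * address with a size field in the low bits, and the per-context pointer is
 * page-aligned, so extracting the guest fields would look roughly like
 *
 *	indirect_ctx.guest_gma = reg & INDIRECT_CTX_ADDR_MASK;
 *	indirect_ctx.size      = reg & INDIRECT_CTX_SIZE_MASK;
 *	per_ctx.guest_gma      = per_ctx_reg & PER_CTX_ADDR_MASK;
 */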

struct intel_shadow_wa_ctx {
	struct shadow_indirect_ctx indirect_ctx;
	struct shadow_per_ctx per_ctx;
};

struct intel_vgpu_workload {
	struct intel_vgpu *vgpu;
	int ring_id;
	struct i915_request *req;
	/* whether this workload has been dispatched to i915 */
	bool dispatched;
	bool shadow;      /* whether the guest request has been shadowed */
	int status;

	struct intel_vgpu_mm *shadow_mm;

	/* different submission models may need different handlers */
	int (*prepare)(struct intel_vgpu_workload *);
	int (*complete)(struct intel_vgpu_workload *);
	struct list_head list;

	DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
	void *shadow_ring_buffer_va;

	/* execlist context information */
	struct execlist_ctx_descriptor_format ctx_desc;
	struct execlist_ring_context *ring_context;
	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
	unsigned long guest_rb_head;
	bool restore_inhibit;
	struct intel_vgpu_elsp_dwords elsp_dwords;
	bool emulate_schedule_in;
	atomic_t shadow_ctx_active;
	wait_queue_head_t shadow_ctx_status_wq;
	u64 ring_context_gpa;

	/* shadow batch buffer */
	struct list_head shadow_bb;
	struct intel_shadow_wa_ctx wa_ctx;

	/* oa registers */
	u32 oactxctrl;
	u32 flex_mmio[7];
};
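
/*
 * Dispatch sketch (an assumption about how the hooks above are used, not
 * code from this file): the per-engine scheduler thread is expected to call
 * prepare() before handing the workload to i915 and complete() once the
 * request has finished, roughly
 *
 *	ret = workload->prepare(workload);
 *	... submit workload->req and wait for it ...
 *	workload->complete(workload);
 *
 * so each submission interface supplies handlers matching its own model.
 */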

struct intel_vgpu_shadow_bb {
	struct list_head list;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *va;
	u32 *bb_start_cmd_va;
	unsigned int clflush;
	bool accessing;
	unsigned long bb_offset;
	bool ppgtt;
};

#define workload_q_head(vgpu, ring_id) \
	(&(vgpu->submission.workload_q_head[ring_id]))
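
/*
 * Usage sketch (an assumption, not code from this file): workload_q_head()
 * yields the list_head of pending workloads for one engine, linked through
 * struct intel_vgpu_workload::list, so walking a ring's queue would look
 * roughly like
 *
 *	struct intel_vgpu_workload *pos;
 *
 *	list_for_each_entry(pos, workload_q_head(vgpu, ring_id), list)
 *		handle(pos);	(handle() is a placeholder, not a real function)
 */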

void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);

int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);

void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 intel_engine_mask_t engine_mask);

void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);

int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     intel_engine_mask_t engine_mask,
				     unsigned int interface);

extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;

struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
			   struct execlist_ctx_descriptor_format *desc);

void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);

void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				intel_engine_mask_t engine_mask);
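
/*
 * Lifecycle sketch (an assumption about the intended call order, not code
 * from this file): a vGPU's submission path would typically be set up, used
 * and torn down roughly like
 *
 *	intel_vgpu_setup_submission(vgpu);
 *	intel_vgpu_select_submission_ops(vgpu, engine_mask, interface);
 *	...
 *	workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
 *	intel_vgpu_queue_workload(workload);
 *	...
 *	intel_vgpu_clean_workloads(vgpu, engine_mask);
 *	intel_vgpu_clean_submission(vgpu);
 *
 * where engine_mask, interface, ring_id and desc stand in for caller-chosen
 * values.
 */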

#endif