/*	$NetBSD: vgpu.c,v 1.3 2021/12/19 11:06:55 riastradh Exp $	*/

/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong (at) intel.com>
 *    Kevin Tian <kevin.tian (at) intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao (at) intel.com>
 *    Zhi Wang <zhi.a.wang (at) intel.com>
 *    Bing Niu <bing.niu (at) intel.com>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vgpu.c,v 1.3 2021/12/19 11:06:55 riastradh Exp $");

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"

void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */
	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
	vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}

#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num)	\
	(VGPU_MAX_WEIGHT / (vgpu_num))
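
/*
 * For example, VGPU_WEIGHT(8) = 16/8 = 2 while VGPU_WEIGHT(1) = 16/1 = 16:
 * a type meant to share the GPU among fewer instances receives a
 * proportionally larger scheduling weight.
 */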

static struct {
	unsigned int low_mm;
	unsigned int high_mm;
	unsigned int fence;

	/* A vGPU with a weight of 8 will get twice as much GPU time as a
	 * vGPU with a weight of 4 on a contended host; each vGPU type is
	 * assigned a different weight. Legal weights range from 1 to 16.
	 */
	unsigned int weight;
	enum intel_vgpu_edid edid;
	char *name;
} vgpu_types[] = {
/* Fixed vGPU type table */
	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};
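
/*
 * Worked example (hypothetical SKU): with low_avail = 256 MB and
 * high_avail = 3840 MB of graphics memory to hand out, the "8" type above
 * (64 MB low, 384 MB high) would get
 * avail_instance = min(256/64, 3840/384) = min(4, 10) = 4, i.e. at most
 * four such vGPU instances could be created on that GPU.
 */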

/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize the vGPU type list based on the available resources.
 *
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;
	unsigned int min_low;

	/* A vGPU type name has the form GVTg_Vx_y, where x encodes the
	 * physical GPU generation (e.g. V4 for a BDW server, V5 for a
	 * SKL server).
	 *
	 * Depending on the physical SKU's resources, you might see vGPU
	 * types like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different
	 * types of vGPU can be created on the same physical GPU,
	 * depending on the available resources. Each vGPU type carries an
	 * "avail_instance" count indicating how many vGPU instances of
	 * that type can be created.
	 *
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);

	gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	min_low = MB_TO_BYTES(32);
	for (i = 0; i < num_types; ++i) {
		if (low_avail / vgpu_types[i].low_mm == 0)
			break;

		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
		gvt->types[i].fence = vgpu_types[i].fence;

		if (vgpu_types[i].weight < 1 ||
					vgpu_types[i].weight > VGPU_MAX_WEIGHT)
			return -EINVAL;

		gvt->types[i].weight = vgpu_types[i].weight;
		gvt->types[i].resolution = vgpu_types[i].edid;
		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);

		if (IS_GEN(gvt->dev_priv, 8))
			sprintf(gvt->types[i].name, "GVTg_V4_%s",
						vgpu_types[i].name);
		else if (IS_GEN(gvt->dev_priv, 9))
			sprintf(gvt->types[i].name, "GVTg_V5_%s",
						vgpu_types[i].name);

		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence,
			     gvt->types[i].weight,
			     vgpu_edid_str(gvt->types[i].resolution));
	}

	gvt->num_types = i;
	return 0;
}

void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}

static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
	int i;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;
	unsigned int low_gm_min, high_gm_min, fence_min;

	/* This should depend on the maximum hw resource size, but we keep
	 * the static config for now.
	 */
	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;

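	/*
	 * Worked example (hypothetical numbers): suppose that after one
	 * allocation low_gm_avail = 128 MB, high_gm_avail = 1536 MB and
	 * fence_avail = 12.  For the "4" type (128 MB low, 512 MB high,
	 * 4 fences) the loop below computes
	 * min(min(128/128, 1536/512), 12/4) = min(min(1, 3), 3) = 1,
	 * so exactly one more instance of that type can still be created.
	 */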
	for (i = 0; i < gvt->num_types; i++) {
		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
		fence_min = fence_avail / gvt->types[i].fence;
		gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
						   fence_min);

		gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
		       i, gvt->types[i].name,
		       gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
		       gvt->types[i].high_gm_size, gvt->types[i].fence);
	}
}

/**
 * intel_gvt_activate_vgpu - activate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to activate a virtual GPU.
 *
 */
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	vgpu->active = true;
	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to deactivate a virtual GPU.
 * The virtual GPU will be stopped.
 *
 */
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);

	vgpu->active = false;

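	/*
	 * Drop vgpu_lock while waiting for in-flight workloads to
	 * complete; the workload completion path takes vgpu_lock itself,
	 * so holding it across the wait would deadlock.
	 */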
	if (atomic_read(&vgpu->submission.running_workload_num)) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_stop_schedule(vgpu);

	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_release_vgpu - release a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to release a virtual GPU.
 * The virtual GPU will be stopped and all of its runtime information
 * will be destroyed.
 *
 */
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
{
	intel_gvt_deactivate_vgpu(vgpu);

	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy a virtual GPU.
 *
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&vgpu->vgpu_lock);

	WARN(vgpu->active, "vGPU is still active!\n");

	intel_gvt_debugfs_remove_vgpu(vgpu);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_submission(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	mutex_lock(&gvt->lock);
	idr_remove(&gvt->vgpu_idr, vgpu->id);
	if (idr_is_empty(&gvt->vgpu_idr))
		intel_gvt_clean_irq(gvt);
	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	vfree(vgpu);
}

#define IDLE_VGPU_IDR 0
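
/*
 * ID 0 is reserved for the idle vGPU; real vGPUs are allocated IDs
 * starting from IDLE_VGPU_IDR + 1 (see the idr_alloc() call in
 * __intel_gvt_create_vgpu() below).
 */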

/**
 * intel_gvt_create_idle_vgpu - create an idle virtual GPU
 * @gvt: GVT device
 *
 * This function is called when a user wants to create an idle virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	enum intel_engine_id i;
	int ret;

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	vgpu->id = IDLE_VGPU_IDR;
	vgpu->gvt = gvt;
	mutex_init(&vgpu->vgpu_lock);

	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_free_vgpu;

	vgpu->active = false;

	return vgpu;

out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy an idle virtual GPU.
 *
 */
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_clean_sched_policy(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	vfree(vgpu);
}

static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
			param->handle, param->low_gm_sz, param->high_gm_sz,
			param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	idr_preload(GFP_KERNEL);
	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
		GFP_KERNEL);
	idr_preload_end();
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	vgpu->sched_ctl.weight = param->weight;
	mutex_init(&vgpu->vgpu_lock);
	mutex_init(&vgpu->dmabuf_lock);
	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
	idr_init(&vgpu->object_idr);
	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_display(vgpu, param->resolution);
	if (ret)
		goto out_clean_opregion;

	ret = intel_vgpu_setup_submission(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_submission;

	intel_gvt_debugfs_add_vgpu(vgpu);

	ret = intel_gvt_hypervisor_set_opregion(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	/* TODO: add support for more platforms */
	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
	if (ret)
		goto out_clean_sched_policy;

	return vgpu;

out_clean_sched_policy:
	intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
	intel_vgpu_clean_submission(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @type: type of the vGPU to create
 *
 * This function is called when a user wants to create a virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
				struct intel_vgpu_type *type)
{
	struct intel_vgpu_creation_params param;
	struct intel_vgpu *vgpu;

	param.handle = 0;
	param.primary = 1;
	param.low_gm_sz = type->low_gm_size;
	param.high_gm_sz = type->high_gm_size;
	param.fence_sz = type->fence;
	param.weight = type->weight;
	param.resolution = type->resolution;

	/* XXX current param based on MB */
	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);

	mutex_lock(&gvt->lock);
	vgpu = __intel_gvt_create_vgpu(gvt, &param);
	if (!IS_ERR(vgpu))
		/* recalculate the remaining instance counts per type */
		intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);

	return vgpu;
}
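
/*
 * A minimal lifecycle sketch (hypothetical caller, error handling elided):
 *
 *	struct intel_vgpu *vgpu;
 *
 *	vgpu = intel_gvt_create_vgpu(gvt, &gvt->types[0]);
 *	if (IS_ERR(vgpu))
 *		return PTR_ERR(vgpu);
 *	intel_gvt_activate_vgpu(vgpu);
 *	...
 *	intel_gvt_deactivate_vgpu(vgpu);
 *	intel_gvt_release_vgpu(vgpu);
 *	intel_gvt_destroy_vgpu(vgpu);
 */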

/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: vGPU Device Model Level Reset or GT Reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when a user wants to reset a virtual GPU through
 * a device model reset or a GT reset. The caller should hold the vgpu lock.
 *
 * vGPU Device Model Level Reset (DMLR) simulates a PCI-level reset and
 * returns the whole vGPU to the default state it had when it was created.
 * This vGPU function is required for both functional and security concerns.
 * The ultimate goal of vGPU FLR is to allow a vGPU instance to be reused
 * by virtual machines: when we assign a vGPU to a virtual machine, we must
 * issue such a reset first.
 *
 * Full GT Reset and Per-Engine GT Reset are soft reset flows for the GPU
 * engines (Render, Blitter, Video, Video Enhancement), as defined by the
 * GPU spec. Unlike the FLR, a GT reset only resets the particular vGPU
 * resources named in the reset request. The guest driver can issue a GT
 * reset by programming the virtual GDRST register to reset specific
 * virtual GPU engines or all engines.
 *
 * The parameter @dmlr identifies whether we will do a DMLR or a GT reset.
 * The parameter @engine_mask specifies the engines that need to be reset.
 * If the value ALL_ENGINES is given for @engine_mask, the caller requests
 * a full GT reset in which all virtual GPU engines are reset. For an FLR,
 * @engine_mask is ignored.
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 intel_engine_mask_t engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);

	vgpu->resetting_eng = resetting_eng;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * current_vgpu will be set to NULL after stopping the
	 * scheduler when the reset is triggered by the current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_reset_submission(vgpu, resetting_eng);
	/* full GPU reset or device model level reset */
	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
		intel_vgpu_invalidate_ppgtt(vgpu);
		/* fence will not be reset during virtual reset */
		if (dmlr) {
			intel_vgpu_reset_gtt(vgpu);
			intel_vgpu_reset_resource(vgpu);
		}

		intel_vgpu_reset_mmio(vgpu, dmlr);
		populate_pvinfo_page(vgpu);
		intel_vgpu_reset_display(vgpu);

		if (dmlr) {
			intel_vgpu_reset_cfg_space(vgpu);
			/* only reset the failsafe mode when dmlr reset */
			vgpu->failsafe = false;
			vgpu->pv_notified = false;
		}
	}

	vgpu->resetting_eng = 0;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}
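
/*
 * For reference, the virtual GDRST write handler is expected to be the
 * main GT-reset caller of intel_gvt_reset_vgpu_locked() (with dmlr ==
 * false and the engine mask decoded from the guest's register write),
 * while intel_gvt_reset_vgpu() below performs the full device model
 * level reset.
 */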

/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to reset a virtual GPU.
 *
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->vgpu_lock);
}
    598