Lines Matching defs:gvt
40 #include "gvt.h"
106 * @gvt : GVT device
111 int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
129 low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
130 high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
133 gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
135 if (!gvt->types)
143 gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
144 gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
145 gvt->types[i].fence = vgpu_types[i].fence;
151 gvt->types[i].weight = vgpu_types[i].weight;
152 gvt->types[i].resolution = vgpu_types[i].edid;
153 gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
156 if (IS_GEN(gvt->dev_priv, 8))
157 sprintf(gvt->types[i].name, "GVTg_V4_%s",
159 else if (IS_GEN(gvt->dev_priv, 9))
160 sprintf(gvt->types[i].name, "GVTg_V5_%s",
164 i, gvt->types[i].name,
165 gvt->types[i].avail_instance,
166 gvt->types[i].low_gm_size,
167 gvt->types[i].high_gm_size, gvt->types[i].fence,
168 gvt->types[i].weight,
169 vgpu_edid_str(gvt->types[i].resolution));
172 gvt->num_types = i;
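
The per-type quota visible above is plain integer arithmetic: the host reservations are subtracted from the aperture (low GM) and hidden (high GM) totals, and avail_instance becomes the smaller of the two quotients for that type. A minimal user-space sketch of the same calculation, using assumed example sizes rather than real hardware values:

/*
 * Standalone sketch of the avail_instance arithmetic seen in the
 * intel_gvt_init_vgpu_types() fragment above.  All sizes are assumed
 * example values, not real hardware numbers.
 */
#include <stdio.h>

#define MB(x) ((unsigned long long)(x) << 20)

int main(void)
{
	/* assumed totals and host reservations */
	unsigned long long aperture_sz = MB(256), hidden_sz = MB(2048);
	unsigned long long host_low_gm = MB(32), host_high_gm = MB(384);

	/* one assumed vGPU type: 64 MiB low GM, 384 MiB high GM */
	unsigned long long low_mm = MB(64), high_mm = MB(384);

	unsigned long long low_avail = aperture_sz - host_low_gm;
	unsigned long long high_avail = hidden_sz - host_high_gm;

	unsigned long long avail_instance = low_avail / low_mm;
	if (high_avail / high_mm < avail_instance)
		avail_instance = high_avail / high_mm;

	printf("avail_instance = %llu\n", avail_instance);
	return 0;
}

Compiled and run, this prints avail_instance = 3: with the assumed sizes the low GM is the limiting resource.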
176 void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
178 kfree(gvt->types);
181 static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
190 low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
191 gvt->gm.vgpu_allocated_low_gm_size;
192 high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
193 gvt->gm.vgpu_allocated_high_gm_size;
194 fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
195 gvt->fence.vgpu_allocated_fence_num;
197 for (i = 0; i < gvt->num_types; i++) {
198 low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
199 high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
200 fence_min = fence_avail / gvt->types[i].fence;
201 gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
205 i, gvt->types[i].name,
206 gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
207 gvt->types[i].high_gm_size, gvt->types[i].fence);
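
intel_gvt_update_vgpu_types() repeats that division after every create or destroy, but starts from what is still free (total minus what running vGPUs already hold) and folds the fence registers into the min(). A user-space sketch with assumed numbers, showing how the quota shrinks once one instance of the type has been allocated:

/*
 * Sketch of the re-computation done by intel_gvt_update_vgpu_types();
 * the totals, host reservations and already-allocated amounts are all
 * assumed example values.
 */
#include <stdio.h>

#define MB(x) ((unsigned long long)(x) << 20)

static unsigned long long min3(unsigned long long a, unsigned long long b,
			       unsigned long long c)
{
	unsigned long long m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	/* assumed totals minus host reservations (as in the init sketch) */
	unsigned long long low_total = MB(224), high_total = MB(1664);
	unsigned int fence_total = 28;

	/* assumed resources already handed to one running vGPU */
	unsigned long long allocated_low = MB(64), allocated_high = MB(384);
	unsigned int allocated_fence = 4;

	/* the same assumed type: 64 MiB low, 384 MiB high, 4 fences */
	unsigned long long low_mm = MB(64), high_mm = MB(384);
	unsigned int fence = 4;

	unsigned long long low_gm_avail = low_total - allocated_low;
	unsigned long long high_gm_avail = high_total - allocated_high;
	unsigned int fence_avail = fence_total - allocated_fence;

	printf("avail_instance = %llu\n",
	       min3(low_gm_avail / low_mm, high_gm_avail / high_mm,
		    fence_avail / fence));
	return 0;
}

With one 64 MiB / 384 MiB / 4-fence instance already allocated, the sketch prints avail_instance = 2.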
278 struct intel_gvt *gvt = vgpu->gvt;
297 mutex_lock(&gvt->lock);
298 idr_remove(&gvt->vgpu_idr, vgpu->id);
299 if (idr_is_empty(&gvt->vgpu_idr))
300 intel_gvt_clean_irq(gvt);
301 intel_gvt_update_vgpu_types(gvt);
302 mutex_unlock(&gvt->lock);
311 * @gvt: GVT device
318 struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
329 vgpu->gvt = gvt;
364 static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
379 ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
387 vgpu->gvt = gvt;
437 if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
461 idr_remove(&gvt->vgpu_idr, vgpu->id);
469 * @gvt: GVT device
477 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
495 mutex_lock(&gvt->lock);
496 vgpu = __intel_gvt_create_vgpu(gvt, &param);
499 intel_gvt_update_vgpu_types(gvt);
500 mutex_unlock(&gvt->lock);
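
Taken together, the creation fragments show the expected call pattern for a host-side caller: pick one of the advertised entries in gvt->types[], pass it to intel_gvt_create_vgpu() (which takes gvt->lock itself and refreshes the quotas), and later tear the instance down so the quotas are refreshed again. A rough in-kernel sketch, assuming gvt.h from this driver is in scope and that intel_gvt_create_vgpu() takes the selected type as its second argument; create_vgpu_by_name() is a hypothetical helper, not part of the driver:

/*
 * Illustrative only: a hypothetical helper showing how the listed
 * entry points fit together; not part of the GVT-g code.
 */
#include <linux/err.h>
#include <linux/string.h>
#include "gvt.h"

static struct intel_vgpu *create_vgpu_by_name(struct intel_gvt *gvt,
					      const char *type_name)
{
	unsigned int i;

	/* gvt->types[] and gvt->num_types were filled in by
	 * intel_gvt_init_vgpu_types() above. */
	for (i = 0; i < gvt->num_types; i++) {
		if (!strcmp(gvt->types[i].name, type_name))
			return intel_gvt_create_vgpu(gvt, &gvt->types[i]);
	}
	return ERR_PTR(-EINVAL);
}

The matching teardown is a single call to intel_gvt_destroy_vgpu(vgpu), whose path is the fragment above: it removes the instance from gvt->vgpu_idr and re-runs intel_gvt_update_vgpu_types() under gvt->lock.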
536 struct intel_gvt *gvt = vgpu->gvt;
537 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;