Lines Matching defs:gvt
45 #include "gvt.h"
55 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
57 if (IS_BROADWELL(gvt->dev_priv))
59 else if (IS_SKYLAKE(gvt->dev_priv))
61 else if (IS_KABYLAKE(gvt->dev_priv))
63 else if (IS_BROXTON(gvt->dev_priv))
65 else if (IS_COFFEELAKE(gvt->dev_priv))
71 bool intel_gvt_match_device(struct intel_gvt *gvt,
74 return intel_gvt_get_device_type(gvt) & device;
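The five platform checks above feed a one-bit-per-platform device type that intel_gvt_match_device() then tests registrations against. A minimal sketch of the pattern (editor's reconstruction; the D_* flag names follow GVT-g's MMIO header, but the exact bit values here are illustrative):

/* One device-type bit per supported platform (illustrative values). */
#define D_BDW (1 << 0)
#define D_SKL (1 << 1)
#define D_KBL (1 << 2)
#define D_BXT (1 << 3)
#define D_CFL (1 << 4)

unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
        if (IS_BROADWELL(gvt->dev_priv))
                return D_BDW;
        else if (IS_SKYLAKE(gvt->dev_priv))
                return D_SKL;
        else if (IS_KABYLAKE(gvt->dev_priv))
                return D_KBL;
        else if (IS_BROXTON(gvt->dev_priv))
                return D_BXT;
        else if (IS_COFFEELAKE(gvt->dev_priv))
                return D_CFL;
        return 0;
}

/* A registration applies iff its device mask contains the bit of
 * the platform GVT-g is currently running on. */
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device)
{
        return intel_gvt_get_device_type(gvt) & device;
}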
89 static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
94 hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
101 static int new_mmio_info(struct intel_gvt *gvt,
109 if (!intel_gvt_match_device(gvt, device))
124 p = find_mmio_info(gvt, info->offset);
130 /* We return -EEXIST here to make GVT-g load fail.
141 gvt->mmio.mmio_attribute[info->offset / 4] = flags;
143 hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
144 gvt->mmio.num_tracked_mmio++;
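Lines 89-144 are the registration path for tracked registers: a lookup in a <linux/hashtable.h> table keyed by register offset, and an insert that refuses duplicates. A sketch of that path, with the struct layout abridged and the new_mmio_info() signature reduced for the sketch (upstream takes the offset, size, masks, and handler pointers separately):

struct intel_gvt_mmio_info {
        u32 offset;
        struct hlist_node node;
        /* ... size, masks, read/write handlers ... */
};

static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
                                                  unsigned int offset)
{
        struct intel_gvt_mmio_info *e;

        hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset)
                if (e->offset == offset)
                        return e;
        return NULL;
}

static int new_mmio_info(struct intel_gvt *gvt,
                         struct intel_gvt_mmio_info *info, u64 flags)
{
        if (find_mmio_info(gvt, info->offset)) {
                /* Duplicate tracked register: return -EEXIST so the
                 * GVT-g load fails loudly instead of silently
                 * overriding an earlier handler. */
                return -EEXIST;
        }

        gvt->mmio.mmio_attribute[info->offset / 4] = flags;
        hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
        gvt->mmio.num_tracked_mmio++;
        return 0;
}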
151 * @gvt: a GVT device
157 int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
164 for_each_engine(engine, gvt->dev_priv, id) {
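Lines 157-164 open the offset-to-engine translation used throughout the handlers below: walk every engine and match the register offset against the engine's MMIO base. A sketch consistent with the matched lines; the low-12-bit masking is an assumption based on per-engine registers sharing the engine's 4KB MMIO page:

int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
                                     unsigned int offset)
{
        enum intel_engine_id id;
        struct intel_engine_cs *engine;

        /* Per-engine registers live inside the engine's 4KB MMIO
         * page, so masking the low 12 bits yields the engine base. */
        offset &= ~GENMASK(11, 0);

        for_each_engine(engine, gvt->dev_priv, id)
                if (engine->mmio_base == offset)
                        return id;

        return -ENODEV;
}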
182 pr_err("Detected your guest driver doesn't support GVT-g.\n");
188 pr_err("GVT Internal error for the guest\n");
207 * pv_info first, we treat guest not supporting GVT,
225 if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
261 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
291 if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) {
353 engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
463 intel_gvt_check_vblank_emulation(vgpu->gvt);
522 int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
524 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
764 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
805 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
829 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
932 if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
936 } else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
1252 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1435 if (IS_SKYLAKE(vgpu->gvt->dev_priv)
1436 || IS_KABYLAKE(vgpu->gvt->dev_priv)
1437 || IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
1447 } else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
1460 if (IS_SKYLAKE(vgpu->gvt->dev_priv)
1461 || IS_KABYLAKE(vgpu->gvt->dev_priv)
1462 || IS_COFFEELAKE(vgpu->gvt->dev_priv))
1474 * PCODE_ERROR_MASK clear means no error happened. In GVT-g we
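The comment at line 1474 states the emulation policy for the PCODE mailbox: GVT-g never drives the physical mailbox, so every guest transaction is reported as complete and error-free. A sketch of that policy, assuming i915's GEN6_PCODE_READY (busy bit) and GEN6_PCODE_ERROR_MASK (status field) bit names; the helper name is illustrative:

/* Emulate an always-successful PCODE transaction: clear the busy
 * bit and the error field in the mailbox value the guest will
 * read back. */
static void emulate_pcode_success(u32 *data0)
{
        *data0 &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
}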
1486 int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
1515 if (IS_BROXTON(vgpu->gvt->dev_priv))
1661 struct intel_gvt *gvt = vgpu->gvt;
1662 struct drm_i915_private *dev_priv = gvt->dev_priv;
1666 ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
1676 if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
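Lines 1661-1676 come from the read-from-hardware path. The guard forwards a read to the physical register only when the offset maps to no engine at all or the requesting vGPU currently owns that engine; everyone else gets their saved virtual register. An abridged sketch (the upstream condition continues past the matched line with further clauses for the ring timestamp registers, elided here):

        ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);

        /* Pass the read through only for the engine's current
         * owner (or when no engine decodes the offset); other
         * vGPUs see their saved virtual register value. */
        if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id]) {
                mmio_hw_access_pre(dev_priv);
                vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
                mmio_hw_access_post(dev_priv);
        }

        return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);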
1690 int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
1717 int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
1722 if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
1731 if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
1834 ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
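Line 1834 is the interior of the MMIO_F() registration macro that the init_*_mmio_info() tables below are written in. A hedged reconstruction of its shape (the wrapper names MMIO_D/MMIO_DH follow the GVT-g source, but the argument lists are abridged here):

#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
        ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
                f, s, am, rm, d, r, w); \
        if (ret) \
                return ret; \
} while (0)

/* Narrower wrappers fill in common defaults, e.g. a plain 4-byte
 * register, with or without custom read/write handlers: */
#define MMIO_D(reg, d)        MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)
#define MMIO_DH(reg, d, r, w) MMIO_F(reg, 4, 0, 0, 0, d, r, w)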
1882 static int init_generic_mmio_info(struct intel_gvt *gvt)
1884 struct drm_i915_private *dev_priv = gvt->dev_priv;
2699 static int init_bdw_mmio_info(struct intel_gvt *gvt)
2701 struct drm_i915_private *dev_priv = gvt->dev_priv;
2888 static int init_skl_mmio_info(struct intel_gvt *gvt)
2890 struct drm_i915_private *dev_priv = gvt->dev_priv;
3137 static int init_bxt_mmio_info(struct intel_gvt *gvt)
3139 struct drm_i915_private *dev_priv = gvt->dev_priv;
3312 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
3315 unsigned long device = intel_gvt_get_device_type(gvt);
3316 struct gvt_mmio_block *block = gvt->mmio.mmio_block;
3317 int num = gvt->mmio.num_mmio_block;
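Lines 3312-3317 open the block lookup that complements the per-register hash table: large contiguous ranges are tracked as coarse blocks rather than as individual entries. A sketch of the scan those lines begin, with the range test reconstructed under the assumption that each block records an i915_reg_t start and a byte size:

static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
                                              unsigned int offset)
{
        unsigned long device = intel_gvt_get_device_type(gvt);
        struct gvt_mmio_block *block = gvt->mmio.mmio_block;
        int num = gvt->mmio.num_mmio_block;
        int i;

        for (i = 0; i < num; i++, block++) {
                /* Skip blocks registered for other platforms. */
                if (!(device & block->device))
                        continue;
                if (offset >= i915_mmio_reg_offset(block->offset) &&
                    offset < i915_mmio_reg_offset(block->offset)
                             + block->size)
                        return block;
        }
        return NULL;
}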
3331 * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
3332 * @gvt: GVT device
3335 * information table of GVT device
3338 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
3344 hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
3347 vfree(gvt->mmio.mmio_attribute);
3348 gvt->mmio.mmio_attribute = NULL;
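Lines 3338-3348 show teardown. The matched hash_for_each_safe() at line 3344 is the _safe variant precisely because each entry is freed while the table is being walked. A sketch of the whole function, filling in only the obvious kfree() body:

void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
{
        struct hlist_node *tmp;
        struct intel_gvt_mmio_info *e;
        int i;

        /* _safe variant: entries are freed during the walk. */
        hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
                kfree(e);

        vfree(gvt->mmio.mmio_attribute);
        gvt->mmio.mmio_attribute = NULL;
}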
3363 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
3364 * @gvt: GVT device
3367 * information table for GVT device
3372 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
3374 struct intel_gvt_device_info *info = &gvt->device_info;
3375 struct drm_i915_private *dev_priv = gvt->dev_priv;
3376 int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
3379 gvt->mmio.mmio_attribute = vzalloc(size);
3380 if (!gvt->mmio.mmio_attribute)
3383 ret = init_generic_mmio_info(gvt);
3388 ret = init_bdw_mmio_info(gvt);
3394 ret = init_bdw_mmio_info(gvt);
3397 ret = init_skl_mmio_info(gvt);
3401 ret = init_bdw_mmio_info(gvt);
3404 ret = init_skl_mmio_info(gvt);
3407 ret = init_bxt_mmio_info(gvt);
3412 gvt->mmio.mmio_block = mmio_blocks;
3413 gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
3417 intel_gvt_clean_mmio_info(gvt);
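Lines 3383-3417 stack the per-generation tables, which is why init_bdw_mmio_info() appears three times above: Broadwell gets the gen8 table alone, SKL/KBL/CFL layer the gen9 table on top of it, and Broxton layers its own table on both; any failure unwinds through intel_gvt_clean_mmio_info(). A condensed sketch of that dispatch (upstream uses a goto err after every call; the error flow is folded here for brevity):

        int ret;

        ret = init_generic_mmio_info(gvt);
        if (ret)
                goto err;

        if (IS_BROADWELL(dev_priv)) {
                ret = init_bdw_mmio_info(gvt);
        } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)
                   || IS_COFFEELAKE(dev_priv)) {
                ret = init_bdw_mmio_info(gvt);    /* base gen8 table */
                if (!ret)
                        ret = init_skl_mmio_info(gvt);  /* gen9 layer */
        } else if (IS_BROXTON(dev_priv)) {
                ret = init_bdw_mmio_info(gvt);
                if (!ret)
                        ret = init_skl_mmio_info(gvt);
                if (!ret)
                        ret = init_bxt_mmio_info(gvt);  /* BXT layer */
        }
        if (ret)
                goto err;

        gvt->mmio.mmio_block = mmio_blocks;
        gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
        return 0;
err:
        intel_gvt_clean_mmio_info(gvt);
        return ret;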
3423 * @gvt: a GVT device
3430 int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
3431 int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
3434 struct gvt_mmio_block *block = gvt->mmio.mmio_block;
3438 hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
3439 ret = handler(gvt, e->offset, data);
3444 for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
3450 ret = handler(gvt,
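Lines 3430-3450 visit every tracked register in two passes: first the individually hashed entries, then every dword inside each coarse MMIO block. A sketch consistent with those lines (loop bodies reconstructed; block->size is assumed to be in bytes, hence the 4-byte stride):

int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
        int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
        void *data)
{
        struct gvt_mmio_block *block = gvt->mmio.mmio_block;
        struct intel_gvt_mmio_info *e;
        int i, j, ret;

        /* Pass 1: every individually tracked register. */
        hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
                ret = handler(gvt, e->offset, data);
                if (ret)
                        return ret;
        }

        /* Pass 2: every dword inside the coarse MMIO blocks. */
        for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++)
                for (j = 0; j < block->size; j += 4) {
                        ret = handler(gvt,
                                i915_mmio_reg_offset(block->offset) + j,
                                data);
                        if (ret)
                                return ret;
                }

        return 0;
}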
3522 * @gvt: a GVT device
3529 bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
3549 struct intel_gvt *gvt = vgpu->gvt;
3561 mmio_block = find_mmio_block(gvt, offset);
3572 mmio_info = find_mmio_info(gvt, offset);
3585 if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
3603 if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
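The two intel_gvt_mmio_has_mode_mask() tests at lines 3585 and 3603 apply the Intel GPU "masked register" convention on the read and write paths: the upper 16 bits of a written value act as per-bit write enables for the lower 16. A minimal sketch of that convention (the helper name is the editor's, not the source's):

/* Masked-register write: bits 31:16 of the incoming value are a
 * per-bit write enable for bits 15:0; unselected bits keep their
 * old contents. */
static u32 apply_mode_mask(u32 old_val, u32 new_val)
{
        u32 mask = new_val >> 16;

        return (old_val & ~mask) | (new_val & mask);
}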