Lines Matching defs:vgpu
59 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
62 return vgpu_gmadr_is_valid(vgpu, addr);
64 if (vgpu_gmadr_is_aperture(vgpu, addr) &&
65 vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
67 else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
68 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
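Lines 59-68 are intel_gvt_ggtt_validate_range(): a range is accepted only when [addr, addr + size - 1] falls entirely inside the vGPU's aperture or entirely inside its hidden GM region, and a zero size degrades to a single-address check. A minimal standalone sketch of that test, with a hypothetical gm_range struct and in_range() helper standing in for vgpu_gmadr_is_aperture()/vgpu_gmadr_is_hidden():

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the vGPU's two guest-memory windows. */
struct gm_range {
        uint64_t base;
        uint64_t size;
};

static bool in_range(const struct gm_range *r, uint64_t addr)
{
        return addr >= r->base && addr < r->base + r->size;
}

/* Valid only if [addr, addr + size - 1] sits entirely in one window. */
static bool validate_range(const struct gm_range *aperture,
                           const struct gm_range *hidden,
                           uint64_t addr, uint64_t size)
{
        if (size == 0)
                return in_range(aperture, addr) || in_range(hidden, addr);
        if (in_range(aperture, addr) && in_range(aperture, addr + size - 1))
                return true;
        return in_range(hidden, addr) && in_range(hidden, addr + size - 1);
}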
77 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
79 if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
83 if (vgpu_gmadr_is_aperture(vgpu, g_addr))
84 *h_addr = vgpu_aperture_gmadr_base(vgpu)
85 + (g_addr - vgpu_aperture_offset(vgpu));
87 *h_addr = vgpu_hidden_gmadr_base(vgpu)
88 + (g_addr - vgpu_hidden_offset(vgpu));
93 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
95 if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
99 if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
100 *g_addr = vgpu_aperture_gmadr_base(vgpu)
101 + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
103 *g_addr = vgpu_hidden_gmadr_base(vgpu)
104 + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
108 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
114 ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
123 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
129 ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
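The four helpers above (lines 77-129) rebase addresses between the guest's and the host's view of the aperture and hidden windows; the *_index variants simply shift a GGTT page index by I915_GTT_PAGE_SHIFT before translating and shift back afterwards. A standalone sketch of the guest-to-host arithmetic, using hypothetical base addresses in place of the real vgpu_aperture_gmadr_base()/vgpu_aperture_offset() values; the host-to-guest direction is the same rebasing with the two bases swapped:

#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SHIFT 12  /* 4KB GGTT pages, as I915_GTT_PAGE_SHIFT */

/* Hypothetical bases: where the vGPU's aperture slice starts in the
 * guest address map vs. where it really sits in the host address map. */
static const uint64_t guest_aperture_base = 0x00000000;
static const uint64_t host_aperture_base  = 0x20000000;

static uint64_t gmadr_g2h(uint64_t g_addr)
{
        return host_aperture_base + (g_addr - guest_aperture_base);
}

static uint64_t index_g2h(uint64_t g_index)
{
        return gmadr_g2h(g_index << GTT_PAGE_SHIFT) >> GTT_PAGE_SHIFT;
}

int main(void)
{
        printf("guest GGTT index 0x10 -> host GGTT index 0x%llx\n",
               (unsigned long long)index_g2h(0x10));
        return 0;
}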
308 struct intel_vgpu *vgpu)
310 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
317 ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
323 e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
333 struct intel_vgpu *vgpu)
335 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
342 ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
348 write_pte64(vgpu->gvt->dev_priv, index, e->val64);
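Lines 308-348 belong to the generic entry accessors: an entry in a guest page table is read or written at gpa + (index << gtt_entry_size_shift) through the hypervisor, while shadow GGTT entries go straight to hardware via read_pte64()/write_pte64(). A sketch of the offset arithmetic only; the shift value 3 assumes 8-byte GEN8 entries, where the real code reads it from device_info.gtt_entry_size_shift:

#include <stdint.h>

#define GTT_ENTRY_SIZE_SHIFT 3  /* assumed: 8-byte entries */

/* Guest physical address of entry 'index' inside a page table that
 * starts at 'table_gpa'; this is the address the read_gpa/write_gpa
 * calls on lines 317 and 342 operate on. */
static uint64_t entry_gpa(uint64_t table_gpa, uint64_t index)
{
        return table_gpa + (index << GTT_ENTRY_SIZE_SHIFT);
}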
558 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
565 entry, index, false, 0, mm->vgpu);
585 vgpu->gvt->gtt.pte_ops;
589 entry, index, false, 0, mm->vgpu);
607 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
613 false, 0, mm->vgpu);
619 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
624 false, 0, mm->vgpu);
630 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
634 pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
640 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
644 pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
656 struct intel_gvt *gvt = spt->vgpu->gvt;
667 spt->vgpu);
685 struct intel_gvt *gvt = spt->vgpu->gvt;
696 spt->vgpu);
737 static int detach_oos_page(struct intel_vgpu *vgpu,
742 struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
744 trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
749 radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
753 detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
755 intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
762 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
770 radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
803 struct intel_vgpu *vgpu, unsigned long gfn)
807 track = intel_vgpu_find_page_track(vgpu, gfn);
816 struct intel_vgpu *vgpu, unsigned long mfn)
818 return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
825 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
827 struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
835 if (reclaim_one_ppgtt_mm(vgpu->gvt))
842 spt->vgpu = vgpu;
860 ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
875 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
881 spt = ppgtt_alloc_spt(vgpu, type);
888 ret = intel_vgpu_register_page_track(vgpu, gfn,
899 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
905 ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
914 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
920 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
931 trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
939 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
945 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
948 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
967 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
970 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
982 struct intel_vgpu *vgpu = spt->vgpu;
983 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
991 if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
994 intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
999 struct intel_vgpu *vgpu = spt->vgpu;
1004 trace_spt_change(spt->vgpu->id, "die", spt,
1031 spt->vgpu, &e);
1040 trace_spt_change(spt->vgpu->id, "release", spt,
1050 static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
1052 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1055 u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
1069 struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
1071 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1079 ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
1081 spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
1104 spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
1110 ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
1118 trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
1135 struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
1149 * @vgpu: target vgpu
1155 static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
1158 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1161 if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
1164 pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
1171 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
1175 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1187 sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
1192 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
1218 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
1222 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1238 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
1249 static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
1253 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
1275 return split_64KB_gtt_entry(vgpu, spt, index, &se);
1278 ret = is_2MB_gtt_possible(vgpu, ge);
1280 return split_2MB_gtt_entry(vgpu, spt, index, &se);
1293 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
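Lines 1171-1293 cover the huge-page split paths: when a guest 2MB (or 64KB) entry cannot be shadowed at the same page size, it is broken into 4KB shadow PTEs covering the same guest frames. A sketch of the 2MB case only; write_pte() is a hypothetical stand-in for the per-subpage dma-map (line 1192) plus shadow-entry store done by the real code:

#include <stdint.h>

#define PTES_PER_2MB 512  /* 2MB / 4KB */

/* Illustration: a 2MB guest entry at 'start_gfn' is shadowed by 512
 * consecutive 4KB entries. The real code maps each subpage through the
 * hypervisor and stores the resulting DMA address; here the guest frame
 * number is passed through as a placeholder. */
static void split_2mb(uint64_t start_gfn,
                      void (*write_pte)(unsigned int index, uint64_t pfn))
{
        for (unsigned int i = 0; i < PTES_PER_2MB; i++)
                write_pte(i, start_gfn + i);
}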
1305 struct intel_vgpu *vgpu = spt->vgpu;
1306 struct intel_gvt *gvt = vgpu->gvt;
1313 trace_spt_change(spt->vgpu->id, "born", spt,
1318 s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1328 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
1334 ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
1349 struct intel_vgpu *vgpu = spt->vgpu;
1350 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1353 trace_spt_guest_change(spt->vgpu->id, "remove", spt,
1363 vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
1368 intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
1394 struct intel_vgpu *vgpu = spt->vgpu;
1399 trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
1406 s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
1415 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
1426 static int sync_oos_page(struct intel_vgpu *vgpu,
1429 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1430 struct intel_gvt *gvt = vgpu->gvt;
1437 trace_oos_change(vgpu->id, "sync", oos_page->id,
1445 ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
1447 spt->guest_page.gfn << PAGE_SHIFT, vgpu);
1453 trace_oos_sync(vgpu->id, oos_page->id,
1457 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
1461 ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
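sync_oos_page() (lines 1426-1461) re-synchronizes an out-of-sync guest page table by comparing the snapshot kept in oos_page->mem against the live guest entries and re-shadowing only the indexes that changed. A standalone sketch of that compare-and-repopulate loop; reshadow() is a hypothetical stand-in for ppgtt_populate_shadow_entry():

#include <stddef.h>
#include <stdint.h>

#define ENTRIES_PER_PT 512  /* 4KB page table, 8-byte entries */

/* Illustration of the sync step: 'stale' is the cached snapshot,
 * 'live' is the current guest page table. */
static void sync_oos(uint64_t *stale, const uint64_t *live,
                     void (*reshadow)(size_t index, uint64_t entry))
{
        for (size_t i = 0; i < ENTRIES_PER_PT; i++) {
                if (stale[i] == live[i])
                        continue;
                reshadow(i, live[i]);
                stale[i] = live[i];
        }
}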
1469 static int detach_oos_page(struct intel_vgpu *vgpu,
1472 struct intel_gvt *gvt = vgpu->gvt;
1475 trace_oos_change(vgpu->id, "detach", oos_page->id,
1491 struct intel_gvt *gvt = spt->vgpu->gvt;
1494 ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
1505 trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
1515 ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
1519 trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
1523 return sync_oos_page(spt->vgpu, oos_page);
1528 struct intel_gvt *gvt = spt->vgpu->gvt;
1541 ret = detach_oos_page(spt->vgpu, oos_page);
1557 trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
1560 list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
1561 return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
1565 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
1566 * @vgpu: a vGPU
1569 * to sync all the out-of-sync shadow pages for a vGPU
1574 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
1583 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
1600 struct intel_vgpu *vgpu = spt->vgpu;
1602 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1634 vgpu->gtt.scratch_pt[type].page_mfn);
1641 vgpu->gtt.scratch_pt[type].page_mfn);
1645 vgpu->gtt.scratch_pt[type].page_mfn);
1674 &spt->vgpu->gtt.post_shadow_list_head);
1679 * @vgpu: a vGPU
1682 * to flush all the post shadows for a vGPU.
1687 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1695 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
1718 struct intel_vgpu *vgpu = spt->vgpu;
1719 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1720 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1753 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1766 false, 0, vgpu);
1781 struct intel_vgpu *vgpu = mm->vgpu;
1782 struct intel_gvt *gvt = vgpu->gvt;
1797 ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
1801 trace_spt_guest_change(vgpu->id, "destroy root pointer",
1811 struct intel_vgpu *vgpu = mm->vgpu;
1812 struct intel_gvt *gvt = vgpu->gvt;
1830 trace_spt_guest_change(vgpu->id, __func__, NULL,
1833 spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1842 trace_spt_guest_change(vgpu->id, "populate root pointer",
1852 static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
1860 mm->vgpu = vgpu;
1873 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
1874 * @vgpu: a vGPU
1878 * This function is used to create a ppgtt mm object for a vGPU.
1883 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
1886 struct intel_gvt *gvt = vgpu->gvt;
1890 mm = vgpu_alloc_mm(vgpu);
1916 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1925 static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1930 mm = vgpu_alloc_mm(vgpu);
1936 nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
1939 vgpu->gvt->device_info.gtt_entry_size));
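intel_vgpu_create_ggtt_mm() (lines 1925-1939) sizes the vGPU's virtual GGTT from the total graphics memory size and the per-entry size. The arithmetic, as a sketch with hypothetical numbers; the real values come from gvt_ggtt_gm_sz() and device_info.gtt_entry_size:

#include <stdint.h>

#define GM_SIZE        (8ULL << 20)  /* hypothetical 8MB GM space */
#define GTT_PAGE_SHIFT 12
#define GTT_ENTRY_SIZE 8

static uint64_t ggtt_entries(void)
{
        return GM_SIZE >> GTT_PAGE_SHIFT;        /* one entry per 4KB page */
}

static uint64_t ggtt_backing_bytes(void)
{
        return ggtt_entries() * GTT_ENTRY_SIZE;  /* bytes for the entry array */
}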
1952 * This function is used to destroy a mm object for a vGPU
1960 gvt_err("vgpu mm pin count bug detected\n");
1965 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1967 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1978 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
1979 * @mm: a vGPU mm object
1981 * This function is called when the user no longer wants to use a vGPU mm object
1989 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
1990 * @mm: target vgpu mm
1992 * This function is called when the user wants to use a vGPU mm object. If this
2010 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2012 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
2013 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2047 struct intel_vgpu *vgpu = mm->vgpu;
2048 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2051 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
2075 struct intel_vgpu *vgpu = mm->vgpu;
2076 struct intel_gvt *gvt = vgpu->gvt;
2089 if (!vgpu_gmadr_is_valid(vgpu, gma))
2098 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
2137 trace_gma_translate(vgpu->id, "ppgtt", 0,
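Lines 2075-2137 belong to the gma-to-gpa translation. On the GGTT path the graphics memory address selects one entry by page index, the entry's pfn supplies the page frame, and the low 12 bits pass through unchanged (the PPGTT path walks the shadow page-table levels instead). A sketch of the GGTT path; read_entry_pfn() stands in for the guest-entry read plus pte_ops->get_pfn():

#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1ULL << PAGE_SHIFT) - 1))

/* Illustration of the GGTT branch of the walk. */
static uint64_t ggtt_gma_to_gpa(uint64_t gma,
                                uint64_t (*read_entry_pfn)(uint64_t index))
{
        uint64_t index = gma >> PAGE_SHIFT;
        uint64_t pfn = read_entry_pfn(index);

        return (pfn << PAGE_SHIFT) | (gma & ~PAGE_MASK);
}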
2147 static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
2150 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2151 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2160 if (!intel_gvt_ggtt_validate_range(vgpu,
2175 * @vgpu: a vGPU
2185 int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
2188 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2195 ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
2199 static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
2202 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2206 if (pfn != vgpu->gvt->gtt.scratch_mfn)
2207 intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
2211 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2214 struct intel_gvt *gvt = vgpu->gvt;
2216 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2233 if (!vgpu_gmadr_is_valid(vgpu, gma))
2294 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
2299 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
2319 ggtt_invalidate_pte(vgpu, &e);
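emulate_ggtt_mmio_write() (lines 2211-2319) converts the GTT MMIO offset into a guest GGTT index; 4-byte guest writes cover only half of a 64-bit entry, which is what the partial_pte_list (line 2509) exists to reassemble. A sketch of the index math and of merging a 4-byte write into a cached 64-bit entry; names and layout are illustrative, not the driver's:

#include <stdint.h>

#define GTT_ENTRY_SIZE_SHIFT 3  /* assumed: 8-byte entries */

static uint64_t mmio_off_to_index(uint32_t off)
{
        return off >> GTT_ENTRY_SIZE_SHIFT;
}

/* Merge a 4-byte guest write at MMIO offset 'off' into the cached
 * 64-bit entry value; offset bit 2 selects the upper or lower dword. */
static uint64_t merge_partial_write(uint64_t entry, uint32_t off, uint32_t data)
{
        unsigned int shift = (off & 4) ? 32 : 0;
        uint64_t mask = 0xffffffffULL << shift;

        return (entry & ~mask) | ((uint64_t)data << shift);
}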
2328 * @vgpu: a vGPU
2338 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
2341 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2348 ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
2352 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
2355 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2356 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2358 vgpu->gvt->device_info.gtt_entry_size_shift;
2361 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
2383 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
2384 vgpu->id, type, gtt->scratch_pt[type].page_mfn);
2409 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
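alloc_scratch_pages() (lines 2352-2409) builds one scratch page per page-table level and, above the PTE level, points every slot of a level's scratch table at the scratch page one level down, so that guest walks through unmapped ranges resolve to harmless pages. A sketch of that fill loop, with a plain array standing in for the scratch page and for ops->set_entry() (flag bits omitted):

#include <stddef.h>
#include <stdint.h>

#define ENTRIES_PER_PT 512  /* 4KB page / 8-byte entries */

/* Illustration: make every slot of a scratch page table point at the
 * scratch page of the next lower level ('lower_scratch_mfn'). */
static void fill_scratch_pt(uint64_t *scratch_pt, uint64_t lower_scratch_mfn)
{
        for (size_t i = 0; i < ENTRIES_PER_PT; i++)
                scratch_pt[i] = lower_scratch_mfn << 12;  /* address field only */
}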
2415 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
2418 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
2422 if (vgpu->gtt.scratch_pt[i].page != NULL) {
2423 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
2426 __free_page(vgpu->gtt.scratch_pt[i].page);
2427 vgpu->gtt.scratch_pt[i].page = NULL;
2428 vgpu->gtt.scratch_pt[i].page_mfn = 0;
2435 static int create_scratch_page_tree(struct intel_vgpu *vgpu)
2440 ret = alloc_scratch_pages(vgpu, i);
2448 release_scratch_page_tree(vgpu);
2453 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
2454 * @vgpu: a vGPU
2456 * This function is used to initialize per-vGPU graphics memory virtualization
2462 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2464 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2472 gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
2478 intel_vgpu_reset_ggtt(vgpu, false);
2482 return create_scratch_page_tree(vgpu);
2485 static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
2490 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2495 if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
2496 gvt_err("vgpu ppgtt mm is not fully destroyed\n");
2498 if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
2500 ppgtt_free_all_spt(vgpu);
2504 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2509 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
2515 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
2516 vgpu->gtt.ggtt_mm = NULL;
2520 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2521 * @vgpu: a vGPU
2523 * This function is used to clean up per-vGPU graphics memory virtualization
2529 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2531 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2532 intel_vgpu_destroy_ggtt_mm(vgpu);
2533 release_scratch_page_tree(vgpu);
2592 * @vgpu: a vGPU
2600 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
2606 list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
2628 * @vgpu: a vGPU
2637 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
2642 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2646 mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
2655 * @vgpu: a vGPU
2663 int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
2667 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2753 * @vgpu: a vGPU
2755 * This function is called to invalidate all PPGTT instances of a vGPU.
2758 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
2763 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2766 mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2768 mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2777 * @vgpu: a vGPU
2780 * This function is called at the vGPU create stage
2784 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
2786 struct intel_gvt *gvt = vgpu->gvt;
2788 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2797 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2798 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2801 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2802 ggtt_invalidate_pte(vgpu, &old_entry);
2804 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2807 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2808 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2811 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2812 ggtt_invalidate_pte(vgpu, &old_entry);
2814 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
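intel_vgpu_reset_ggtt() (lines 2784-2814) walks the vGPU's aperture index range and then its hidden range; for each page it optionally reads and invalidates the old host entry (dropping its DMA mapping) before writing a scratch entry back. A compact sketch of one such pass; the two callbacks are hypothetical stand-ins for ggtt_invalidate_pte() and ggtt_set_host_entry():

#include <stdbool.h>
#include <stdint.h>

struct gm_window {
        uint64_t base;  /* bytes */
        uint64_t size;  /* bytes */
};

/* Illustration: reset every GGTT entry covering one window. */
static void reset_window(const struct gm_window *w, bool invalidate_old,
                         void (*invalidate)(uint64_t index),
                         void (*write_scratch)(uint64_t index))
{
        uint64_t index = w->base >> 12;
        uint64_t num = w->size >> 12;

        while (num--) {
                if (invalidate_old)
                        invalidate(index);   /* drop the old mapping */
                write_scratch(index++);      /* point at the scratch page */
        }
}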
2822 * @vgpu: a vGPU
2828 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
2834 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2835 intel_vgpu_reset_ggtt(vgpu, true);