Lines matching refs: spt
651 struct intel_vgpu_ppgtt_spt *spt,
656 struct intel_gvt *gvt = spt->vgpu->gvt;
666 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
667 spt->vgpu);
672 spt->guest_page.pde_ips : false);
674 gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
680 struct intel_vgpu_ppgtt_spt *spt,
685 struct intel_gvt *gvt = spt->vgpu->gvt;
691 gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
695 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
696 spt->vgpu);
699 #define ppgtt_get_guest_entry(spt, e, index) \
700 ppgtt_spt_get_entry(spt, NULL, \
701 spt->guest_page.type, e, index, true)
703 #define ppgtt_set_guest_entry(spt, e, index) \
704 ppgtt_spt_set_entry(spt, NULL, \
705 spt->guest_page.type, e, index, true)
707 #define ppgtt_get_shadow_entry(spt, e, index) \
708 ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
709 spt->shadow_page.type, e, index, false)
711 #define ppgtt_set_shadow_entry(spt, e, index) \
712 ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
713 spt->shadow_page.type, e, index, false)
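The accessor pair wrapped above (lines 651-713) splits on whether a kernel mapping exists: guest page-table entries have no vaddr, so they are read and written through hypervisor GPA accessors at guest_page.gfn << I915_GTT_PAGE_SHIFT, while shadow entries go straight through shadow_page.vaddr. A minimal userspace model of that split (fake_read_gpa and the flat guest_memory array are illustrative stand-ins, not GVT API):

#include <stdint.h>
#include <string.h>

#define GTT_PAGE_SHIFT  12
#define GTT_ENTRY_SHIFT 3               /* 8-byte gen8 entries */

static uint8_t guest_memory[1 << 20];   /* pretend guest physical space */

/* Stand-in for intel_gvt_hypervisor_read_gpa(). */
static void fake_read_gpa(uint64_t gpa, void *buf, size_t len)
{
        memcpy(buf, &guest_memory[gpa], len);
}

/* Mirrors the ppgtt_spt_get_entry() split: a NULL vaddr means
 * "guest page", which must be fetched by guest physical address. */
static uint64_t get_entry(void *vaddr, uint64_t gfn, unsigned long index)
{
        uint64_t e;

        if (!vaddr)
                fake_read_gpa((gfn << GTT_PAGE_SHIFT) +
                              (index << GTT_ENTRY_SHIFT), &e, sizeof(e));
        else
                e = ((uint64_t *)vaddr)[index];
        return e;
}

int main(void)
{
        uint64_t v = 0x1234;

        memcpy(&guest_memory[5 << GTT_ENTRY_SHIFT], &v, sizeof(v));
        return get_entry(NULL, 0, 5) == 0x1234 ? 0 : 1;
}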
717 struct intel_vgpu_ppgtt_spt *spt;
719 spt = kzalloc(sizeof(*spt), gfp_mask);
720 if (!spt)
723 spt->shadow_page.page = alloc_page(gfp_mask);
724 if (!spt->shadow_page.page) {
725 kfree(spt);
728 return spt;
731 static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
733 __free_page(spt->shadow_page.page);
734 kfree(spt);
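alloc_spt() and free_spt() (lines 717-734) are a straight allocate-with-unwind pair: if the backing page cannot be allocated, the half-built tracking struct is freed before returning failure. The same shape as a self-contained sketch, with libc allocators standing in for kzalloc()/alloc_page():

#include <stdlib.h>

struct spt {
        void *shadow_page;              /* models spt->shadow_page.page */
};

static struct spt *alloc_spt(void)
{
        struct spt *spt = calloc(1, sizeof(*spt));

        if (!spt)
                return NULL;
        spt->shadow_page = aligned_alloc(4096, 4096);   /* one page */
        if (!spt->shadow_page) {
                free(spt);              /* unwind before failing */
                return NULL;
        }
        return spt;
}

static void free_spt(struct spt *spt)
{
        free(spt->shadow_page);         /* reverse order of alloc */
        free(spt);
}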
740 static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
742 struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
744 trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
746 dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
749 radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
751 if (spt->guest_page.gfn) {
752 if (spt->guest_page.oos_page)
753 detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
755 intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
758 list_del_init(&spt->post_shadow_list);
759 free_spt(spt);
764 struct intel_vgpu_ppgtt_spt *spt, *spn;
771 spt = radix_tree_deref_slot(slot);
772 list_move(&spt->post_shadow_list, &all_spt);
776 list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
777 ppgtt_free_spt(spt);
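The teardown walk above (lines 764-777) cannot delete radix-tree slots while iterating them, so every spt is first moved onto a private all_spt list (reusing its post_shadow_list link) and only then freed under the _safe list iterator. The same grab-next-before-freeing idiom, in plain C:

#include <stdlib.h>

struct node { struct node *next; };

/* Why list_for_each_entry_safe() exists: read the successor
 * before the current node is freed out from under the cursor. */
static void free_all(struct node *head)
{
        struct node *n, *next;

        for (n = head; n; n = next) {
                next = n->next;
                free(n);
        }
}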
781 struct intel_vgpu_ppgtt_spt *spt,
788 struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
795 ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
801 /* Find an spt by guest gfn. */
814 /* Find the spt by shadow page mfn. */
828 struct intel_vgpu_ppgtt_spt *spt = NULL;
833 spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
834 if (!spt) {
842 spt->vgpu = vgpu;
843 atomic_set(&spt->refcount, 1);
844 INIT_LIST_HEAD(&spt->post_shadow_list);
849 spt->shadow_page.type = type;
850 daddr = dma_map_page(kdev, spt->shadow_page.page,
857 spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
858 spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
860 ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
864 return spt;
869 free_spt(spt);
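ppgtt_alloc_spt() (lines 828-869) maps the shadow page for device access, then indexes the spt in spt_tree by mfn, i.e. the DMA address in 4 KiB page units, so a PFN found in a shadow entry can later be resolved back to its spt; if the radix-tree insert fails, the page is unmapped and the spt freed. The key derivation is a single shift:

#include <assert.h>
#include <stdint.h>

#define I915_GTT_PAGE_SHIFT 12

/* The spt_tree key: a DMA address expressed in page frames. */
static unsigned long mfn_from_daddr(uint64_t daddr)
{
        return (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
}

int main(void)
{
        /* a page-aligned DMA address 0x1f3000 -> frame 0x1f3 */
        assert(mfn_from_daddr(0x1f3000) == 0x1f3);
        return 0;
}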
878 struct intel_vgpu_ppgtt_spt *spt;
881 spt = ppgtt_alloc_spt(vgpu, type);
882 if (IS_ERR(spt))
883 return spt;
889 ppgtt_write_protection_handler, spt);
891 ppgtt_free_spt(spt);
895 spt->guest_page.type = type;
896 spt->guest_page.gfn = gfn;
897 spt->guest_page.pde_ips = guest_pde_ips;
899 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
901 return spt;
904 #define pt_entry_size_shift(spt) \
905 ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
907 #define pt_entries(spt) \
908 (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
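For the common gen8 case the arithmetic works out to 512 slots per table: gtt_entry_size_shift is 3 (8-byte entries), so pt_entries(spt) is 4096 >> 3. A one-line check, assuming that entry size:

#include <assert.h>

#define I915_GTT_PAGE_SIZE      4096
#define GTT_ENTRY_SIZE_SHIFT    3       /* 8-byte entries on gen8+ */

int main(void)
{
        assert((I915_GTT_PAGE_SIZE >> GTT_ENTRY_SIZE_SHIFT) == 512);
        return 0;
}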
910 #define for_each_present_guest_entry(spt, e, i) \
911 for (i = 0; i < pt_entries(spt); \
912 i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
913 if (!ppgtt_get_guest_entry(spt, e, i) && \
914 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
916 #define for_each_present_shadow_entry(spt, e, i) \
917 for (i = 0; i < pt_entries(spt); \
918 i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
919 if (!ppgtt_get_shadow_entry(spt, e, i) && \
920 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
922 #define for_each_shadow_entry(spt, e, i) \
923 for (i = 0; i < pt_entries(spt); \
924 i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
925 if (!ppgtt_get_shadow_entry(spt, e, i))
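All three iterators above share two tricks: the step widens to GTT_64K_PTE_STRIDE (16) when the table is in 64K-page mode, and each macro ends in a bare if, so the statement the caller writes after the macro becomes the if-body and runs only for entries that pass the filter. A compilable model of the same shape (names are illustrative):

#include <stdio.h>

#define N_ENTRIES       512
#define STRIDE_64K      16      /* GTT_64K_PTE_STRIDE */

/* The trailing `if` binds the caller's statement, exactly how
 * for_each_present_shadow_entry() filters non-present slots. */
#define for_each_present(tbl, e, i, ips)                                \
        for ((i) = 0; (i) < N_ENTRIES; (i) += (ips) ? STRIDE_64K : 1)   \
                if ((((e) = (tbl)[(i)]) & 1))   /* bit 0: present */

int main(void)
{
        unsigned long tbl[N_ENTRIES] = { [0] = 0x1001, [16] = 0x2001 };
        unsigned long e;
        int i;

        for_each_present(tbl, e, i, 1)  /* 64K mode: stride of 16 */
                printf("index %d -> %#lx\n", i, e);
        return 0;
}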
927 static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
929 int v = atomic_read(&spt->refcount);
931 trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
932 atomic_inc(&spt->refcount);
935 static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
937 int v = atomic_read(&spt->refcount);
939 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
940 return atomic_dec_return(&spt->refcount);
943 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
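ppgtt_get_spt()/ppgtt_put_spt() are a bare refcount, and the forward-declared ppgtt_invalidate_spt() uses the returned count as its early-out: a table still referenced by other shadow PDEs survives, and only the last put tears it down (see line 1007 onward). The pattern, sketched with C11 atomics:

#include <stdatomic.h>
#include <stdlib.h>

struct spt { atomic_int refcount; };

static void spt_get(struct spt *s)
{
        atomic_fetch_add(&s->refcount, 1);
}

/* Like atomic_dec_return(): hand back the post-decrement value. */
static int spt_put(struct spt *s)
{
        return atomic_fetch_sub(&s->refcount, 1) - 1;
}

/* Shape of ppgtt_invalidate_spt(): only the final reference
 * walks the entries and frees the table. */
static void spt_invalidate(struct spt *s)
{
        if (spt_put(s) > 0)
                return;                 /* still shared, keep it */
        /* ... invalidate each present shadow entry here ... */
        free(s);
}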
979 static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
982 struct intel_vgpu *vgpu = spt->vgpu;
988 type = spt->shadow_page.type;
997 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
999 struct intel_vgpu *vgpu = spt->vgpu;
1004 trace_spt_change(spt->vgpu->id, "die", spt,
1005 spt->guest_page.gfn, spt->shadow_page.type);
1007 if (ppgtt_put_spt(spt) > 0)
1010 for_each_present_shadow_entry(spt, &e, index) {
1014 ppgtt_invalidate_pte(spt, &e);
1031 spt->vgpu, &e);
1040 trace_spt_change(spt->vgpu->id, "release", spt,
1041 spt->guest_page.gfn, spt->shadow_page.type);
1042 ppgtt_free_spt(spt);
1046 spt, e.val64, e.type);
1066 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
1072 struct intel_vgpu_ppgtt_spt *spt = NULL;
1081 spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
1082 if (spt) {
1083 ppgtt_get_spt(spt);
1085 if (ips != spt->guest_page.pde_ips) {
1086 spt->guest_page.pde_ips = ips;
1089 clear_page(spt->shadow_page.vaddr);
1090 ret = ppgtt_populate_spt(spt);
1092 ppgtt_put_spt(spt);
1104 spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
1105 if (IS_ERR(spt)) {
1106 ret = PTR_ERR(spt);
1110 ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
1114 ret = ppgtt_populate_spt(spt);
1118 trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
1119 spt->shadow_page.type);
1121 return spt;
1124 ppgtt_free_spt(spt);
1125 spt = NULL;
1128 spt, we->val64, we->type);
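ppgtt_populate_spt_by_guest_entry() (lines 1072-1128) is find-or-create keyed on the guest PFN of the PDE: a hit just takes a reference, unless the guest flipped the PDE's IPS bit (64K vs 4K PTEs), in which case the stale shadow page is cleared and repopulated; a miss allocates a new spt, write-protects the guest table, and shadows it. A self-contained sketch, with a flat array standing in for the gfn lookup (all names hypothetical):

#include <stdbool.h>
#include <stdlib.h>

struct spt {
        unsigned long gfn;
        bool pde_ips;
        int refcount;
};

static struct spt *table[64];   /* toy stand-in for the gfn index */

static struct spt *find_spt(unsigned long gfn)
{
        for (size_t i = 0; i < 64; i++)
                if (table[i] && table[i]->gfn == gfn)
                        return table[i];
        return NULL;
}

static struct spt *get_or_shadow(unsigned long gfn, bool ips)
{
        struct spt *s = find_spt(gfn);
        size_t i;

        if (s) {
                s->refcount++;          /* already shadowed: share it */
                if (s->pde_ips != ips) {
                        s->pde_ips = ips;
                        /* clear_page() + repopulate in the real code */
                }
                return s;
        }
        s = calloc(1, sizeof(*s));
        if (!s)
                return NULL;
        s->gfn = gfn;
        s->pde_ips = ips;
        s->refcount = 1;
        for (i = 0; i < 64 && table[i]; i++)
                ;
        if (i == 64) {                  /* toy index full: fail cleanly */
                free(s);
                return NULL;
        }
        table[i] = s;
        /* write-protect gfn and populate the shadow page here */
        return s;
}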
1172 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1195 ppgtt_invalidate_spt(spt);
1214 ppgtt_set_shadow_entry(spt, se, index);
1219 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1244 ppgtt_set_shadow_entry(spt, &entry, index + i);
1250 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1275 return split_64KB_gtt_entry(vgpu, spt, index, &se);
1280 return split_2MB_gtt_entry(vgpu, spt, index, &se);
1299 ppgtt_set_shadow_entry(spt, &se, index);
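split_64KB_gtt_entry()/split_2MB_gtt_entry() (lines 1219-1280) handle guest huge entries that the shadow side backs with 4 KiB pages: one guest 64K PTE fans out into GTT_64K_PTE_STRIDE (16) consecutive shadow entries covering the same physical range. The fan-out, as a sketch:

#include <stdint.h>

#define GTT_64K_PTE_STRIDE      16
#define PAGE_SIZE               4096

/* One guest 64K PTE becomes 16 consecutive 4 KiB shadow PTEs; the
 * real code sets each PFN through pte_ops and marks it present. */
static void split_64k(uint64_t *shadow, unsigned long index, uint64_t dma)
{
        for (int i = 0; i < GTT_64K_PTE_STRIDE; i++)
                shadow[index + i] = (dma + (uint64_t)i * PAGE_SIZE) | 1;
}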
1303 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
1305 struct intel_vgpu *vgpu = spt->vgpu;
1313 trace_spt_change(spt->vgpu->id, "born", spt,
1314 spt->guest_page.gfn, spt->shadow_page.type);
1316 for_each_present_guest_entry(spt, &ge, i) {
1323 ppgtt_get_shadow_entry(spt, &se, i);
1325 ppgtt_set_shadow_entry(spt, &se, i);
1330 ppgtt_set_shadow_entry(spt, &se, i);
1334 ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
1342 spt, ge.val64, ge.type);
1346 static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
1349 struct intel_vgpu *vgpu = spt->vgpu;
1353 trace_spt_guest_change(spt->vgpu->id, "remove", spt,
1354 spt->shadow_page.type, se->val64, index);
1363 vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
1381 ppgtt_invalidate_pte(spt, se);
1387 spt, se->val64, se->type);
1391 static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
1394 struct intel_vgpu *vgpu = spt->vgpu;
1399 trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
1411 ppgtt_get_shadow_entry(spt, &m, index);
1413 ppgtt_set_shadow_entry(spt, &m, index);
1415 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
1421 gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
1422 spt, we->val64, we->type);
1432 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1438 spt, spt->guest_page.type);
1440 old.type = new.type = get_entry_type(spt->guest_page.type);
1447 spt->guest_page.gfn << PAGE_SHIFT, vgpu);
1450 && !test_and_clear_bit(index, spt->post_shadow_bitmap))
1454 spt, spt->guest_page.type,
1457 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
1464 spt->guest_page.write_cnt = 0;
1465 list_del_init(&spt->post_shadow_list);
1473 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1476 spt, spt->guest_page.type);
1478 spt->guest_page.write_cnt = 0;
1479 spt->guest_page.oos_page = NULL;
1480 oos_page->spt = NULL;
1489 struct intel_vgpu_ppgtt_spt *spt)
1491 struct intel_gvt *gvt = spt->vgpu->gvt;
1494 ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
1495 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
1500 oos_page->spt = spt;
1501 spt->guest_page.oos_page = oos_page;
1505 trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
1506 spt, spt->guest_page.type);
1510 static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
1512 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1515 ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
1519 trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
1520 spt, spt->guest_page.type);
1523 return sync_oos_page(spt->vgpu, oos_page);
1526 static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
1528 struct intel_gvt *gvt = spt->vgpu->gvt;
1530 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1538 ret = ppgtt_set_guest_page_sync(oos_page->spt);
1541 ret = detach_oos_page(spt->vgpu, oos_page);
1547 return attach_oos_page(oos_page, spt);
1550 static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
1552 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1557 trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
1558 spt, spt->guest_page.type);
1560 list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
1561 return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
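The out-of-sync machinery (lines 1432-1561) keeps a snapshot of the guest table in oos_page->mem: detaching clears both back-pointers, and syncing re-reads the live guest page and re-shadows only entries that changed since the snapshot (filtered further by post_shadow_bitmap). The diff-and-resync core, as a sketch; re-translation of each changed entry through the DMA map is elided:

#include <stdint.h>

/* Shape of sync_oos_page(): walk snapshot vs. live guest page and
 * touch only the entries the guest actually modified. */
static void sync_page(const uint64_t *snap, const uint64_t *live,
                      uint64_t *shadow, int n_entries)
{
        for (int i = 0; i < n_entries; i++)
                if (snap[i] != live[i])
                        shadow[i] = live[i];    /* re-translate in real code */
}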
1586 ret = ppgtt_set_guest_page_sync(oos_page->spt);
1597 struct intel_vgpu_ppgtt_spt *spt,
1600 struct intel_vgpu *vgpu = spt->vgpu;
1601 int type = spt->shadow_page.type;
1614 ppgtt_get_shadow_entry(spt, &old_se, index);
1617 ret = ppgtt_handle_guest_entry_add(spt, we, index);
1622 ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
1635 ppgtt_set_shadow_entry(spt, &old_se, index + i);
1642 ppgtt_set_shadow_entry(spt, &old_se, index);
1646 ppgtt_set_shadow_entry(spt, &old_se, index);
1653 spt, we->val64, we->type);
1659 static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
1662 && gtt_type_is_pte_pt(spt->guest_page.type)
1663 && spt->guest_page.write_cnt >= 2;
1666 static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
1669 set_bit(index, spt->post_shadow_bitmap);
1670 if (!list_empty(&spt->post_shadow_list))
1673 list_add_tail(&spt->post_shadow_list,
1674 &spt->vgpu->gtt.post_shadow_list_head);
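ppgtt_set_post_shadow() defers work instead of doing it in the write path: the dirty index goes into post_shadow_bitmap and the spt is queued at most once (hence the list_empty() check) for a later flush, which replays exactly the set bits (lines 1690-1709). The defer half, modeled with a plain bitmap:

#include <limits.h>
#include <stdbool.h>

#define N_ENTRIES 512

struct post_shadow {
        unsigned long bitmap[N_ENTRIES / (sizeof(unsigned long) * CHAR_BIT)];
        bool queued;            /* models !list_empty(&post_shadow_list) */
};

static void set_post_shadow(struct post_shadow *ps, unsigned int index)
{
        const unsigned int bits = sizeof(unsigned long) * CHAR_BIT;

        ps->bitmap[index / bits] |= 1UL << (index % bits);
        if (!ps->queued)        /* queue the table at most once */
                ps->queued = true;
}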
1690 struct intel_vgpu_ppgtt_spt *spt;
1696 spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
1699 for_each_set_bit(index, spt->post_shadow_bitmap,
1701 ppgtt_get_guest_entry(spt, &ge, index);
1703 ret = ppgtt_handle_guest_write_page_table(spt,
1707 clear_bit(index, spt->post_shadow_bitmap);
1709 list_del_init(&spt->post_shadow_list);
1715 struct intel_vgpu_ppgtt_spt *spt,
1718 struct intel_vgpu *vgpu = spt->vgpu;
1727 ppgtt_get_guest_entry(spt, &we, index);
1742 ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
1746 if (!test_bit(index, spt->post_shadow_bitmap)) {
1747 int type = spt->shadow_page.type;
1749 ppgtt_get_shadow_entry(spt, &se, index);
1750 ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
1754 ppgtt_set_shadow_entry(spt, &se, index);
1756 ppgtt_set_post_shadow(spt, index);
1762 spt->guest_page.write_cnt++;
1764 if (spt->guest_page.oos_page)
1765 ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
1768 if (can_do_out_of_sync(spt)) {
1769 if (!spt->guest_page.oos_page)
1770 ppgtt_allocate_oos_page(spt);
1772 ret = ppgtt_set_guest_page_oos(spt);
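The write handler above embodies the OOS policy: every trapped write bumps guest_page.write_cnt and is mirrored into the oos snapshot, and can_do_out_of_sync() (line 1659) lets a leaf PTE table with two or more trapped writes drop write protection entirely, to be re-synced in bulk later. The decision, sketched; the real check also requires out-of-sync support to be enabled:

#include <stdbool.h>

struct guest_page {
        bool is_pte_table;      /* only leaf PTE tables may go OOS */
        unsigned int write_cnt; /* trapped writes so far */
};

/* can_do_out_of_sync(): after two trapped writes, stop
 * write-protecting and batch the changes for a later sync. */
static bool should_go_out_of_sync(const struct guest_page *gp)
{
        return gp->is_pte_table && gp->write_cnt >= 2;
}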
1815 struct intel_vgpu_ppgtt_spt *spt;
1833 spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1834 if (IS_ERR(spt)) {
1836 ret = PTR_ERR(spt);
1839 ppgtt_generate_shadow_entry(&se, spt, &ge);
2499 gvt_err("Why we still has spt not freed?\n");
2718 gvt_err("fail to initialize SPT oos\n");