
Lines Matching defs:gtt

1 /*	$NetBSD: gtt.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/
4 * GTT virtualization
39 __KERNEL_RCSID(0, "$NetBSD: gtt.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");
363 #define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* split 64K gtt entry */
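
The flag above reuses a bit the hardware ignores in a 64-bit PTE as software-only state. A minimal sketch of how such a marker is set and tested; the helper names are hypothetical, and BIT_ULL is used instead of BIT only for 32-bit safety:

#include <linux/bits.h>
#include <linux/types.h>

#define GTT_SPTE_FLAG_64K_SPLITED BIT_ULL(52)	/* as in gtt.c, widened to BIT_ULL */

/* Remember that a guest 64K entry was shadowed as sixteen 4K entries. */
static inline void spte_mark_64k_split(u64 *spte)
{
	*spte |= GTT_SPTE_FLAG_64K_SPLITED;
}

static inline bool spte_is_64k_split(u64 spte)
{
	return spte & GTT_SPTE_FLAG_64K_SPLITED;
}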
558 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
585 gtt.pte_ops;
607 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
619 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
630 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
640 struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
657 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
686 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
749 radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
770 radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
818 return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
860 ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
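
Lines 749-860 show the shadow-page-table (SPT) bookkeeping: every SPT is indexed in a per-vGPU radix tree keyed by the machine frame number (mfn) of its backing page. A self-contained sketch of that pattern, with a file-scope tree standing in for vgpu->gtt.spt_tree and a pared-down struct:

#include <linux/radix-tree.h>
#include <linux/gfp.h>

struct shadow_pt {
	unsigned long mfn;	/* machine frame number of the backing page */
};

static RADIX_TREE(spt_tree, GFP_KERNEL);	/* stand-in for vgpu->gtt.spt_tree */

static int spt_track(struct shadow_pt *spt)
{
	return radix_tree_insert(&spt_tree, spt->mfn, spt);
}

static struct shadow_pt *spt_find(unsigned long mfn)
{
	return radix_tree_lookup(&spt_tree, mfn);
}

static void spt_untrack(struct shadow_pt *spt)
{
	radix_tree_delete(&spt_tree, spt->mfn);
}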
914 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
920 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
948 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
967 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
983 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
991 if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
1018 WARN(1, "suspicious 64K gtt entry\n");
1071 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1135 struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
1150 * @entry: target pfn's gtt entry
1152 * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions are not met,
1158 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1175 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1183 gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
1222 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1228 gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
1253 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
1266 gvt_vdbg_mm("shadow 4K gtt entry\n");
1269 gvt_vdbg_mm("shadow 64K gtt entry\n");
1277 gvt_vdbg_mm("shadow 2M gtt entry\n");
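
The three debug messages above (lines 1266-1277) mark the page-size dispatch when a guest PTE is shadowed. Roughly, assuming the standard SZ_* constants; the per-size handlers below are hypothetical stubs standing in for ppgtt_populate_shadow_entry() and its split helpers:

#include <linux/sizes.h>
#include <linux/errno.h>
#include <linux/types.h>

static int shadow_4k(u64 gfn)  { return 0; }	/* direct gfn -> mfn mapping */
static int shadow_64k(u64 gfn) { return 0; }	/* may be split into 16 x 4K */
static int shadow_2m(u64 gfn)  { return 0; }	/* huge page when host allows, else split */

static int shadow_entry_by_size(u64 gfn, unsigned long page_size)
{
	switch (page_size) {
	case SZ_4K:
		return shadow_4k(gfn);
	case SZ_64K:
		return shadow_64k(gfn);
	case SZ_2M:
		return shadow_2m(gfn);
	default:
		return -EINVAL;
	}
}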
1307 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1329 ops->set_pfn(&se, gvt->gtt.scratch_mfn);
1350 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1363 vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
1431 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1483 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
1503 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
1529 struct intel_gvt_gtt *gtt = &gvt->gtt;
1535 if (list_empty(&gtt->oos_page_free_list_head)) {
1536 oos_page = container_of(gtt->oos_page_use_list_head.next,
1545 oos_page = container_of(gtt->oos_page_free_list_head.next,
1560 list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
1583 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
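
Lines 1483-1583 implement the out-of-sync (OOS) page pool: a gvt-wide free list and in-use list, plus a per-vGPU list linked through vm_list. A sketch of the allocation path with stand-in list heads; it returns NULL where the driver instead syncs back and evicts the oldest in-use page:

#include <linux/kernel.h>
#include <linux/list.h>

struct oos_page {
	struct list_head list;		/* gvt-wide free/use list linkage */
	struct list_head vm_list;	/* owning vGPU's list linkage */
};

static LIST_HEAD(oos_page_free_list);	/* stand-ins for gvt->gtt.* heads */
static LIST_HEAD(oos_page_use_list);

static struct oos_page *oos_page_get(struct list_head *vgpu_list)
{
	struct oos_page *oos_page;

	if (list_empty(&oos_page_free_list))
		return NULL;	/* real code reclaims the oldest in-use page */

	oos_page = container_of(oos_page_free_list.next,
				struct oos_page, list);
	list_move_tail(&oos_page->list, &oos_page_use_list);
	list_add_tail(&oos_page->vm_list, vgpu_list);
	return oos_page;
}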
1602 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1634 vgpu->gtt.scratch_pt[type].page_mfn);
1641 vgpu->gtt.scratch_pt[type].page_mfn);
1645 vgpu->gtt.scratch_pt[type].page_mfn);
1674 &spt->vgpu->gtt.post_shadow_list_head);
1695 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
1719 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1730 * For a page table which has 64K gtt entries, only PTE#0, PTE#16,
1753 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1783 struct intel_gvt_gtt *gtt = &gvt->gtt;
1784 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1813 struct intel_gvt_gtt *gtt = &gvt->gtt;
1814 struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1916 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1918 mutex_lock(&gvt->gtt.ppgtt_mm_lock);
1919 list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
1920 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
1965 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1967 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2010 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2012 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
2013 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2024 mutex_lock(&gvt->gtt.ppgtt_mm_lock);
2026 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
2033 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
2037 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
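
Lines 1916-2037 show how shadowed PPGTT mms are aged: each mm sits on a gvt-wide LRU list guarded by ppgtt_mm_lock; using an mm moves it to the tail, and the reclaim walk starts from the head. A sketch of the touch path with stand-in globals, assuming the mm was list_add_tail'ed at creation as on line 1919:

#include <linux/list.h>
#include <linux/mutex.h>

struct vgpu_mm {
	struct list_head lru_list;
};

static DEFINE_MUTEX(ppgtt_mm_lock);	/* stand-in for gvt->gtt.ppgtt_mm_lock */
static LIST_HEAD(ppgtt_mm_lru);		/* stand-in for ...ppgtt_mm_lru_list_head */

/* Mark an mm as recently used: reclaim scans from the LRU head, so moving
 * this mm to the tail protects its shadow page tables the longest. */
static void ppgtt_mm_touch(struct vgpu_mm *mm)
{
	mutex_lock(&ppgtt_mm_lock);
	list_move_tail(&mm->lru_list, &ppgtt_mm_lru);
	mutex_unlock(&ppgtt_mm_lock);
}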
2048 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2077 struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
2078 struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
2150 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2174 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
2180 * This function is used to emulate the GTT MMIO register read
2202 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2206 if (pfn != vgpu->gvt->gtt.scratch_mfn)
2216 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2217 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
2295 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2307 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2311 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2327 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
2333 * This function is used to emulate the GTT MMIO register write
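
The emulation entry points above convert an MMIO offset inside the GTT BAR into a GGTT entry index; a gen8 GGTT entry is a 64-bit PTE, so the index is the byte offset into the PTE array divided by eight. Roughly, with hypothetical names:

#include <linux/types.h>

static inline unsigned long ggtt_offset_to_index(u64 off, u64 gtt_base)
{
	return (off - gtt_base) >> 3;	/* 8 bytes per gen8 GGTT entry */
}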
2355 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2356 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2380 gtt->scratch_pt[type].page_mfn =
2382 gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
2384 vgpu->id, type, gtt->scratch_pt[type].page_mfn);
2399 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
2422 if (vgpu->gtt.scratch_pt[i].page != NULL) {
2423 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
2426 __free_page(vgpu->gtt.scratch_pt[i].page);
2427 vgpu->gtt.scratch_pt[i].page = NULL;
2428 vgpu->gtt.scratch_pt[i].page_mfn = 0;
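
Lines 2355-2428 allocate and free the per-vGPU scratch page tables: one zeroed page per level, DMA-mapped so its mfn can be chained into the level above (line 2399) and into otherwise-empty entries. A sketch of the allocation half, assuming a 4K GTT page and I915_GTT_PAGE_SHIFT == 12:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>

static int scratch_pt_alloc(struct device *dev, struct page **page,
			    unsigned long *page_mfn)
{
	void *scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	dma_addr_t daddr;

	if (!scratch_pt)
		return -ENOMEM;

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096,
			     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		free_page((unsigned long)scratch_pt);
		return -ENOMEM;
	}

	*page = virt_to_page(scratch_pt);
	*page_mfn = (unsigned long)(daddr >> 12);	/* I915_GTT_PAGE_SHIFT */
	return 0;
}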
2464 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2466 INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);
2468 INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
2469 INIT_LIST_HEAD(&gtt->oos_page_list_head);
2470 INIT_LIST_HEAD(&gtt->post_shadow_list_head);
2472 gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
2473 if (IS_ERR(gtt->ggtt_mm)) {
2475 return PTR_ERR(gtt->ggtt_mm);
2480 INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
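
Per-vGPU GTT state (lines 2464-2480) is ordinary kernel plumbing: an empty radix tree, empty list heads, then a GGTT mm whose constructor reports failure through ERR_PTR. A sketch of that error-handling shape; intel_vgpu_create_ggtt_mm is the real constructor, the rest are stand-ins:

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/radix-tree.h>

struct vgpu_gtt {
	struct radix_tree_root spt_tree;
	struct list_head ppgtt_mm_list_head;
	void *ggtt_mm;
};

static int vgpu_gtt_init(struct vgpu_gtt *gtt, void *(*create_ggtt)(void))
{
	INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);
	INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);

	gtt->ggtt_mm = create_ggtt();	/* e.g. intel_vgpu_create_ggtt_mm() */
	if (IS_ERR(gtt->ggtt_mm))
		return PTR_ERR(gtt->ggtt_mm);
	return 0;
}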
2490 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2495 if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
2498 if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
2509 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
2515 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
2516 vgpu->gtt.ggtt_mm = NULL;
2538 struct intel_gvt_gtt *gtt = &gvt->gtt;
2542 WARN(!list_empty(&gtt->oos_page_use_list_head),
2545 list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
2555 struct intel_gvt_gtt *gtt = &gvt->gtt;
2560 INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
2561 INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
2579 list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
2606 list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
2693 gvt_dbg_core("init gtt\n");
2695 gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2696 gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
2712 gvt->gtt.scratch_page = virt_to_page(page);
2713 gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2720 __free_page(gvt->gtt.scratch_page);
2724 INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
2725 mutex_init(&gvt->gtt.ppgtt_mm_lock);
2740 dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
2745 __free_page(gvt->gtt.scratch_page);
2763 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2766 mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2768 mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2788 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2794 pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
2801 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2804 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2811 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2814 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
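
The reset path above (lines 2788-2814) walks the vGPU's GGTT ranges and rewrites every host entry to a pre-built PTE pointing at the global scratch page, so a stale mapping can only reach harmless memory. The core loop, sketched over a plain PTE array; gtt[] stands in for the host GGTT and scratch_pte for the entry built on lines 2788-2794:

#include <linux/types.h>

static void ggtt_reset_range(u64 *gtt, u64 scratch_pte,
			     unsigned long first, unsigned long num)
{
	unsigned long index;

	for (index = first; index < first + num; index++)
		gtt[index] = scratch_pte;	/* device now hits scratch */
}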
2821 * intel_vgpu_reset_gtt - reset all GTT related status
2825 * GTT related status, including GGTT, PPGTT, scratch page.