
Lines Matching defs:gfn

92 gfn_t gfn;
110 gfn_t gfn;
125 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
135 unsigned long cur_gfn = gfn + npage;
143 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
157 unsigned long cur_gfn = gfn + npage;
163 gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
188 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
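
The pin-side matches above (gvt_pin_guest_page() around lines 143-188, with gvt_unpin_guest_page() as its inverse) walk a guest range one page at a time, computing cur_gfn = gfn + npage and rolling back whatever was already pinned when vfio_pin_pages() fails for one gfn. A minimal userspace sketch of that loop follows, assuming hypothetical pin_one_gfn()/unpin_one_gfn() helpers in place of the real VFIO calls; gfn here stands for a guest page frame number.

/* Model of the pin-with-rollback loop suggested by the matches above.
 * pin_one_gfn()/unpin_one_gfn() are invented stand-ins for the VFIO
 * pinning calls the driver actually uses. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int pin_one_gfn(unsigned long gfn)
{
	/* fail on one gfn so main() exercises the rollback path */
	return (gfn == 0x1003) ? -1 : 0;
}

static void unpin_one_gfn(unsigned long gfn)
{
	printf("unpin gfn 0x%lx\n", gfn);
}

/* Pin 'size' bytes of guest memory starting at 'gfn', one page at a
 * time; on failure, unpin everything pinned so far, mirroring the
 * "cur_gfn = gfn + npage" loop in the matched lines. */
static int pin_guest_range(unsigned long gfn, unsigned long size)
{
	unsigned long npages = size >> PAGE_SHIFT;
	unsigned long npage;

	for (npage = 0; npage < npages; npage++) {
		unsigned long cur_gfn = gfn + npage;

		if (pin_one_gfn(cur_gfn)) {
			fprintf(stderr, "pin failed for gfn 0x%lx\n", cur_gfn);
			while (npage--)		/* roll back pinned pages */
				unpin_one_gfn(gfn + npage);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return pin_guest_range(0x1000, 8 * PAGE_SIZE) ? EXIT_FAILURE : EXIT_SUCCESS;
}
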
192 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
199 ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
208 gvt_unpin_guest_page(vgpu, gfn, size);
215 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
221 gvt_unpin_guest_page(vgpu, gfn, size);
243 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
251 if (gfn < itr->gfn)
253 else if (gfn > itr->gfn)
261 static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
272 new->gfn = gfn;
277 /* gfn_cache maps gfn to struct gvt_dma. */
283 if (gfn < itr->gfn)
332 gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
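
__gvt_cache_find_gfn() and __gvt_cache_add() (lines 243-283) key the per-vGPU DMA cache on gfn and walk it with the gfn < itr->gfn / gfn > itr->gfn comparisons visible above; in the driver the cache is an rb-tree. The sketch below reproduces the lookup and insert on a plain, unbalanced binary search tree so the comparison logic stands alone; struct gvt_dma_model and its fields are invented for the example.

/* Simplified model of a gfn-keyed cache: find by exact gfn, insert a
 * new gfn -> dma_addr entry.  An ordinary binary search tree stands in
 * for the kernel rb-tree. */
#include <stdio.h>
#include <stdlib.h>

struct gvt_dma_model {
	unsigned long gfn;
	unsigned long dma_addr;
	struct gvt_dma_model *left, *right;
};

static struct gvt_dma_model *cache_find_gfn(struct gvt_dma_model *node,
					    unsigned long gfn)
{
	while (node) {
		if (gfn < node->gfn)
			node = node->left;
		else if (gfn > node->gfn)
			node = node->right;
		else
			return node;	/* exact gfn match */
	}
	return NULL;
}

/* Callers are expected to check cache_find_gfn() first, as the driver
 * does before calling __gvt_cache_add(). */
static struct gvt_dma_model *cache_add(struct gvt_dma_model **link,
				       unsigned long gfn, unsigned long dma_addr)
{
	while (*link)
		link = (gfn < (*link)->gfn) ? &(*link)->left : &(*link)->right;

	*link = calloc(1, sizeof(**link));
	if (*link) {
		(*link)->gfn = gfn;
		(*link)->dma_addr = dma_addr;
	}
	return *link;
}

int main(void)
{
	struct gvt_dma_model *root = NULL, *hit;

	cache_add(&root, 0x1000, 0xaa000);
	cache_add(&root, 0x2000, 0xbb000);

	hit = cache_find_gfn(root, 0x2000);
	printf("gfn 0x2000 -> dma 0x%lx\n", hit ? hit->dma_addr : 0UL);
	return 0;
}
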
364 __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
368 hash_for_each_possible(info->ptable, p, hnode, gfn) {
369 if (gfn == p->gfn) {
379 gfn_t gfn)
383 p = __kvmgt_protect_table_find(info, gfn);
387 static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
391 if (kvmgt_gfn_is_write_protected(info, gfn))
395 if (WARN(!p, "gfn: 0x%llx\n", gfn))
398 p->gfn = gfn;
399 hash_add(info->ptable, &p->hnode, gfn);
403 gfn_t gfn)
407 p = __kvmgt_protect_table_find(info, gfn);
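
The protect-table matches (lines 364-407) record which gfns are write-protected in a hashtable keyed by gfn, and the add path bails out when the gfn is already tracked. A small userspace model of that bookkeeping follows, with a fixed-size chained hash standing in for <linux/hashtable.h>; the names are stand-ins, not the driver's.

/* Model of per-gfn write-protect bookkeeping: one entry per protected
 * gfn, stored in a small chained hash keyed by gfn. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define PTABLE_BITS 6
#define PTABLE_SIZE (1u << PTABLE_BITS)

struct tracked_gfn {
	unsigned long gfn;
	struct tracked_gfn *next;
};

static struct tracked_gfn *ptable[PTABLE_SIZE];

static unsigned int ptable_hash(unsigned long gfn)
{
	return gfn & (PTABLE_SIZE - 1);
}

static struct tracked_gfn *protect_table_find(unsigned long gfn)
{
	struct tracked_gfn *p;

	for (p = ptable[ptable_hash(gfn)]; p; p = p->next)
		if (p->gfn == gfn)
			return p;
	return NULL;
}

static bool gfn_is_write_protected(unsigned long gfn)
{
	return protect_table_find(gfn) != NULL;
}

/* Add a gfn at most once; the matched kvmgt_protect_table_add() lines
 * likewise return early when the gfn is already tracked. */
static void protect_table_add(unsigned long gfn)
{
	struct tracked_gfn *p;
	unsigned int bucket;

	if (gfn_is_write_protected(gfn))
		return;

	p = calloc(1, sizeof(*p));
	if (!p)
		return;
	p->gfn = gfn;
	bucket = ptable_hash(gfn);
	p->next = ptable[bucket];
	ptable[bucket] = p;
}

int main(void)
{
	protect_table_add(0x42);
	protect_table_add(0x42);	/* second add is a no-op */
	printf("gfn 0x42 protected: %d\n", gfn_is_write_protected(0x42));
	printf("gfn 0x43 protected: %d\n", gfn_is_write_protected(0x43));
	return 0;
}
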
722 gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
1622 static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
1636 slot = gfn_to_memslot(kvm, gfn);
1644 if (kvmgt_gfn_is_write_protected(info, gfn))
1647 kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1648 kvmgt_protect_table_add(info, gfn);
1656 static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
1670 slot = gfn_to_memslot(kvm, gfn);
1678 if (!kvmgt_gfn_is_write_protected(info, gfn))
1681 kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1682 kvmgt_protect_table_del(info, gfn);
1707 gfn_t gfn;
1713 gfn = slot->base_gfn + i;
1714 if (kvmgt_gfn_is_write_protected(info, gfn)) {
1715 kvm_slot_page_track_remove_page(kvm, slot, gfn,
1717 kvmgt_protect_table_del(info, gfn);
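
The page-track matches (lines 1622-1717) add or remove KVM write tracking for a single gfn, and the flush path sweeps every gfn a memslot covers via gfn = slot->base_gfn + i, dropping tracking from the ones currently protected. The sketch below models only that sweep; the bitmap and both helpers stand in for the kvmgt protect table and the KVM page-track API.

/* Model of the per-memslot sweep: walk every gfn the slot covers and
 * drop write protection from the ones currently tracked. */
#include <stdbool.h>
#include <stdio.h>

#define TRACKED_GFNS 256

static bool write_protected[TRACKED_GFNS];

static bool gfn_is_write_protected(unsigned long gfn)
{
	return gfn < TRACKED_GFNS && write_protected[gfn];
}

static void gfn_drop_write_protection(unsigned long gfn)
{
	if (gfn < TRACKED_GFNS)
		write_protected[gfn] = false;
}

static void flush_slot(unsigned long base_gfn, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; i++) {
		unsigned long gfn = base_gfn + i;

		if (gfn_is_write_protected(gfn)) {
			gfn_drop_write_protection(gfn);
			printf("dropped write protection on gfn 0x%lx\n", gfn);
		}
	}
}

int main(void)
{
	write_protected[0x10] = true;
	write_protected[0x20] = true;
	flush_slot(0x00, 0x40);		/* sweeps gfns 0x00..0x3f */
	return 0;
}
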
1855 static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1865 pfn = gfn_to_pfn(info->kvm, gfn);
1872 static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
1888 entry = __gvt_cache_find_gfn(info->vgpu, gfn);
1890 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1894 ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
1898 /* the same gfn with different size: unmap and re-map */
1899 gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
1902 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1906 ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
1918 gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
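
kvmgt_dma_map_guest_page() (lines 1872-1918) first looks the gfn up in the cache: a hit with a matching size reuses the existing mapping, while the same gfn requested with a different size is unmapped and re-mapped, as the comment at line 1898 says. A toy model of that decision follows; the fixed-slot cache and the map_page()/unmap_page() helpers are invented for illustration.

/* Model of the map / reuse / re-map decision keyed on (gfn, size). */
#include <stdio.h>

struct dma_entry {
	unsigned long gfn;
	unsigned long dma_addr;
	unsigned long size;
	int used;
};

#define CACHE_SLOTS 16
static struct dma_entry cache[CACHE_SLOTS];

static struct dma_entry *cache_find(unsigned long gfn)
{
	for (int i = 0; i < CACHE_SLOTS; i++)
		if (cache[i].used && cache[i].gfn == gfn)
			return &cache[i];
	return NULL;
}

/* Invented mapping helpers: derive an address and log the action. */
static unsigned long map_page(unsigned long gfn, unsigned long size)
{
	printf("map   gfn 0x%lx size 0x%lx\n", gfn, size);
	return gfn << 12;
}

static void unmap_page(unsigned long gfn, unsigned long dma, unsigned long size)
{
	printf("unmap gfn 0x%lx dma 0x%lx size 0x%lx\n", gfn, dma, size);
}

static unsigned long dma_map_guest_page(unsigned long gfn, unsigned long size)
{
	struct dma_entry *e = cache_find(gfn);

	if (e && e->size == size)
		return e->dma_addr;	/* hit: reuse the mapping */

	if (e) {
		/* same gfn with a different size: unmap and re-map */
		unmap_page(e->gfn, e->dma_addr, e->size);
	} else {
		for (int i = 0; i < CACHE_SLOTS; i++)
			if (!cache[i].used) {
				e = &cache[i];
				break;
			}
		if (!e)
			return 0;	/* cache full in this toy model */
	}

	e->used = 1;
	e->gfn = gfn;
	e->size = size;
	e->dma_addr = map_page(gfn, size);
	return e->dma_addr;
}

int main(void)
{
	dma_map_guest_page(0x1000, 0x1000);	/* first map */
	dma_map_guest_page(0x1000, 0x1000);	/* same gfn and size: reuse */
	dma_map_guest_page(0x1000, 0x10000);	/* size change: unmap + re-map */
	return 0;
}
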
1950 gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
2022 static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
2036 ret = kvm_is_visible_gfn(kvm, gfn);
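
The last two matches, kvmgt_gfn_to_pfn() (line 1855) and the kvm_is_visible_gfn() check (line 2036), both resolve a gfn against KVM's memslots: a gfn is visible only if some slot covers it, and its pfn comes from the slot that does. The sketch below illustrates that idea with an invented slot table, not KVM's actual structures or API.

/* Model of gfn -> pfn resolution through a memslot-style table. */
#include <stdio.h>

struct memslot_model {
	unsigned long base_gfn;	/* first guest frame the slot maps */
	unsigned long npages;	/* slot length in pages */
	unsigned long base_pfn;	/* host frame backing base_gfn */
};

static const struct memslot_model slots[] = {
	{ .base_gfn = 0x0000, .npages = 0x100, .base_pfn = 0x80000 },
	{ .base_gfn = 0x1000, .npages = 0x040, .base_pfn = 0x90000 },
};

static const struct memslot_model *find_slot(unsigned long gfn)
{
	for (unsigned int i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
		if (gfn >= slots[i].base_gfn &&
		    gfn < slots[i].base_gfn + slots[i].npages)
			return &slots[i];
	return NULL;
}

static int is_visible_gfn(unsigned long gfn)
{
	return find_slot(gfn) != NULL;
}

static unsigned long gfn_to_pfn(unsigned long gfn)
{
	const struct memslot_model *s = find_slot(gfn);

	return s ? s->base_pfn + (gfn - s->base_gfn) : 0;
}

int main(void)
{
	printf("gfn 0x1010 visible=%d pfn=0x%lx\n",
	       is_visible_gfn(0x1010), gfn_to_pfn(0x1010));
	printf("gfn 0x2000 visible=%d\n", is_visible_gfn(0x2000));
	return 0;
}
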