
Lines matching defs:vgpu in drivers/gpu/drm/i915/gvt/kvmgt.c

71 	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
73 void (*release)(struct intel_vgpu *vgpu,
98 struct intel_vgpu *vgpu;
107 struct intel_vgpu *vgpu;
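
The two matches above are the vgpu back-pointers in kvmgt.c's private bookkeeping structs (kvmgt_guest_info and gvt_dma). A minimal sketch of what gvt_dma plausibly looks like, inferred from the cache code matched further down; the field order and the kref member are assumptions:

struct gvt_dma {
        struct intel_vgpu *vgpu;        /* owning vGPU, see entry->vgpu uses below */
        struct rb_node gfn_node;        /* keyed by guest frame number */
        struct rb_node dma_addr_node;   /* keyed by host DMA address */
        gfn_t gfn;
        dma_addr_t dma_addr;
        unsigned long size;
        struct kref ref;                /* assumed: dropped via a release callback, see end of listing */
};
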
125 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
137 ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
143 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
160 ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
188 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
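
A sketch of the pinning loop these matches belong to: guest pages are pinned through VFIO one 4KB frame at a time and checked for physical contiguity, so the range can later be DMA-mapped as one region. The error messages and the exact unwind are reconstructed, not taken from the matches:

static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                unsigned long size, struct page **page)
{
        unsigned long base_pfn = 0;
        unsigned long total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
        int npage;
        int ret;

        for (npage = 0; npage < total_pages; npage++) {
                unsigned long cur_gfn = gfn + npage;
                unsigned long pfn;

                /* Pin a single guest frame via the mdev's VFIO group. */
                ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
                                     IOMMU_READ | IOMMU_WRITE, &pfn);
                if (ret != 1) {
                        gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
                                     cur_gfn, ret);
                        if (ret >= 0)
                                ret = -EINVAL;
                        goto err;
                }

                /* The backing pages must be physically contiguous. */
                if (npage == 0)
                        base_pfn = pfn;
                else if (pfn != base_pfn + npage) {
                        gvt_vgpu_err("guest pages are not contiguous\n");
                        ret = -EINVAL;
                        npage++;        /* unpin the frame pinned in this iteration too */
                        goto err;
                }
        }

        *page = pfn_to_page(base_pfn);
        return 0;

err:
        gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
        return ret;
}
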
192 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
195 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
199 ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
208 gvt_unpin_guest_page(vgpu, gfn, size);
215 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
218 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
221 gvt_unpin_guest_page(vgpu, gfn, size);
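
Once pinned, the contiguous range is mapped for device DMA. A sketch of gvt_dma_map_page() as implied by these matches; dma_map_page()/dma_mapping_error() are the regular kernel DMA API, the message wording is assumed:

static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
                dma_addr_t *dma_addr, unsigned long size)
{
        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
        struct page *page = NULL;
        int ret;

        ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
        if (ret)
                return ret;

        /* Set up the DMA mapping for the pinned, contiguous range. */
        *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma_addr)) {
                gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn);
                gvt_unpin_guest_page(vgpu, gfn, size);
                return -ENOMEM;
        }

        return 0;
}

gvt_dma_unmap_page() is the mirror image: dma_unmap_page() on the same device, then gvt_unpin_guest_page().
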
224 static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
227 struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
243 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
245 struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
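
Both lookup helpers are plain rb-tree walks over the per-vGPU caches rooted in vgpu->vdev. A sketch of the gfn-keyed walk, assuming the gvt_dma fields sketched earlier; the dma_addr variant is identical apart from the key:

static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
        struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
        struct gvt_dma *itr;

        while (node) {
                itr = rb_entry(node, struct gvt_dma, gfn_node);

                if (gfn < itr->gfn)
                        node = node->rb_left;
                else if (gfn > itr->gfn)
                        node = node->rb_right;
                else
                        return itr;
        }
        return NULL;
}
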
261 static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
271 new->vgpu = vgpu;
278 link = &vgpu->vdev.gfn_cache.rb_node;
289 rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);
293 link = &vgpu->vdev.dma_addr_cache.rb_node;
304 rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
306 vgpu->vdev.nr_cache_entries++;
310 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
313 rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
314 rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
316 vgpu->vdev.nr_cache_entries--;
319 static void gvt_cache_destroy(struct intel_vgpu *vgpu)
325 mutex_lock(&vgpu->vdev.cache_lock);
326 node = rb_first(&vgpu->vdev.gfn_cache);
328 mutex_unlock(&vgpu->vdev.cache_lock);
332 gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
333 __gvt_cache_remove_entry(vgpu, dma);
334 mutex_unlock(&vgpu->vdev.cache_lock);
338 static void gvt_cache_init(struct intel_vgpu *vgpu)
340 vgpu->vdev.gfn_cache = RB_ROOT;
341 vgpu->vdev.dma_addr_cache = RB_ROOT;
342 vgpu->vdev.nr_cache_entries = 0;
343 mutex_init(&vgpu->vdev.cache_lock);
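
A sketch of the gfn-side half of __gvt_cache_add() (file lines 261-306 above): every new entry is linked into both trees while cache_lock is held by the caller; the dma_addr_cache insertion is symmetric. The kzalloc and kref_init details are assumptions:

static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
                dma_addr_t dma_addr, unsigned long size)
{
        struct gvt_dma *new, *itr;
        struct rb_node **link, *parent = NULL;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->vgpu = vgpu;
        new->gfn = gfn;
        new->dma_addr = dma_addr;
        new->size = size;
        kref_init(&new->ref);   /* assumed refcount, paired with the unmap path */

        /* Ordered insertion into the gfn-keyed tree. */
        link = &vgpu->vdev.gfn_cache.rb_node;
        while (*link) {
                parent = *link;
                itr = rb_entry(parent, struct gvt_dma, gfn_node);

                if (gfn < itr->gfn)
                        link = &parent->rb_left;
                else
                        link = &parent->rb_right;
        }
        rb_link_node(&new->gfn_node, parent, link);
        rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);

        /* ... file lines 293-304: the same insertion for dma_addr_cache ... */

        vgpu->vdev.nr_cache_entries++;
        return 0;
}
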
414 static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
419 void *base = vgpu->vdev.region[i].data;
422 if (pos >= vgpu->vdev.region[i].size || iswrite) {
423 gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
426 count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
432 static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
442 static int handle_edid_regs(struct intel_vgpu *vgpu,
468 intel_gvt_ops->emulate_hotplug(vgpu, true);
470 intel_gvt_ops->emulate_hotplug(vgpu, false);
513 static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
520 (struct vfio_edid_region *)vgpu->vdev.region[i].data;
524 ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
536 static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
547 static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
554 region = krealloc(vgpu->vdev.region,
555 (vgpu->vdev.num_regions + 1) * sizeof(*region),
560 vgpu->vdev.region = region;
561 vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
562 vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
563 vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
564 vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
565 vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
566 vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
567 vgpu->vdev.num_regions++;
573 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
575 vgpu->vdev.vfio_device = vfio_device_get_from_dev(
576 mdev_dev(vgpu->vdev.mdev));
577 if (!vgpu->vdev.vfio_device) {
587 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
591 /* Each vgpu has its own opregion, although VFIO would create another
595 base = vgpu_opregion(vgpu)->va;
604 ret = intel_vgpu_register_reg(vgpu,
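
For orientation, a hypothetical completion of this registration call for the OpRegion region; the VFIO type/subtype constants, OPREGION_SIZE and the intel_vgpu_regops_opregion ops table are assumptions, not taken from the matches:

        ret = intel_vgpu_register_reg(vgpu,
                        PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
                        VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
                        &intel_vgpu_regops_opregion, OPREGION_SIZE,
                        VFIO_REGION_INFO_FLAG_READ, base);
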
615 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
616 struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
632 ret = intel_vgpu_register_reg(vgpu,
643 static void kvmgt_put_vfio_device(void *vgpu)
645 if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
648 vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
653 struct intel_vgpu *vgpu = NULL;
670 vgpu = intel_gvt_ops->vgpu_create(gvt, type);
671 if (IS_ERR_OR_NULL(vgpu)) {
672 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
673 gvt_err("failed to create intel vgpu: %d\n", ret);
677 INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
679 vgpu->vdev.mdev = mdev;
680 mdev_set_drvdata(mdev, vgpu);
692 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
694 if (handle_valid(vgpu->handle))
697 intel_gvt_ops->vgpu_destroy(vgpu);
704 struct intel_vgpu *vgpu = container_of(nb,
716 mutex_lock(&vgpu->vdev.cache_lock);
718 entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
722 gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
724 __gvt_cache_remove_entry(vgpu, entry);
726 mutex_unlock(&vgpu->vdev.cache_lock);
735 struct intel_vgpu *vgpu = container_of(nb,
741 vgpu->vdev.kvm = data;
744 schedule_work(&vgpu->vdev.release_work);
752 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
756 vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
757 vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
761 &vgpu->vdev.iommu_notifier);
770 &vgpu->vdev.group_notifier);
787 intel_gvt_ops->vgpu_activate(vgpu);
789 atomic_set(&vgpu->vdev.released, 0);
794 &vgpu->vdev.group_notifier);
798 &vgpu->vdev.iommu_notifier);
803 static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
807 trigger = vgpu->vdev.msi_trigger;
810 vgpu->vdev.msi_trigger = NULL;
814 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
819 if (!handle_valid(vgpu->handle))
822 if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
825 intel_gvt_ops->vgpu_release(vgpu);
827 ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
828 &vgpu->vdev.iommu_notifier);
831 ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
832 &vgpu->vdev.group_notifier);
838 info = (struct kvmgt_guest_info *)vgpu->handle;
841 intel_vgpu_release_msi_eventfd_ctx(vgpu);
843 vgpu->vdev.kvm = NULL;
844 vgpu->handle = 0;
849 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
851 __intel_vgpu_release(vgpu);
856 struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
859 __intel_vgpu_release(vgpu);
862 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
867 start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
869 mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
874 start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
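
A sketch of how the 64-bit BAR base is reassembled from the virtual config space; the masks are the standard PCI_BASE_ADDRESS_* definitions from <uapi/linux/pci_regs.h>:

static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
        u32 start_lo, start_hi;
        u32 mem_type;

        start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
                        PCI_BASE_ADDRESS_MEM_MASK;
        mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
                        PCI_BASE_ADDRESS_MEM_TYPE_MASK;

        switch (mem_type) {
        case PCI_BASE_ADDRESS_MEM_TYPE_64:
                /* 64-bit BAR: the next dword holds the upper 32 bits. */
                start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
                                        + bar + 4));
                break;
        case PCI_BASE_ADDRESS_MEM_TYPE_32:
        case PCI_BASE_ADDRESS_MEM_TYPE_1M:
        default:
                /* 32-bit and 1M BARs have no upper dword. */
                start_hi = 0;
                break;
        }

        return ((u64)start_hi << 32) | start_lo;
}
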
889 static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
892 u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
896 ret = intel_gvt_ops->emulate_mmio_write(vgpu,
899 ret = intel_gvt_ops->emulate_mmio_read(vgpu,
904 static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
906 return off >= vgpu_aperture_offset(vgpu) &&
907 off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
910 static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
915 if (!intel_vgpu_in_aperture(vgpu, off) ||
916 !intel_vgpu_in_aperture(vgpu, off + count)) {
921 aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
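
The remainder of the aperture access path is not matched above; a sketch of what plausibly follows the io_mapping_map_wc() call, with the ALIGN_DOWN/offset_in_page handling assumed:

        aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
                                        ALIGN_DOWN(off, PAGE_SIZE),
                                        count + offset_in_page(off));
        if (!aperture_va)
                return -EIO;

        if (is_write)
                memcpy_toio(aperture_va + offset_in_page(off), buf, count);
        else
                memcpy_fromio(buf, aperture_va + offset_in_page(off), count);

        io_mapping_unmap(aperture_va);

        return 0;
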
940 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
946 if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
954 ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
957 ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
961 ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
965 ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
975 if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
979 return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
988 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
990 struct intel_gvt *gvt = vgpu->gvt;
998 intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
1157 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1177 if (!intel_vgpu_in_aperture(vgpu, req_start))
1180 vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
1183 pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
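
After the bounds checks against this vGPU's aperture slice, the page offset is rebased onto the physical aperture and handed to remap_pfn_range(); a sketch of that tail, with the local variable names assumed:

        /* Rebase the BAR2 offset onto the host aperture's physical base. */
        pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;

        return remap_pfn_range(vma, vma->vm_start, pgoff,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
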
1188 static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
1196 static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
1204 static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
1211 static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
1218 static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
1232 vgpu->vdev.msi_trigger = trigger;
1234 intel_vgpu_release_msi_eventfd_ctx(vgpu);
1239 static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
1243 int (*func)(struct intel_vgpu *vgpu, unsigned int index,
1277 return func(vgpu, index, start, count, flags, data);
1283 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1286 gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
1302 vgpu->vdev.num_regions;
1328 info.size = vgpu->gvt->device_info.cfg_space_size;
1334 info.size = vgpu->cfg_space.bar[info.index].size;
1354 info.size = gvt_aperture_sz(vgpu->gvt);
1366 PAGE_ALIGN(vgpu_aperture_offset(vgpu));
1367 sparse->areas[0].size = vgpu_aperture_sz(vgpu);
1393 vgpu->vdev.num_regions)
1398 vgpu->vdev.num_regions);
1404 info.size = vgpu->vdev.region[i].size;
1405 info.flags = vgpu->vdev.region[i].flags;
1407 cap_type.type = vgpu->vdev.region[i].type;
1408 cap_type.subtype = vgpu->vdev.region[i].subtype;
1480 info.count = intel_vgpu_get_irq_count(vgpu, info.index);
1502 int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
1518 ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
1524 intel_gvt_ops->vgpu_reset(vgpu);
1537 ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
1550 dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
1565 struct intel_vgpu *vgpu = (struct intel_vgpu *)
1567 return sprintf(buf, "%d\n", vgpu->id);
1698 intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
1723 static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
1730 mutex_lock(&vgpu->gvt->lock);
1731 for_each_active_vgpu(vgpu->gvt, itr, id) {
1742 mutex_unlock(&vgpu->gvt->lock);
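
A sketch of the duplicate-KVM check these matches come from: the gvt lock is held while every active vGPU is walked and its bound kvm instance compared; the info->kvm field is an assumption here:

static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
        struct intel_vgpu *itr;
        struct kvmgt_guest_info *info;
        bool ret = false;
        int id;

        mutex_lock(&vgpu->gvt->lock);
        for_each_active_vgpu(vgpu->gvt, itr, id) {
                if (!handle_valid(itr->handle))
                        continue;

                info = (struct kvmgt_guest_info *)itr->handle;
                if (kvm && kvm == info->kvm) {
                        /* This KVM instance already drives a vGPU. */
                        ret = true;
                        goto out;
                }
        }
out:
        mutex_unlock(&vgpu->gvt->lock);
        return ret;
}
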
1749 struct intel_vgpu *vgpu;
1752 vgpu = mdev_get_drvdata(mdev);
1753 if (handle_valid(vgpu->handle))
1756 kvm = vgpu->vdev.kvm;
1758 gvt_vgpu_err("KVM is required to use Intel vGPU\n");
1762 if (__kvmgt_vgpu_exist(vgpu, kvm))
1769 vgpu->handle = (unsigned long)info;
1770 info->vgpu = vgpu;
1775 gvt_cache_init(vgpu);
1777 init_completion(&vgpu->vblank_done);
1785 0444, vgpu->debugfs,
1786 &vgpu->vdev.nr_cache_entries);
1797 gvt_cache_destroy(info->vgpu);
1803 static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
1812 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
1814 if (!vgpu->vdev.region)
1817 for (i = 0; i < vgpu->vdev.num_regions; i++)
1818 if (vgpu->vdev.region[i].ops->release)
1819 vgpu->vdev.region[i].ops->release(vgpu,
1820 &vgpu->vdev.region[i]);
1821 vgpu->vdev.num_regions = 0;
1822 kfree(vgpu->vdev.region);
1823 vgpu->vdev.region = NULL;
1829 struct intel_vgpu *vgpu;
1835 vgpu = info->vgpu;
1838 * When the guest is powered off, msi_trigger is set to NULL, but the vgpu's
1840 * poweroff. If this vgpu is reused by the next VM, this vgpu's pipe
1841 * may be enabled, and once this vgpu is active it may receive an injected
1846 if (vgpu->vdev.msi_trigger == NULL)
1849 if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
1876 struct intel_vgpu *vgpu;
1884 vgpu = info->vgpu;
1886 mutex_lock(&info->vgpu->vdev.cache_lock);
1888 entry = __gvt_cache_find_gfn(info->vgpu, gfn);
1890 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1894 ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
1899 gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
1900 __gvt_cache_remove_entry(vgpu, entry);
1902 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1906 ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
1914 mutex_unlock(&info->vgpu->vdev.cache_lock);
1918 gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
1920 mutex_unlock(&info->vgpu->vdev.cache_lock);
1935 mutex_lock(&info->vgpu->vdev.cache_lock);
1936 entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
1941 mutex_unlock(&info->vgpu->vdev.cache_lock);
1950 gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
1952 __gvt_cache_remove_entry(entry->vgpu, entry);
1965 mutex_lock(&info->vgpu->vdev.cache_lock);
1966 entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
1969 mutex_unlock(&info->vgpu->vdev.cache_lock);
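
The ref/unref paths at file lines 1935-1969 manage each mapping through a reference count rather than tearing it down on the spot. A sketch of the assumed kref release callback that finally unmaps, unpins and removes the cache entry (the kref itself is not visible in these matches):

static void __gvt_dma_release(struct kref *ref)
{
        struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);

        gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
                           entry->size);
        __gvt_cache_remove_entry(entry->vgpu, entry);
}

In kvmgt_dma_unmap_guest_page(), the entry found by __gvt_cache_find_dma_addr() would then be released with kref_put(&entry->ref, __gvt_dma_release) while cache_lock is still held.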