
Lines Matching defs:vgdev

62 struct virtio_gpu_device *vgdev = dev->dev_private;
64 schedule_work(&vgdev->ctrlq.dequeue_work);
70 struct virtio_gpu_device *vgdev = dev->dev_private;
72 schedule_work(&vgdev->cursorq.dequeue_work);
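
The two ack callbacks above run in virtqueue interrupt context and do nothing but schedule the matching dequeue work item. A minimal sketch of the control-queue side, assembled from the fragments at lines 62-64 (looking up the drm_device through vq->vdev->priv is an assumption about the surrounding code, not shown in the matches):

    void virtio_gpu_ctrl_ack(struct virtqueue *vq)
    {
            struct drm_device *dev = vq->vdev->priv;        /* assumed: drm_device stashed in vdev->priv */
            struct virtio_gpu_device *vgdev = dev->dev_private;

            schedule_work(&vgdev->ctrlq.dequeue_work);      /* defer reclaim to process context */
    }
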
75 int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
77 vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
81 if (!vgdev->vbufs)
86 void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
88 kmem_cache_destroy(vgdev->vbufs);
89 vgdev->vbufs = NULL;
93 virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
99 vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
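
virtio_gpu_alloc_vbufs()/virtio_gpu_free_vbufs() manage the dedicated slab cache that every command buffer (vbuf) is drawn from. A sketch of that pair, assuming a VBUFFER_SIZE macro (not shown in the matches) that covers struct virtio_gpu_vbuffer plus inline command and response space:

    int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
    {
            vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                             VBUFFER_SIZE,  /* assumed size macro */
                                             __alignof__(struct virtio_gpu_vbuffer),
                                             0, NULL);
            if (!vgdev->vbufs)
                    return -ENOMEM;
            return 0;
    }

    void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
    {
            kmem_cache_destroy(vgdev->vbufs);
            vgdev->vbufs = NULL;
    }

Individual vbufs then come from kmem_cache_zalloc() in virtio_gpu_get_vbuf() (line 99) and go back via kmem_cache_free() in free_vbuf() (line 175).
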
117 static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
123 vbuf = virtio_gpu_get_vbuf(vgdev, size,
135 virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
141 (vgdev, sizeof(struct virtio_gpu_update_cursor),
151 static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
159 vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
169 static void free_vbuf(struct virtio_gpu_device *vgdev,
175 kmem_cache_free(vgdev->vbufs, vbuf);
194 struct virtio_gpu_device *vgdev =
203 spin_lock(&vgdev->ctrlq.qlock);
205 virtqueue_disable_cb(vgdev->ctrlq.vq);
206 reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
208 } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
209 spin_unlock(&vgdev->ctrlq.qlock);
214 trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
237 entry->resp_cb(vgdev, entry);
239 wake_up(&vgdev->ctrlq.ack_queue);
242 virtio_gpu_fence_event_process(vgdev, fence_id);
246 virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
248 free_vbuf(vgdev, entry);
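
Both dequeue workers use the same disable/reclaim/re-enable loop so that completions that race in while callbacks are disabled are still picked up. A simplified sketch of the control-queue worker, assuming reclaim_vbufs() moves finished vbufs onto the local list via an assumed "list" member; the fence bookkeeping and delayed object-array release visible at lines 242 and 246 are omitted, and the cursor-queue worker below follows the same shape:

    void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
    {
            struct virtio_gpu_device *vgdev =
                    container_of(work, struct virtio_gpu_device,
                                 ctrlq.dequeue_work);
            struct virtio_gpu_vbuffer *entry, *tmp;
            struct list_head reclaim_list;

            INIT_LIST_HEAD(&reclaim_list);
            spin_lock(&vgdev->ctrlq.qlock);
            do {
                    virtqueue_disable_cb(vgdev->ctrlq.vq);
                    reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
            } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
            spin_unlock(&vgdev->ctrlq.qlock);

            list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                    if (entry->resp_cb)
                            entry->resp_cb(vgdev, entry);   /* per-command completion hook */
                    list_del(&entry->list);
                    free_vbuf(vgdev, entry);
            }
            wake_up(&vgdev->ctrlq.ack_queue);               /* unblock submitters waiting for ring space */
    }
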
254 struct virtio_gpu_device *vgdev =
261 spin_lock(&vgdev->cursorq.qlock);
263 virtqueue_disable_cb(vgdev->cursorq.vq);
264 reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
265 } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
266 spin_unlock(&vgdev->cursorq.qlock);
270 free_vbuf(vgdev, entry);
272 wake_up(&vgdev->cursorq.ack_queue);
315 static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
318 __releases(&vgdev->ctrlq.qlock)
319 __acquires(&vgdev->ctrlq.qlock)
321 struct virtqueue *vq = vgdev->ctrlq.vq;
327 if (!vgdev->vqs_ready)
348 spin_unlock(&vgdev->ctrlq.qlock);
349 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
350 spin_lock(&vgdev->ctrlq.qlock);
361 static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
366 struct virtqueue *vq = vgdev->ctrlq.vq;
387 spin_lock(&vgdev->ctrlq.qlock);
398 spin_unlock(&vgdev->ctrlq.qlock);
399 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
404 virtio_gpu_fence_emit(vgdev, hdr, fence);
410 notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
411 spin_unlock(&vgdev->ctrlq.qlock);
413 if (vgdev->disable_notify)
414 vgdev->pending_notify = true;
416 virtqueue_notify(vgdev->ctrlq.vq);
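
The fenced submit path re-checks ring space in a loop, emits the fence only once the descriptors are guaranteed to fit (so the fence id assigned under the qlock matches submission order), and kicks the host unless notification is being batched. A condensed sketch of lines 387-416, with setup and error handling left out; vq, hdr, fence, vbuf, vout and notify are the locals visible in the matches, and the exact free-descriptor threshold is assumed:

    spin_lock(&vgdev->ctrlq.qlock);
    again:
            if (vq->num_free < 3) {                 /* assumed threshold; the match only shows the wait */
                    /* not enough descriptors: drop the lock and wait for reclaim */
                    spin_unlock(&vgdev->ctrlq.qlock);
                    wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
                    spin_lock(&vgdev->ctrlq.qlock);
                    goto again;
            }

            if (fence)
                    virtio_gpu_fence_emit(vgdev, hdr, fence);  /* fence id assigned under the lock */
            notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
            spin_unlock(&vgdev->ctrlq.qlock);

            if (notify) {
                    if (vgdev->disable_notify)
                            vgdev->pending_notify = true;   /* batched: kick on enable_notify() */
                    else
                            virtqueue_notify(vgdev->ctrlq.vq);
            }
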
425 void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
427 vgdev->disable_notify = true;
430 void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
432 vgdev->disable_notify = false;
434 if (!vgdev->pending_notify)
436 vgdev->pending_notify = false;
437 virtqueue_notify(vgdev->ctrlq.vq);
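
virtio_gpu_disable_notify()/virtio_gpu_enable_notify() are the batching pair callers use to coalesce several commands into a single kick: while disabled, submissions only set pending_notify, and one notify is replayed on enable. Reassembled from the fragments above:

    void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
    {
            vgdev->disable_notify = true;
    }

    void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
    {
            vgdev->disable_notify = false;

            if (!vgdev->pending_notify)
                    return;
            vgdev->pending_notify = false;
            virtqueue_notify(vgdev->ctrlq.vq);      /* single kick for the whole batch */
    }
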
440 static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
443 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
446 static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
449 struct virtqueue *vq = vgdev->cursorq.vq;
455 if (!vgdev->vqs_ready)
462 spin_lock(&vgdev->cursorq.qlock);
466 spin_unlock(&vgdev->cursorq.qlock);
467 wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
468 spin_lock(&vgdev->cursorq.qlock);
477 spin_unlock(&vgdev->cursorq.qlock);
488 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
497 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
507 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
511 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
517 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
523 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
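
Most of the command emitters that follow share one pattern: allocate a typed command from the vbuf cache, fill it, then queue it with or without a fence. The smallest example, reconstructed around the fragments at lines 511-523 (the second parameter, the memset and the field fill are the assumed parts):

    void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id)
    {
            struct virtio_gpu_resource_unref *cmd_p;
            struct virtio_gpu_vbuffer *vbuf;

            cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
            memset(cmd_p, 0, sizeof(*cmd_p));

            cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
            cmd_p->resource_id = cpu_to_le32(resource_id);

            virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);      /* unfenced submit */
    }
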
526 static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
533 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
539 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
542 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
550 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
561 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
564 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
572 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
582 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
585 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
595 bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
598 dma_sync_sg_for_device(vgdev->vdev->dev.parent,
602 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
614 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
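
virtio_gpu_cmd_transfer_to_host_2d() (and the 3D variant at line 1000) syncs the backing pages for device access before queueing whenever the DMA API is in use, i.e. when the transport does not carry the IOMMU bypass quirk. The relevant step, sketched with an assumed obj->pages sg_table as at line 1106:

    bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

    if (use_dma_api)
            dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                   obj->pages->sgl, obj->pages->nents,
                                   DMA_TO_DEVICE);
    /* ...then fill the transfer command and queue it with the fence */
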
618 virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
627 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
637 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
640 static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
647 spin_lock(&vgdev->display_info_lock);
648 for (i = 0; i < vgdev->num_scanouts; i++) {
649 vgdev->outputs[i].info = resp->pmodes[i];
661 vgdev->display_info_pending = false;
662 spin_unlock(&vgdev->display_info_lock);
663 wake_up(&vgdev->resp_wq);
665 if (!drm_helper_hpd_irq_event(vgdev->ddev))
666 drm_kms_helper_hotplug_event(vgdev->ddev);
669 static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
678 spin_lock(&vgdev->display_info_lock);
679 vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
680 vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
681 vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
682 spin_unlock(&vgdev->display_info_lock);
683 wake_up(&vgdev->resp_wq);
686 static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
695 spin_lock(&vgdev->display_info_lock);
696 list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
707 spin_unlock(&vgdev->display_info_lock);
708 wake_up_all(&vgdev->resp_wq);
723 static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
734 if (scanout >= vgdev->num_scanouts)
736 output = vgdev->outputs + scanout;
741 spin_lock(&vgdev->display_info_lock);
744 spin_unlock(&vgdev->display_info_lock);
747 wake_up(&vgdev->resp_wq);
750 int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
762 (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
767 vgdev->display_info_pending = true;
769 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
773 int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
785 (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
792 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
796 int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
809 if (idx >= vgdev->num_capsets)
812 if (version > vgdev->capsets[idx].max_version)
819 max_size = vgdev->capsets[idx].max_size;
835 cache_ent->id = vgdev->capsets[idx].id;
838 spin_lock(&vgdev->display_info_lock);
840 list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
841 if (search_ent->id == vgdev->capsets[idx].id &&
848 list_add_tail(&cache_ent->head, &vgdev->cap_cache);
849 spin_unlock(&vgdev->display_info_lock);
860 (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
864 cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
867 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
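
virtio_gpu_cmd_get_capset() caches responses per capset id and version: the freshly built cache entry is only added if no matching entry already exists, all under display_info_lock. A sketch of that duplicate check (search_ent, cache_ent and the cap_cache list come from the matches; version and cache_p are assumed names):

    spin_lock(&vgdev->display_info_lock);
    list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
            if (search_ent->id == vgdev->capsets[idx].id &&
                search_ent->version == version) {
                    *cache_p = search_ent;          /* reuse the entry already in flight */
                    break;
            }
    }
    if (!*cache_p)
            list_add_tail(&cache_ent->head, &vgdev->cap_cache);
    spin_unlock(&vgdev->display_info_lock);

When a new entry is queued, the response is later handled by virtio_gpu_cmd_capset_cb(), which fills the cached data and wakes resp_wq (line 708).
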
872 int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
879 if (WARN_ON(!vgdev->has_edid))
882 for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
889 (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
894 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
900 void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
906 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
914 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
917 void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
923 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
928 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
931 void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
939 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
946 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
950 void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
958 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
965 virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
969 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
978 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
996 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
1000 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
1010 bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
1013 dma_sync_sg_for_device(vgdev->vdev->dev.parent,
1017 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1029 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
1032 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
1043 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1055 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
1058 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
1067 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1078 virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
1081 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
1085 bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
1106 obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
1130 virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
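
virtio_gpu_object_attach() maps the object's backing pages through the DMA API before handing their addresses to the host via attach_backing. A sketch of the mapping branch around line 1106 (the nents bookkeeping is assumed):

    if (use_dma_api) {
            obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
                                     obj->pages->sgl, obj->pages->nents,
                                     DMA_TO_DEVICE);
            nents = obj->mapped;            /* number of DMA segments actually mapped */
    } else {
            nents = obj->pages->nents;      /* pass guest-physical pages directly */
    }
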
1136 void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
1139 bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
1145 struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
1147 virtio_gpu_cmd_resource_inval_backing(vgdev
1152 dma_unmap_sg(vgdev->vdev->dev.parent,
1157 virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
1166 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
1173 cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
1175 virtio_gpu_queue_cursor(vgdev, vbuf);
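
Cursor updates bypass the control queue entirely: virtio_gpu_cursor_ping() takes an update_cursor slot from the cursor queue, copies the output's current cursor state into it, and queues it without a fence. A sketch assembled from lines 1166-1175 (the scanout_id fixup and the memcpy are the assumed parts):

    void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                                struct virtio_gpu_output *output)
    {
            struct virtio_gpu_vbuffer *vbuf;
            struct virtio_gpu_update_cursor *cur_p;

            output->cursor.pos.scanout_id = cpu_to_le32(output->index);
            cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
            memcpy(cur_p, &output->cursor, sizeof(output->cursor));
            virtio_gpu_queue_cursor(vgdev, vbuf);   /* dedicated cursor virtqueue, no fence */
    }
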