/*	$NetBSD: virtgpu_fence.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $	*/

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtgpu_fence.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");

#include <trace/events/dma_fence.h>

#include "virtgpu_drv.h"

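/* Recover the struct virtio_gpu_fence that embeds a given dma_fence. */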
#define to_virtio_fence(x) \
	container_of(x, struct virtio_gpu_fence, f)

static const char *virtio_get_driver_name(struct dma_fence *f)
{
	return "virtio_gpu";
}

static const char *virtio_get_timeline_name(struct dma_fence *f)
{
	return "controlq";
}

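/*
 * A fence is signalled once the host's completion counter (last_seq)
 * has caught up with the fence's sequence number.
 */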
static bool virtio_fence_signaled(struct dma_fence *f)
{
	struct virtio_gpu_fence *fence = to_virtio_fence(f);

	if (WARN_ON_ONCE(fence->f.seqno == 0))
		/* fence leaked outside the driver before
		 * virtio_gpu_fence_emit completed initialization */
		return false;
	if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
		return true;
	return false;
}

static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
{
	snprintf(str, size, "%llu", f->seqno);
}

static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
{
	struct virtio_gpu_fence *fence = to_virtio_fence(f);

	snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
}

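/* dma_fence callbacks: names, a seqno-based signalled test, and
 * seqno printers for debugging output. */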
static const struct dma_fence_ops virtio_fence_ops = {
	.get_driver_name     = virtio_get_driver_name,
	.get_timeline_name   = virtio_get_timeline_name,
	.signaled            = virtio_fence_signaled,
	.fence_value_str     = virtio_fence_value_str,
	.timeline_value_str  = virtio_timeline_value_str,
};

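/* Allocate a fence on vgdev's control-queue fence timeline. */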
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
							GFP_KERNEL);
	if (!fence)
		return fence;

	fence->drv = drv;

	/* This only partially initializes the fence because the seqno is
	 * unknown yet.  The fence must not be used outside of the driver
	 * until virtio_gpu_fence_emit is called.
	 */
	dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);

	return fence;
}

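/*
 * Assign the next sequence number to @fence, queue it on the driver's
 * list of outstanding fences, and tag @cmd_hdr so the host signals
 * completion of the command with this fence id.
 */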
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
			  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->lock, irq_flags);
	fence->f.seqno = ++drv->sync_seq;
	dma_fence_get(&fence->f);
	list_add_tail(&fence->node, &drv->fences);
	spin_unlock_irqrestore(&drv->lock, irq_flags);

	trace_dma_fence_emit(&fence->f);

	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
	cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
}

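/*
 * Handle a fence event from the host: record the new last_seq and
 * signal every outstanding fence whose sequence number it covers.
 */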
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
				    u64 last_seq)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	struct virtio_gpu_fence *fence, *tmp;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->lock, irq_flags);
	atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
	list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
		if (last_seq < fence->f.seqno)
			continue;
		dma_fence_signal_locked(&fence->f);
		list_del(&fence->node);
		dma_fence_put(&fence->f);
	}
	spin_unlock_irqrestore(&drv->lock, irq_flags);
}