/*	$NetBSD: vmwgfx_marker.c,v 1.4 2022/10/25 23:35:43 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2010 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_marker.c,v 1.4 2022/10/25 23:35:43 riastradh Exp $");

#include "vmwgfx_drv.h"

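/*
 * A marker ties a fence sequence number to the time it was submitted
 * to the device FIFO, so the driver can estimate how far the FIFO lags
 * behind command submission.
 */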
struct vmw_marker {
	struct list_head head;
	uint32_t seqno;
	u64 submitted;
};

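/**
 * vmw_marker_queue_init - Initialize a marker queue.
 *
 * @queue: The queue to initialize.
 *
 * Sets up an empty marker list, zeroes the lag estimate and
 * initializes the queue spinlock.
 */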
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
	INIT_LIST_HEAD(&queue->head);
	queue->lag = 0;
	queue->lag_time = ktime_get_raw_ns();
	spin_lock_init(&queue->lock);
}

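/**
 * vmw_marker_queue_takedown - Tear down a marker queue.
 *
 * @queue: The queue to tear down.
 *
 * Frees any markers still on the queue and destroys the queue
 * spinlock. The caller must ensure the queue is no longer in use.
 */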
void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
{
	struct vmw_marker *marker, *next;

	list_for_each_entry_safe(marker, next, &queue->head, head) {
		kfree(marker);
	}
	spin_lock_destroy(&queue->lock);
}

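/**
 * vmw_marker_push - Queue a marker for a submitted sequence number.
 *
 * @queue: The marker queue.
 * @seqno: The sequence number that was just submitted.
 *
 * Allocates a marker stamped with the current time and appends it to
 * the queue. Returns 0 on success, -ENOMEM if the allocation fails.
 */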
int vmw_marker_push(struct vmw_marker_queue *queue,
		   uint32_t seqno)
{
	struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);

	if (unlikely(!marker))
		return -ENOMEM;

	marker->seqno = seqno;
	marker->submitted = ktime_get_raw_ns();
	spin_lock(&queue->lock);
	list_add_tail(&marker->head, &queue->head);
	spin_unlock(&queue->lock);

	return 0;
}

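/**
 * vmw_marker_pull - Retire markers up to a signaled sequence number.
 *
 * @queue: The marker queue.
 * @signaled_seqno: The sequence number known to have signaled.
 *
 * Removes every marker whose sequence number has signaled and updates
 * the lag estimate from the retired markers. Returns 0 if the lag
 * estimate was updated, -EBUSY if no marker could be retired.
 */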
int vmw_marker_pull(struct vmw_marker_queue *queue,
		   uint32_t signaled_seqno)
{
	struct vmw_marker *marker, *next;
	bool updated = false;
	u64 now;

	spin_lock(&queue->lock);
	now = ktime_get_raw_ns();

	if (list_empty(&queue->head)) {
		queue->lag = 0;
		queue->lag_time = now;
		updated = true;
		goto out_unlock;
	}

	list_for_each_entry_safe(marker, next, &queue->head, head) {
		/*
		 * Skip markers that have not signaled yet: with 32-bit
		 * wraparound arithmetic, a difference larger than 1 << 30
		 * means marker->seqno is still ahead of signaled_seqno.
		 */
		if (signaled_seqno - marker->seqno > (1 << 30))
			continue;

		queue->lag = now - marker->submitted;
		queue->lag_time = now;
		updated = true;
		list_del(&marker->head);
		kfree(marker);
	}

out_unlock:
	spin_unlock(&queue->lock);

	return (updated) ? 0 : -EBUSY;
}

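/**
 * vmw_fifo_lag - Return the current lag estimate.
 *
 * @queue: The marker queue.
 *
 * Accumulates the time elapsed since the last update into the lag
 * estimate and returns the result in nanoseconds.
 */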
static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
{
	u64 now;

	spin_lock(&queue->lock);
	now = ktime_get_raw_ns();
	queue->lag += now - queue->lag_time;
	queue->lag_time = now;
	spin_unlock(&queue->lock);
	return queue->lag;
}


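/**
 * vmw_lag_lt - Check whether the lag is below a threshold.
 *
 * @queue: The marker queue.
 * @us: The threshold in microseconds.
 *
 * Returns true if the current lag estimate is at most @us.
 */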
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
		       uint32_t us)
{
	u64 cond = (u64) us * NSEC_PER_USEC;

	return vmw_fifo_lag(queue) <= cond;
}

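/**
 * vmw_wait_lag - Wait until the lag drops below a threshold.
 *
 * @dev_priv: The device private structure.
 * @queue: The marker queue to throttle against.
 * @us: The lag threshold in microseconds.
 *
 * Repeatedly waits for the oldest outstanding marker's sequence number
 * to signal, retiring markers as they complete, until the lag estimate
 * is at most @us. Returns 0 on success or the error returned by
 * vmw_wait_seqno().
 */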
int vmw_wait_lag(struct vmw_private *dev_priv,
		 struct vmw_marker_queue *queue, uint32_t us)
{
	struct vmw_marker *marker;
	uint32_t seqno;
	int ret;

	while (!vmw_lag_lt(queue, us)) {
		/*
		 * Wait on the oldest outstanding marker, or on the most
		 * recently submitted sequence number if the queue is empty.
		 */
		spin_lock(&queue->lock);
		if (list_empty(&queue->head))
			seqno = atomic_read(&dev_priv->marker_seq);
		else {
			marker = list_first_entry(&queue->head,
						 struct vmw_marker, head);
			seqno = marker->seqno;
		}
		spin_unlock(&queue->lock);

		ret = vmw_wait_seqno(dev_priv, false, seqno, true,
					3*HZ);

		if (unlikely(ret != 0))
			return ret;

		(void) vmw_marker_pull(queue, seqno);
	}
	return 0;
}
    160