/*	$NetBSD: vmwgfx_fence.c,v 1.4 2022/10/25 23:34:05 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_fence.c,v 1.4 2022/10/25 23:34:05 riastradh Exp $");

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#include <linux/nbsd-namespace.h>

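/*
 * Seqnos use wrap-around u32 arithmetic: a fence counts as passed when
 * (current_seqno - fence_seqno) < VMW_FENCE_WRAP, i.e. when the fence
 * seqno is at most 2^31 - 1 steps behind the current one.  For example,
 * with current_seqno == 2 and fence_seqno == 0xfffffffe the u32
 * difference is 4, so the fence has passed even though the counter
 * wrapped in between.
 */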
#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A struct drm_pending_event that controls the event delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event machinery.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to
 * will be assigned the current time tv_usec value when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

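/*
 * Every fence is initialized with &fman->lock as its dma_fence lock (see
 * vmw_fence_obj_init() below), so the owning manager can be recovered
 * from any fence by applying container_of() to that lock pointer.
 */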
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/**
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that irq
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new
 * fences, fence objects may not be signaled. This is perfectly OK, since
 * there are no consumers of the signaled data, but that is NOT OK when
 * there are fence actions attached to a fence. The fencing subsystem then
 * makes use of the FENCE_GOAL irq and sets the fence goal seqno to that of
 * the next fence which has an action attached, and each time
 * vmw_fences_update is called, the subsystem makes sure the fence goal
 * seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */
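
/*
 * Illustrative sketch (not part of the driver): a kernel-side consumer
 * normally just creates a fence object for a submitted seqno and waits
 * on it; the ANY_FENCE irq bookkeeping described above happens inside
 * the wait path:
 *
 *	struct vmw_fence_obj *fence;
 *
 *	if (vmw_fence_create(fman, seqno, &fence) == 0) {
 *		(void) vmw_fence_obj_wait(fence, false, true,
 *		    VMW_FENCE_WAIT_TIMEOUT);
 *		vmw_fence_obj_unreference(&fence);
 *	}
 */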

static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
#ifdef __NetBSD__
	drm_waitqueue_t wq;
#else
	struct task_struct *task;
#endif
};

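/*
 * Wait callback: on Linux the callback wakes the sleeping task directly;
 * in the NetBSD port the waiter sleeps on a drm_waitqueue_t instead and
 * the callback broadcasts on it, in both cases under the fence lock.
 */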
static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

#ifdef __NetBSD__
	DRM_SPIN_WAKEUP_ALL(&wait->wq, fence->lock);
#else
	wake_up_process(wait->task);
#endif
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

#ifdef __NetBSD__
	DRM_INIT_WAITQUEUE(&cb.wq, "vmwgfxwf");
#else
	cb.task = current;
#endif
	spin_unlock(f->lock);
	ret = dma_fence_add_callback(f, &cb.base, vmwgfx_wait_cb);
	spin_lock(f->lock);
	if (ret)
		goto out;

#ifdef __NetBSD__
#define	C	(__vmw_fences_update(fman), dma_fence_is_signaled_locked(f))
	if (intr) {
		DRM_SPIN_TIMED_WAIT_UNTIL(ret, &cb.wq, f->lock, timeout, C);
	} else {
		DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &cb.wq, f->lock, timeout,
		    C);
	}
#else
	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
#endif
	spin_unlock(f->lock);
	dma_fence_remove_callback(f, &cb.base);
	spin_lock(f->lock);

out:
	spin_unlock(f->lock);
#ifdef __NetBSD__
	DRM_DESTROY_WAITQUEUE(&cb.wq);
#endif

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

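/*
 * These ops back the struct dma_fence embedded in every vmw_fence_obj,
 * so generic dma_fence_wait()/dma_fence_is_signaled() calls on
 * &fence->base are dispatched to the functions above.
 */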

/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * hence fman::lock need not be held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
		TTM_OBJ_EXTRA_SIZE;
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;

}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}


/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 *fifo_mem;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return 1;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

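/*
 * Lifetime note: the final dma_fence_put() on &fence->base calls
 * vmw_fence_obj_destroy() via vmw_fence_ops.release, which unlinks the
 * fence from the manager and then invokes the destroy callback given to
 * vmw_fence_obj_init() (vmw_fence_destroy() above for kernel fences).
 */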

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   &ctx);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);


	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}


/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array.  If that's the case, it'll wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;


	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */

	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}


/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}


/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}


int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

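	/*
	 * The shifts approximate the division closely:
	 * 2^-20 + 2^-24 - 2^-26 = (64 + 4 - 1)/2^26 = 67/2^26
	 * ~= 0.99838e-6, i.e. within about 0.2% of 1/1000000.
	 */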
	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	  (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&dev_priv->fence_lock);
	const u32 seqno = dev_priv->last_read_seqno;
	spin_unlock(&dev_priv->fence_lock);
	spin_lock(&fman->lock);
	arg->passed_seqno = seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}


int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}


/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
			      struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);

}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been allocated
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-NULL, assigned the current time tv_sec value when
 * the fence signals.
 * @tv_usec: Must be set if @tv_sec is set; assigned the current time
 * tv_usec value when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */

int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};
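
/*
 * The drm_vmw_event_fence member is the uAPI payload user space receives
 * by read()ing the drm fd; the drm_pending_event base is kernel-internal
 * bookkeeping used to queue and deliver it.
 */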

static int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  uint32_t flags,
				  uint64_t user_data,
				  bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}
   1225