vmwgfx_fifo.c revision 1.1.1.2
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>

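/**
 * vmw_fifo_have_3d - Check whether the device supports 3D commands.
 *
 * @dev_priv: The device private structure.
 *
 * On guest-backed object devices the 3D capability is queried through
 * the SVGA_REG_DEV_CAP register pair; on older devices it is derived
 * from the fifo 3D hardware version. Returns true if 3D is supported.
 */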
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		mutex_unlock(&dev_priv->hw_mutex);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = ioread32(fifo_mem +
			     ((fifo->capabilities &
			       SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
			      SVGA_FIFO_3D_HWVERSION_REVISED :
			      SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Non-Screen Object path does not support surfaces */
	if (!dev_priv->sou_priv)
		return false;

	return true;
}

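/**
 * vmw_fifo_have_pitchlock - Check for the fifo pitchlock capability.
 *
 * @dev_priv: The device private structure.
 *
 * Returns true if the device has an extended fifo that advertises
 * SVGA_FIFO_CAP_PITCHLOCK.
 */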
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

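/**
 * vmw_fifo_init - Initialize the fifo and enable the SVGA device.
 *
 * @dev_priv: The device private structure.
 * @fifo: The fifo state to initialize.
 *
 * Allocates the static bounce buffer, saves the register state to be
 * restored on release, enables the device and sets up the fifo
 * MIN/MAX/NEXT_CMD/STOP registers before reading back the fifo
 * capabilities. Finally sends an initial fence through the fifo.
 *
 * Returns 0 on success, negative error code on failure.
 */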
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	mutex_unlock(&dev_priv->hw_mutex);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);
	return vmw_fifo_send_fence(dev_priv, &dummy);
}

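/**
 * vmw_fifo_ping_host - Notify the host that fifo commands are pending.
 *
 * @dev_priv: The device private structure.
 * @reason: The SVGA_SYNC_* reason code written to the sync register.
 *
 * Writes the sync register only if the fifo isn't already marked busy,
 * avoiding redundant register writes.
 */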
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}

	mutex_unlock(&dev_priv->hw_mutex);
}

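/**
 * vmw_fifo_release - Tear down the fifo and restore the device state.
 *
 * @dev_priv: The device private structure.
 * @fifo: The fifo state to tear down.
 *
 * Waits for the device to go idle, records the last read seqno,
 * restores the register state saved at init time and frees the
 * bounce buffers.
 */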
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);

	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);

	mutex_unlock(&dev_priv->hw_mutex);
	vmw_marker_queue_takedown(&fifo->marker_queue);

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}

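/**
 * vmw_fifo_is_full - Check whether @bytes bytes of fifo space are unavailable.
 *
 * @dev_priv: The device private structure.
 * @bytes: Number of bytes needed.
 *
 * Returns true if fewer than @bytes bytes are free in the circular
 * command fifo, as computed from the MIN, MAX, NEXT_CMD and STOP
 * fifo registers.
 */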
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

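/**
 * vmw_fifo_wait_noirq - Wait for fifo space by polling.
 *
 * @dev_priv: The device private structure.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Fallback used when the device lacks SVGA_CAP_IRQMASK: sleeps in
 * one-jiffy increments until space is available, the timeout expires
 * (-EBUSY) or, if interruptible, a signal arrives (-ERESTARTSYS).
 */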
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

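/**
 * vmw_fifo_wait - Wait for @bytes bytes of fifo space to become available.
 *
 * @dev_priv: The device private structure.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Pings the host and then waits on the fifo queue, enabling the
 * fifo-progress interrupt while there are waiters and disabling it
 * again when the last waiter leaves. Falls back to polling on devices
 * without interrupt support.
 *
 * Returns 0 on success, -EBUSY on timeout or -ERESTARTSYS if
 * interrupted.
 */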
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}

/**
 * vmw_fifo_reserve - Reserve @bytes bytes of space in the fifo.
 *
 * @dev_priv: The device private structure.
 * @bytes: Number of bytes to reserve.
 *
 * This function returns NULL (error) on two conditions:
 * if it times out waiting for fifo space, or if @bytes is larger than
 * the available fifo space.
 *
 * Returns:
 *   Pointer to the reserved fifo space, or NULL on error (possible
 *   hardware hang).
 */
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return fifo_mem + (next_cmd >> 2);
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);
	return NULL;
}

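/**
 * vmw_fifo_res_copy - Copy a bounce buffer to the fifo using reservation.
 *
 * @fifo_state: The fifo state.
 * @fifo_mem: Mapped fifo memory.
 * @next_cmd: Current value of the NEXT_CMD fifo register.
 * @max: Current value of the MAX fifo register.
 * @min: Current value of the MIN fifo register.
 * @bytes: Number of bytes to copy.
 *
 * Publishes the size in SVGA_FIFO_RESERVED and copies the bounce
 * buffer in at most two chunks, wrapping around at the end of the fifo.
 */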
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      __le32 __iomem *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
			    rest);
}

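/**
 * vmw_fifo_slow_copy - Copy a bounce buffer to the fifo word by word.
 *
 * @fifo_state: The fifo state.
 * @fifo_mem: Mapped fifo memory.
 * @next_cmd: Current value of the NEXT_CMD fifo register.
 * @max: Current value of the MAX fifo register.
 * @min: Current value of the MIN fifo register.
 * @bytes: Number of bytes to copy.
 *
 * Fallback for fifos without SVGA_FIFO_CAP_RESERVE: each 32-bit word
 * is written and made visible by updating NEXT_CMD before the next
 * word is copied.
 */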
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       __le32 __iomem *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

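/**
 * vmw_fifo_commit - Commit @bytes bytes of previously reserved fifo space.
 *
 * @dev_priv: The device private structure.
 * @bytes: Number of bytes to commit. Must not exceed the reserved size.
 *
 * If a bounce buffer was used for the reservation, its contents are
 * first copied into the fifo. NEXT_CMD is then advanced past the new
 * commands, the reservation is cleared and the host is pinged.
 */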
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

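/**
 * vmw_fifo_send_fence - Emit a fence command to the fifo.
 *
 * @dev_priv: The device private structure.
 * @seqno: Pointer in which to return the new fence sequence number.
 *
 * Generates a new nonzero seqno and, if the fifo supports fences,
 * emits an SVGA_CMD_FENCE command carrying it. If fifo space can't be
 * reserved, a fallback wait is performed and -ENOMEM is returned.
 */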
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	void *fm;
	int ret = 0;
	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
	cmd_fence = (struct svga_fifo_cmd_fence *)
	    ((unsigned long)fm + sizeof(__le32));

	iowrite32(*seqno, &cmd_fence->fence);
	vmw_fifo_commit(dev_priv, bytes);
	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
	vmw_update_seqno(dev_priv, fifo_state);

out_err:
	return ret;
}

/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->mem.mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->offset;
	} else {
		cmd->body.guestResult.gmrId = bo->mem.start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
					uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->mem.start;
	cmd->body.offset = 0;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
 * the appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}