/*	$NetBSD: vmwgfx_fifo.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_fifo.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $");

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>

struct vmw_temp_set_context {
	SVGA3dCmdHeader header;
	SVGA3dCmdDXTempSetContext body;
};

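/**
 * vmw_fifo_have_3d - Report whether 3D commands can be used.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Checks the device and FIFO capability bits (and, on guest-backed object
 * devices, the SVGA3D_DEVCAP_3D query register) to decide whether the host
 * supports the 3D command set required by this driver.
 */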
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = vmw_mmio_read(fifo_mem +
				  ((fifo->capabilities &
				    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				   SVGA_FIFO_3D_HWVERSION_REVISED :
				   SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Legacy Display Unit does not support surfaces */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return false;

	return true;
}

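/**
 * vmw_fifo_have_pitchlock - Report whether the FIFO supports pitchlock.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns true if the extended FIFO advertises SVGA_FIFO_CAP_PITCHLOCK.
 */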
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

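/**
 * vmw_fifo_init - Initialize the command FIFO.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: Pointer to the FIFO state to initialize.
 *
 * Allocates the static bounce buffer, saves the current enable, config-done
 * and traces register state so that vmw_fifo_release() can restore it,
 * enables the device, and programs the FIFO min/max/next-cmd/stop registers
 * before signalling SVGA_REG_CONFIG_DONE.
 *
 * Returns 0 on success, or -ENOMEM if the bounce buffer cannot be allocated.
 */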
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;

	fifo->dx = false;
	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);
	vmw_write(dev_priv, SVGA_REG_TRACES, 0);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
	vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
	vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);

	return 0;
}

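/**
 * vmw_fifo_ping_host - Notify the host that there is FIFO work to do.
 *
 * @dev_priv: Pointer to device private structure.
 * @reason: Sync reason written to SVGA_REG_SYNC (one of the SVGA_SYNC_* values).
 *
 * Writes @reason to SVGA_REG_SYNC, but only when the SVGA_FIFO_BUSY flag
 * transitions from 0 to 1, avoiding redundant sync writes while the busy
 * flag is already set.
 */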
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 *fifo_mem = dev_priv->mmio_virt;

	preempt_disable();
	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	preempt_enable();
}

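/**
 * vmw_fifo_release - Tear down the command FIFO.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: Pointer to the FIFO state set up by vmw_fifo_init().
 *
 * Syncs with the device and busy-waits until it is idle, records the last
 * fence read from the FIFO, restores the register state saved by
 * vmw_fifo_init(), and frees the bounce buffers.
 */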
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 *fifo_mem = dev_priv->mmio_virt;

	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;

	dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);

	vmw_marker_queue_takedown(&fifo->marker_queue);

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}

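/**
 * vmw_fifo_is_full - Check whether the FIFO lacks room for a command.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes the caller wants to write.
 *
 * Returns true if the free space in the ring, computed from the MAX, MIN,
 * NEXT_CMD and STOP registers, is not larger than @bytes.
 */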
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

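/**
 * vmw_fifo_wait_noirq - Wait for FIFO space by polling.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes of FIFO space to wait for.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Fallback used when the device cannot raise FIFO progress interrupts:
 * polls vmw_fifo_is_full() once per scheduler tick until space is
 * available, the timeout expires (-EBUSY), or a signal is pending
 * (-ERESTARTSYS).
 */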
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

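/**
 * vmw_fifo_wait - Wait until at least @bytes of FIFO space are free.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes of FIFO space to wait for.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Pings the host with SVGA_SYNC_FIFOFULL and then waits for FIFO progress,
 * either using FIFO progress interrupts when SVGA_CAP_IRQMASK is available
 * or by falling back to vmw_fifo_wait_noirq().
 *
 * Returns 0 on success, -EBUSY on timeout, or -ERESTARTSYS if interrupted.
 */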
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
			       &dev_priv->fifo_queue_waiters);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
				  &dev_priv->fifo_queue_waiters);

	return ret;
}

/**
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 * If it times out waiting for fifo space, or if @bytes is larger than the
 * available fifo space.
 *
 * Returns:
 * Pointer to the fifo, or NULL on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					vmw_mmio_write(bytes, fifo_mem +
						       SVGA_FIFO_RESERVED);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				if (!fifo_state->dynamic_buffer)
					goto out_err;
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);

	return NULL;
}

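/**
 * vmw_fifo_reserve_dx - Reserve command buffer space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to reserve.
 * @ctx_id: DX context id the commands belong to, or SVGA3D_INVALID_ID.
 *
 * Reserves @bytes of space using the command buffer manager if one exists,
 * and falls back to the plain FIFO reservation path otherwise. On failure
 * an error message and a stack dump are emitted and NULL is returned.
 */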
void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
			  int ctx_id)
{
	void *ret;

	if (dev_priv->cman)
		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
					 ctx_id, false, NULL);
	else if (ctx_id == SVGA3D_INVALID_ID)
		ret = vmw_local_fifo_reserve(dev_priv, bytes);
	else {
		WARN(1, "Command buffer has not been allocated.\n");
		ret = NULL;
	}
	if (IS_ERR_OR_NULL(ret)) {
		DRM_ERROR("Fifo reserve failure of %u bytes.\n",
			  (unsigned) bytes);
		dump_stack();
		return NULL;
	}

	return ret;
}

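/**
 * vmw_fifo_res_copy - Copy a bounce buffer into the FIFO using memcpy.
 *
 * Used when the FIFO supports SVGA_FIFO_CAP_RESERVE: the reservation size
 * is published in SVGA_FIFO_RESERVED and the bounce buffer is copied in at
 * most two chunks, wrapping from @max back to @min if necessary.
 */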
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      u32 *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}

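/**
 * vmw_fifo_slow_copy - Copy a bounce buffer into the FIFO word by word.
 *
 * Used when the FIFO lacks SVGA_FIFO_CAP_RESERVE: each 32-bit word is
 * written individually and SVGA_FIFO_NEXT_CMD is advanced after every
 * word, wrapping from @max back to @min as needed.
 */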
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       u32 *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

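/**
 * vmw_local_fifo_commit - Commit space previously reserved in the FIFO.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 *
 * If a bounce buffer was used for the reservation, its contents are copied
 * into the FIFO first. SVGA_FIFO_NEXT_CMD is then advanced, any published
 * reservation is cleared, the host is pinged, and the FIFO mutex taken by
 * vmw_local_fifo_reserve() is released.
 */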
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	if (fifo_state->dx)
		bytes += sizeof(struct vmw_temp_set_context);

	fifo_state->dx = false;
	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

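/**
 * vmw_fifo_commit - Commit previously reserved command space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 *
 * Commits through the command buffer manager when one is present, and
 * through the plain FIFO otherwise.
 */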
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}


/**
 * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */
void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_flush - Flush any buffered commands and make sure command processing
 * starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptibly if the function needs to sleep.
 */
int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
{
	might_sleep();

	if (dev_priv->cman)
		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
	else
		return 0;
}

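/**
 * vmw_fifo_send_fence - Emit a fence command and return its sequence number.
 *
 * @dev_priv: Pointer to device private structure.
 * @seqno: Out parameter for the new fence sequence number.
 *
 * Reserves FIFO space, allocates the next non-zero marker sequence number
 * and emits an SVGA_CMD_FENCE carrying it. If the FIFO lacks
 * SVGA_FIFO_CAP_FENCE, the reservation is committed empty and the fence is
 * emulated by the waiting code in vmwgfx_irq.c.
 *
 * Returns 0 on success, or -ENOMEM if FIFO space could not be reserved.
 */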
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_fifo_commit_flush(dev_priv, bytes);
	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
	vmw_update_seqno(dev_priv, fifo_state);

out_err:
	return ret;
}

/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->mem.mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->offset;
	} else {
		cmd->body.guestResult.gmrId = bo->mem.start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
					uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->mem.start;
	cmd->body.offset = 0;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}


/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
 * the appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must also be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}

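/**
 * vmw_fifo_reserve - Reserve command space outside any DX context.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to reserve.
 *
 * Convenience wrapper around vmw_fifo_reserve_dx() using SVGA3D_INVALID_ID.
 */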
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
}