/**********************************************************
 * Copyright 2008-2015 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "pipe/p_state.h"
#include "pipe/p_context.h"

#include "util/u_bitmask.h"
#include "util/u_memory.h"

#include "svga_cmd.h"
#include "svga_context.h"
#include "svga_screen.h"
#include "svga_resource_buffer.h"
#include "svga_winsys.h"
#include "svga_debug.h"


/* Fixme: want a public base class for all pipe structs, even if there
 * isn't much in them.
 */
struct pipe_query {
   int dummy;
};

struct svga_query {
   struct pipe_query base;
   unsigned type;                  /**< PIPE_QUERY_x or SVGA_QUERY_x */
   SVGA3dQueryType svga_type;      /**< SVGA3D_QUERYTYPE_x or unused */

   unsigned id;                    /**< Per-context query identifier */

   struct pipe_fence_handle *fence;

   /** For PIPE_QUERY_OCCLUSION_COUNTER / SVGA3D_QUERYTYPE_OCCLUSION */

   /* For VGPU9 */
   struct svga_winsys_buffer *hwbuf;
   volatile SVGA3dQueryResult *queryResult;

   /** For VGPU10 */
   struct svga_winsys_gb_query *gb_query;
   SVGA3dDXQueryFlags flags;
   unsigned offset;                /**< offset to the gb_query memory */
   struct pipe_query *predicate;   /**< The associated query that can be used for predication */

   /** For non-GPU SVGA_QUERY_x queries */
   uint64_t begin_count, end_count;
};


/** cast wrapper */
static inline struct svga_query *
svga_query(struct pipe_query *q)
{
   return (struct svga_query *)q;
}

/**
 * VGPU9
 */

static boolean
svga_get_query_result(struct pipe_context *pipe,
                      struct pipe_query *q,
                      boolean wait,
                      union pipe_query_result *result);

static enum pipe_error
define_query_vgpu9(struct svga_context *svga,
                   struct svga_query *sq)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;

   sq->hwbuf = svga_winsys_buffer_create(svga, 1,
                                         SVGA_BUFFER_USAGE_PINNED,
                                         sizeof *sq->queryResult);
   if (!sq->hwbuf)
      return PIPE_ERROR_OUT_OF_MEMORY;

   sq->queryResult = (SVGA3dQueryResult *)
                     sws->buffer_map(sws, sq->hwbuf, PIPE_TRANSFER_WRITE);
   if (!sq->queryResult) {
      sws->buffer_destroy(sws, sq->hwbuf);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   sq->queryResult->totalSize = sizeof *sq->queryResult;
   sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;

   /* We request the buffer to be pinned and assume it is always mapped.
    * The reason is that we don't want to wait for fences when checking the
    * query status.
    */
   sws->buffer_unmap(sws, sq->hwbuf);

   return PIPE_OK;
}

static enum pipe_error
begin_query_vgpu9(struct svga_context *svga, struct svga_query *sq)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret = PIPE_OK;

   if (sq->queryResult->state == SVGA3D_QUERYSTATE_PENDING) {
      /* The application doesn't care for the pending query result.
       * We cannot let go of the existing buffer and just get a new one
       * because its storage may be reused for other purposes and clobbered
       * by the host when it determines the query result.  So the only
       * option here is to wait for the existing query's result -- not a
       * big deal, given that no sane application would do this.
       */
       uint64_t result;
       svga_get_query_result(&svga->pipe, &sq->base, TRUE, (void*)&result);
       assert(sq->queryResult->state != SVGA3D_QUERYSTATE_PENDING);
   }

   sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;
   sws->fence_reference(sws, &sq->fence, NULL);

   ret = SVGA3D_BeginQuery(svga->swc, sq->svga_type);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_BeginQuery(svga->swc, sq->svga_type);
   }
   return ret;
}

static enum pipe_error
end_query_vgpu9(struct svga_context *svga, struct svga_query *sq)
{
   enum pipe_error ret = PIPE_OK;

   /* Set to PENDING before sending EndQuery. */
   sq->queryResult->state = SVGA3D_QUERYSTATE_PENDING;

   ret = SVGA3D_EndQuery(svga->swc, sq->svga_type, sq->hwbuf);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_EndQuery(svga->swc, sq->svga_type, sq->hwbuf);
   }
   return ret;
}

static boolean
get_query_result_vgpu9(struct svga_context *svga, struct svga_query *sq,
                       boolean wait, uint64_t *result)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret;
   SVGA3dQueryState state;

   if (!sq->fence) {
      /* The query status won't be updated by the host unless
       * SVGA_3D_CMD_WAIT_FOR_QUERY is emitted. Unfortunately this will cause
       * a synchronous wait on the host.
       */
      ret = SVGA3D_WaitForQuery(svga->swc, sq->svga_type, sq->hwbuf);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_WaitForQuery(svga->swc, sq->svga_type, sq->hwbuf);
      }
      assert (ret == PIPE_OK);
      svga_context_flush(svga, &sq->fence);
      assert(sq->fence);
   }

   state = sq->queryResult->state;
   if (state == SVGA3D_QUERYSTATE_PENDING) {
      if (!wait)
         return FALSE;
      sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
                        SVGA_FENCE_FLAG_QUERY);
      state = sq->queryResult->state;
   }

   assert(state == SVGA3D_QUERYSTATE_SUCCEEDED ||
          state == SVGA3D_QUERYSTATE_FAILED);

   *result = (uint64_t)sq->queryResult->result32;
   return TRUE;
}


/**
 * VGPU10
 *
 * There is one query mob allocated for each context, shared by all
 * query types. The mob is used to hold query states and results. Since
 * each query result type has a different length, to ease query allocation
 * management the mob is divided into memory blocks. Each memory block
 * will hold queries of the same type. Multiple memory blocks can be allocated
 * for a particular query type.
 *
 * Currently each memory block is 184 bytes. We support up to 128
 * memory blocks. The query memory size is arbitrary right now.
 * Each occlusion query takes about 8 bytes. One memory block can accommodate
 * 23 occlusion queries. 128 of those blocks can support up to 2944 occlusion
 * queries. That seems reasonable for now. If we decide this limit is
 * not enough, we can increase the limit or try to grow the mob at runtime.
 * Note that the SVGA device does not impose a limit of one mob per context
 * for queries; we could allocate multiple mobs for queries. However, the
 * WDDM KMD does not currently support that.
 *
 * Also note that the GL guest driver does not issue any of the
 * following commands: DXMoveQuery, DXBindAllQuery & DXReadbackAllQuery.
 */
#define SVGA_QUERY_MEM_BLOCK_SIZE    (sizeof(SVGADXQueryResultUnion) * 2)
#define SVGA_QUERY_MEM_SIZE          (128 * SVGA_QUERY_MEM_BLOCK_SIZE)
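
/*
 * Worked example of the sizing described above (illustrative only; the exact
 * figures depend on sizeof(SVGADXQueryResultUnion), which the 184-byte block
 * size quoted above would put at 92 bytes):
 *
 *   SVGA_QUERY_MEM_BLOCK_SIZE   = 2 * 92        = 184 bytes per block
 *   occlusion slot (result + SVGA3dQueryState) ~= 8 bytes
 *   occlusion slots per block   = 184 / 8       = 23
 *   SVGA_QUERY_MEM_SIZE         = 128 * 184     = 23552 bytes
 *   max occlusion queries       = 128 * 23      = 2944
 */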

struct svga_qmem_alloc_entry
{
   unsigned start_offset;               /* start offset of the memory block */
   unsigned block_index;                /* block index of the memory block */
   unsigned query_size;                 /* query size in this memory block */
   unsigned nquery;                     /* number of queries allocated */
   struct util_bitmask *alloc_mask;     /* allocation mask */
   struct svga_qmem_alloc_entry *next;  /* next memory block */
};
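
/*
 * Sketch of how these entries are organized (based on the allocator code
 * below): the context keeps one list head per SVGA3D query type, and each
 * entry describes one fixed-size block carved out of the shared query mob.
 *
 *   svga->gb_query_map[SVGA3D_QUERYTYPE_x]
 *        |
 *        +--> entry A --next--> entry B --next--> NULL
 *             start_offset = block_index * SVGA_QUERY_MEM_BLOCK_SIZE
 *             alloc_mask   = which query_size-sized slots are in use
 *
 * A query's byte offset into the mob is then
 *   slot_index * query_size + start_offset   (see allocate_query()).
 */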


/**
 * Allocate a memory block from the query object memory
 * \return -1 if out of memory, else index of the query memory block
 */
static int
allocate_query_block(struct svga_context *svga)
{
   int index;
   unsigned offset;

   /* Find the next available query block */
   index = util_bitmask_add(svga->gb_query_alloc_mask);

   if (index == UTIL_BITMASK_INVALID_INDEX)
      return -1;

   offset = index * SVGA_QUERY_MEM_BLOCK_SIZE;
   if (offset >= svga->gb_query_len) {
      unsigned i;

      /**
       * All the memory blocks are allocated; let's see if there is
       * any empty memory block around that can be freed up.
       */
      index = -1;
      for (i = 0; i < SVGA3D_QUERYTYPE_MAX && index == -1; i++) {
         struct svga_qmem_alloc_entry *alloc_entry;
         struct svga_qmem_alloc_entry *prev_alloc_entry = NULL;

         alloc_entry = svga->gb_query_map[i];
         while (alloc_entry && index == -1) {
            if (alloc_entry->nquery == 0) {
               /* This memory block is empty, it can be recycled. */
               if (prev_alloc_entry) {
                  prev_alloc_entry->next = alloc_entry->next;
               } else {
                  svga->gb_query_map[i] = alloc_entry->next;
               }
               index = alloc_entry->block_index;
            } else {
               prev_alloc_entry = alloc_entry;
               alloc_entry = alloc_entry->next;
            }
         }
      }
   }

   return index;
}

/**
 * Allocate a slot in the specified memory block.
 * All slots in this memory block are of the same size.
 *
 * \return -1 if out of memory, else index of the query slot
 */
static int
allocate_query_slot(struct svga_context *svga,
                    struct svga_qmem_alloc_entry *alloc)
{
   int index;
   unsigned offset;

   /* Find the next available slot */
   index = util_bitmask_add(alloc->alloc_mask);

   if (index == UTIL_BITMASK_INVALID_INDEX)
      return -1;

   offset = index * alloc->query_size;
   if (offset >= SVGA_QUERY_MEM_BLOCK_SIZE)
      return -1;

   alloc->nquery++;

   return index;
}

/**
 * Deallocate the specified slot in the memory block.
 * Once all slots are freed, the memory block can be recycled
 * for another query type.
 */
static void
deallocate_query_slot(struct svga_context *svga,
                      struct svga_qmem_alloc_entry *alloc,
                      unsigned index)
{
   assert(index != UTIL_BITMASK_INVALID_INDEX);

   util_bitmask_clear(alloc->alloc_mask, index);
   alloc->nquery--;

   /**
    * Don't worry about deallocating the empty memory block here.
    * The empty memory block will be recycled when no more memory block
    * can be allocated.
    */
}

static struct svga_qmem_alloc_entry *
allocate_query_block_entry(struct svga_context *svga,
                           unsigned len)
{
   struct svga_qmem_alloc_entry *alloc_entry;
   int block_index = -1;

   block_index = allocate_query_block(svga);
   if (block_index == -1)
      return NULL;
   alloc_entry = CALLOC_STRUCT(svga_qmem_alloc_entry);
   if (!alloc_entry)
      return NULL;

   alloc_entry->block_index = block_index;
   alloc_entry->start_offset = block_index * SVGA_QUERY_MEM_BLOCK_SIZE;
   alloc_entry->nquery = 0;
   alloc_entry->alloc_mask = util_bitmask_create();
   alloc_entry->next = NULL;
   alloc_entry->query_size = len;

   return alloc_entry;
}

/**
 * Allocate a memory slot for a query of the specified type.
 * It will first search through the memory blocks that are allocated
 * for the query type. If no memory slot is available, it will try
 * to allocate another memory block within the query object memory for
 * this query type.
 */
static int
allocate_query(struct svga_context *svga,
               SVGA3dQueryType type,
               unsigned len)
{
   struct svga_qmem_alloc_entry *alloc_entry;
   int slot_index = -1;
   unsigned offset;

   assert(type < SVGA3D_QUERYTYPE_MAX);

   alloc_entry = svga->gb_query_map[type];

   if (!alloc_entry) {
      /**
       * No query memory block has been allocated for this query type,
       * allocate one now
       */
      alloc_entry = allocate_query_block_entry(svga, len);
      if (!alloc_entry)
         return -1;
      svga->gb_query_map[type] = alloc_entry;
   }

   /* Allocate a slot within the memory block allocated for this query type */
   slot_index = allocate_query_slot(svga, alloc_entry);

   if (slot_index == -1) {
      /* This query memory block is full, allocate another one */
      alloc_entry = allocate_query_block_entry(svga, len);
      if (!alloc_entry)
         return -1;
      alloc_entry->next = svga->gb_query_map[type];
      svga->gb_query_map[type] = alloc_entry;
      slot_index = allocate_query_slot(svga, alloc_entry);
   }

   assert(slot_index != -1);
   offset = slot_index * len + alloc_entry->start_offset;

   return offset;
}
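
/*
 * Illustrative example of how define_query_vgpu10() below uses this
 * allocator for an occlusion query (a sketch, not additional driver logic):
 *
 *   unsigned qlen = sizeof(SVGADXOcclusionQueryResult) +
 *                   sizeof(SVGA3dQueryState);
 *   sq->offset = allocate_query(svga, SVGA3D_QUERYTYPE_OCCLUSION, qlen);
 *
 * The return value is a byte offset into the per-context gb query mob,
 * or -1 if no slot could be allocated.
 */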


/**
 * Deallocate memory slot allocated for the specified query
 */
static void
deallocate_query(struct svga_context *svga,
                 struct svga_query *sq)
{
   struct svga_qmem_alloc_entry *alloc_entry;
   unsigned slot_index;
   unsigned offset = sq->offset;

   alloc_entry = svga->gb_query_map[sq->svga_type];

   while (alloc_entry) {
      if (offset >= alloc_entry->start_offset &&
          offset < alloc_entry->start_offset + SVGA_QUERY_MEM_BLOCK_SIZE) {

         /* The slot belongs to this memory block, deallocate it */
         slot_index = (offset - alloc_entry->start_offset) /
                      alloc_entry->query_size;
         deallocate_query_slot(svga, alloc_entry, slot_index);
         alloc_entry = NULL;
      } else {
         alloc_entry = alloc_entry->next;
      }
   }
}


/**
 * Destroy the gb query object and all the related query structures
 */
static void
destroy_gb_query_obj(struct svga_context *svga)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   unsigned i;

   for (i = 0; i < SVGA3D_QUERYTYPE_MAX; i++) {
      struct svga_qmem_alloc_entry *alloc_entry, *next;
      alloc_entry = svga->gb_query_map[i];
      while (alloc_entry) {
         next = alloc_entry->next;
         util_bitmask_destroy(alloc_entry->alloc_mask);
         FREE(alloc_entry);
         alloc_entry = next;
      }
      svga->gb_query_map[i] = NULL;
   }

   if (svga->gb_query)
      sws->query_destroy(sws, svga->gb_query);
   svga->gb_query = NULL;

   util_bitmask_destroy(svga->gb_query_alloc_mask);
}

/**
 * Define query and create the gb query object if it is not already created.
 * There is only one gb query object per context which will be shared by
 * queries of all types.
 */
static enum pipe_error
define_query_vgpu10(struct svga_context *svga,
                    struct svga_query *sq, int resultLen)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   int qlen;
   enum pipe_error ret = PIPE_OK;

   SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);

   if (svga->gb_query == NULL) {
      /* Create a gb query object */
      svga->gb_query = sws->query_create(sws, SVGA_QUERY_MEM_SIZE);
      if (!svga->gb_query)
         return PIPE_ERROR_OUT_OF_MEMORY;
      svga->gb_query_len = SVGA_QUERY_MEM_SIZE;
      memset (svga->gb_query_map, 0, sizeof(svga->gb_query_map));
      svga->gb_query_alloc_mask = util_bitmask_create();

      /* Bind the query object to the context */
      if (svga->swc->query_bind(svga->swc, svga->gb_query,
                                SVGA_QUERY_FLAG_SET) != PIPE_OK) {
         svga_context_flush(svga, NULL);
         svga->swc->query_bind(svga->swc, svga->gb_query,
                               SVGA_QUERY_FLAG_SET);
      }
   }

   sq->gb_query = svga->gb_query;

   /* Allocate an integer ID for this query */
   sq->id = util_bitmask_add(svga->query_id_bm);
   if (sq->id == UTIL_BITMASK_INVALID_INDEX)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* Find a slot for this query in the gb object */
   qlen = resultLen + sizeof(SVGA3dQueryState);
   sq->offset = allocate_query(svga, sq->svga_type, qlen);
   if (sq->offset == -1)
      return PIPE_ERROR_OUT_OF_MEMORY;

   SVGA_DBG(DEBUG_QUERY, "   query type=%d qid=0x%x offset=%d\n",
            sq->svga_type, sq->id, sq->offset);

   /**
    * Send SVGA3D commands to define the query
    */
   ret = SVGA3D_vgpu10_DefineQuery(svga->swc, sq->id, sq->svga_type, sq->flags);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_DefineQuery(svga->swc, sq->id, sq->svga_type, sq->flags);
   }
   if (ret != PIPE_OK)
      return PIPE_ERROR_OUT_OF_MEMORY;

   ret = SVGA3D_vgpu10_BindQuery(svga->swc, sq->gb_query, sq->id);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_BindQuery(svga->swc, sq->gb_query, sq->id);
   }
   assert(ret == PIPE_OK);

   ret = SVGA3D_vgpu10_SetQueryOffset(svga->swc, sq->id, sq->offset);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_SetQueryOffset(svga->swc, sq->id, sq->offset);
   }
   assert(ret == PIPE_OK);

   return PIPE_OK;
}

static enum pipe_error
destroy_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
{
   enum pipe_error ret;

   ret = SVGA3D_vgpu10_DestroyQuery(svga->swc, sq->id);

   /* Deallocate the memory slot allocated for this query */
   deallocate_query(svga, sq);

   return ret;
}


/**
 * Rebind queries to the context.
 */
static void
rebind_vgpu10_query(struct svga_context *svga)
{
   if (svga->swc->query_bind(svga->swc, svga->gb_query,
                             SVGA_QUERY_FLAG_REF) != PIPE_OK) {
      svga_context_flush(svga, NULL);
      svga->swc->query_bind(svga->swc, svga->gb_query,
                            SVGA_QUERY_FLAG_REF);
   }

   svga->rebind.flags.query = FALSE;
}


static enum pipe_error
begin_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret = PIPE_OK;
   int status = 0;

   sws->fence_reference(sws, &sq->fence, NULL);

   /* Initialize the query state to NEW */
   status = sws->query_init(sws, sq->gb_query, sq->offset, SVGA3D_QUERYSTATE_NEW);
   if (status)
      return PIPE_ERROR;

   if (svga->rebind.flags.query) {
      rebind_vgpu10_query(svga);
   }

   /* Send the BeginQuery command to the device */
   ret = SVGA3D_vgpu10_BeginQuery(svga->swc, sq->id);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_BeginQuery(svga->swc, sq->id);
   }
   return ret;
}

static enum pipe_error
end_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
{
   enum pipe_error ret = PIPE_OK;

   if (svga->rebind.flags.query) {
      rebind_vgpu10_query(svga);
   }

   ret = SVGA3D_vgpu10_EndQuery(svga->swc, sq->id);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_EndQuery(svga->swc, sq->id);
   }

   return ret;
}

static boolean
get_query_result_vgpu10(struct svga_context *svga, struct svga_query *sq,
                        boolean wait, void *result, int resultLen)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   SVGA3dQueryState queryState;

   if (svga->rebind.flags.query) {
      rebind_vgpu10_query(svga);
   }

   sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);

   if (queryState != SVGA3D_QUERYSTATE_SUCCEEDED && !sq->fence) {
      /* We don't have the query result yet, and the query hasn't been
       * submitted.  We need to submit it now since the GL spec says
       * "Querying the state for a given occlusion query forces that
       * occlusion query to complete within a finite amount of time."
       */
      svga_context_flush(svga, &sq->fence);
   }

   if (queryState == SVGA3D_QUERYSTATE_PENDING ||
       queryState == SVGA3D_QUERYSTATE_NEW) {
      if (!wait)
         return FALSE;
      sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
                        SVGA_FENCE_FLAG_QUERY);
      sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);
   }

   assert(queryState == SVGA3D_QUERYSTATE_SUCCEEDED ||
          queryState == SVGA3D_QUERYSTATE_FAILED);

   return TRUE;
}

static struct pipe_query *
svga_create_query(struct pipe_context *pipe,
                  unsigned query_type,
                  unsigned index)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_query *sq;

   assert(query_type < SVGA_QUERY_MAX);

   sq = CALLOC_STRUCT(svga_query);
   if (!sq)
      goto fail;

   /* Allocate an integer ID for the query */
   sq->id = util_bitmask_add(svga->query_id_bm);
   if (sq->id == UTIL_BITMASK_INVALID_INDEX)
      goto fail;

   SVGA_DBG(DEBUG_QUERY, "%s type=%d sq=0x%x id=%d\n", __FUNCTION__,
            query_type, sq, sq->id);

   switch (query_type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSION;
      if (svga_have_vgpu10(svga)) {
         define_query_vgpu10(svga, sq, sizeof(SVGADXOcclusionQueryResult));

         /**
          * In OpenGL, an occlusion counter query can be used for conditional
          * rendering; in DX10, however, only an OCCLUSION_PREDICATE query can
          * be used for predication. Hence, we create an occlusion predicate
          * query along with the occlusion counter query. When the occlusion
          * counter query is used for predication, the associated occlusion
          * predicate query is the one used in the SetPredication command.
          */
         sq->predicate = svga_create_query(pipe, PIPE_QUERY_OCCLUSION_PREDICATE, index);

      } else {
         define_query_vgpu9(svga, sq);
      }
      break;
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      if (svga_have_vgpu10(svga)) {
         sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE;
         define_query_vgpu10(svga, sq, sizeof(SVGADXOcclusionPredicateQueryResult));
      } else {
         sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSION;
         define_query_vgpu9(svga, sq);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
      assert(svga_have_vgpu10(svga));
      sq->svga_type = SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS;
      define_query_vgpu10(svga, sq,
                          sizeof(SVGADXStreamOutStatisticsQueryResult));
      break;
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      sq->svga_type = SVGA3D_QUERYTYPE_TIMESTAMP;
      define_query_vgpu10(svga, sq,
                          sizeof(SVGADXTimestampQueryResult));
      break;
   case SVGA_QUERY_NUM_DRAW_CALLS:
   case SVGA_QUERY_NUM_FALLBACKS:
   case SVGA_QUERY_NUM_FLUSHES:
   case SVGA_QUERY_NUM_VALIDATIONS:
   case SVGA_QUERY_NUM_BUFFERS_MAPPED:
   case SVGA_QUERY_NUM_TEXTURES_MAPPED:
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
   case SVGA_QUERY_NUM_READBACKS:
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
   case SVGA_QUERY_NUM_CONST_UPDATES:
   case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
   case SVGA_QUERY_NUM_COMMANDS_PER_DRAW:
      break;
   case SVGA_QUERY_FLUSH_TIME:
   case SVGA_QUERY_MAP_BUFFER_TIME:
      /* These queries need os_time_get() */
      svga->hud.uses_time = TRUE;
      break;
   default:
      assert(!"unexpected query type in svga_create_query()");
   }

   sq->type = query_type;

   return &sq->base;

fail:
   FREE(sq);
   return NULL;
}

static void
svga_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct svga_query *sq;

   if (!q) {
      destroy_gb_query_obj(svga);
      return;
   }

   sq = svga_query(q);

   SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
            sq, sq->id);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      if (svga_have_vgpu10(svga)) {
         /* make sure to also destroy any associated predicate query */
         if (sq->predicate)
            svga_destroy_query(pipe, sq->predicate);
         destroy_query_vgpu10(svga, sq);
      } else {
         sws->buffer_destroy(sws, sq->hwbuf);
      }
      sws->fence_reference(sws, &sq->fence, NULL);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      destroy_query_vgpu10(svga, sq);
      sws->fence_reference(sws, &sq->fence, NULL);
      break;
   case SVGA_QUERY_NUM_DRAW_CALLS:
   case SVGA_QUERY_NUM_FALLBACKS:
   case SVGA_QUERY_NUM_FLUSHES:
   case SVGA_QUERY_NUM_VALIDATIONS:
   case SVGA_QUERY_MAP_BUFFER_TIME:
   case SVGA_QUERY_NUM_BUFFERS_MAPPED:
   case SVGA_QUERY_NUM_TEXTURES_MAPPED:
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
   case SVGA_QUERY_FLUSH_TIME:
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
   case SVGA_QUERY_NUM_READBACKS:
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
   case SVGA_QUERY_NUM_CONST_UPDATES:
   case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
   case SVGA_QUERY_NUM_COMMANDS_PER_DRAW:
      /* nothing */
      break;
   default:
      assert(!"svga: unexpected query type in svga_destroy_query()");
   }

   /* Free the query id */
   util_bitmask_clear(svga->query_id_bm, sq->id);

   FREE(sq);
}


static boolean
svga_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_query *sq = svga_query(q);
   enum pipe_error ret;

   assert(sq);
   assert(sq->type < SVGA_QUERY_MAX);

   SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
            sq, sq->id);

   /* Need to flush out buffered drawing commands so that they don't
    * get counted in the query results.
    */
   svga_hwtnl_flush_retry(svga);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      if (svga_have_vgpu10(svga)) {
         ret = begin_query_vgpu10(svga, sq);
         /* also need to start the associated occlusion predicate query */
         if (sq->predicate) {
            enum pipe_error status;
            status = begin_query_vgpu10(svga, svga_query(sq->predicate));
            assert(status == PIPE_OK);
            (void) status;
         }
      } else {
         ret = begin_query_vgpu9(svga, sq);
      }
      assert(ret == PIPE_OK);
      (void) ret;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      ret = begin_query_vgpu10(svga, sq);
      assert(ret == PIPE_OK);
      break;
   case SVGA_QUERY_NUM_DRAW_CALLS:
      sq->begin_count = svga->hud.num_draw_calls;
      break;
   case SVGA_QUERY_NUM_FALLBACKS:
      sq->begin_count = svga->hud.num_fallbacks;
      break;
   case SVGA_QUERY_NUM_FLUSHES:
      sq->begin_count = svga->hud.num_flushes;
      break;
   case SVGA_QUERY_NUM_VALIDATIONS:
      sq->begin_count = svga->hud.num_validations;
      break;
   case SVGA_QUERY_MAP_BUFFER_TIME:
      sq->begin_count = svga->hud.map_buffer_time;
      break;
   case SVGA_QUERY_NUM_BUFFERS_MAPPED:
      sq->begin_count = svga->hud.num_buffers_mapped;
      break;
   case SVGA_QUERY_NUM_TEXTURES_MAPPED:
      sq->begin_count = svga->hud.num_textures_mapped;
      break;
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
      sq->begin_count = svga->hud.num_bytes_uploaded;
      break;
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
      sq->begin_count = svga->hud.command_buffer_size;
      break;
   case SVGA_QUERY_FLUSH_TIME:
      sq->begin_count = svga->hud.flush_time;
      break;
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
      sq->begin_count = svga->hud.surface_write_flushes;
      break;
   case SVGA_QUERY_NUM_READBACKS:
      sq->begin_count = svga->hud.num_readbacks;
      break;
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
      sq->begin_count = svga->hud.num_resource_updates;
      break;
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
      sq->begin_count = svga->hud.num_buffer_uploads;
      break;
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
      sq->begin_count = svga->hud.num_const_buf_updates;
      break;
   case SVGA_QUERY_NUM_CONST_UPDATES:
      sq->begin_count = svga->hud.num_const_updates;
      break;
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
   case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
   case SVGA_QUERY_NUM_COMMANDS_PER_DRAW:
      /* nothing */
      break;
   default:
      assert(!"unexpected query type in svga_begin_query()");
   }

   svga->sq[sq->type] = sq;

   return true;
}


static bool
svga_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_query *sq = svga_query(q);
   enum pipe_error ret;

   assert(sq);
   assert(sq->type < SVGA_QUERY_MAX);

   SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
            sq, sq->id);

   if (sq->type == PIPE_QUERY_TIMESTAMP && svga->sq[sq->type] != sq)
      svga_begin_query(pipe, q);

   svga_hwtnl_flush_retry(svga);

   assert(svga->sq[sq->type] == sq);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      if (svga_have_vgpu10(svga)) {
         ret = end_query_vgpu10(svga, sq);
         /* also need to end the associated occlusion predicate query */
         if (sq->predicate) {
            enum pipe_error status;
            status = end_query_vgpu10(svga, svga_query(sq->predicate));
            assert(status == PIPE_OK);
            (void) status;
         }
      } else {
         ret = end_query_vgpu9(svga, sq);
      }
      assert(ret == PIPE_OK);
      (void) ret;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      ret = end_query_vgpu10(svga, sq);
      assert(ret == PIPE_OK);
      break;
   case SVGA_QUERY_NUM_DRAW_CALLS:
      sq->end_count = svga->hud.num_draw_calls;
      break;
   case SVGA_QUERY_NUM_FALLBACKS:
      sq->end_count = svga->hud.num_fallbacks;
      break;
   case SVGA_QUERY_NUM_FLUSHES:
      sq->end_count = svga->hud.num_flushes;
      break;
   case SVGA_QUERY_NUM_VALIDATIONS:
      sq->end_count = svga->hud.num_validations;
      break;
   case SVGA_QUERY_MAP_BUFFER_TIME:
      sq->end_count = svga->hud.map_buffer_time;
      break;
   case SVGA_QUERY_NUM_BUFFERS_MAPPED:
      sq->end_count = svga->hud.num_buffers_mapped;
      break;
   case SVGA_QUERY_NUM_TEXTURES_MAPPED:
      sq->end_count = svga->hud.num_textures_mapped;
      break;
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
      sq->end_count = svga->hud.num_bytes_uploaded;
      break;
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
      sq->end_count = svga->hud.command_buffer_size;
      break;
   case SVGA_QUERY_FLUSH_TIME:
      sq->end_count = svga->hud.flush_time;
      break;
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
      sq->end_count = svga->hud.surface_write_flushes;
      break;
   case SVGA_QUERY_NUM_READBACKS:
      sq->end_count = svga->hud.num_readbacks;
      break;
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
      sq->end_count = svga->hud.num_resource_updates;
      break;
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
      sq->end_count = svga->hud.num_buffer_uploads;
      break;
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
      sq->end_count = svga->hud.num_const_buf_updates;
      break;
   case SVGA_QUERY_NUM_CONST_UPDATES:
      sq->end_count = svga->hud.num_const_updates;
      break;
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
   case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
   case SVGA_QUERY_NUM_COMMANDS_PER_DRAW:
      /* nothing */
      break;
   default:
      assert(!"unexpected query type in svga_end_query()");
   }
   svga->sq[sq->type] = NULL;
   return true;
}

static boolean
svga_get_query_result(struct pipe_context *pipe,
                      struct pipe_query *q,
                      boolean wait,
                      union pipe_query_result *vresult)
{
   struct svga_screen *svgascreen = svga_screen(pipe->screen);
   struct svga_context *svga = svga_context(pipe);
   struct svga_query *sq = svga_query(q);
   uint64_t *result = (uint64_t *)vresult;
   boolean ret = TRUE;

   assert(sq);

   SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d wait: %d\n",
            __FUNCTION__, sq, sq->id, wait);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      if (svga_have_vgpu10(svga)) {
         SVGADXOcclusionQueryResult occResult;
         ret = get_query_result_vgpu10(svga, sq, wait,
                                       (void *)&occResult, sizeof(occResult));
         *result = (uint64_t)occResult.samplesRendered;
      } else {
         ret = get_query_result_vgpu9(svga, sq, wait, result);
      }
      break;
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
      if (svga_have_vgpu10(svga)) {
         SVGADXOcclusionPredicateQueryResult occResult;
         ret = get_query_result_vgpu10(svga, sq, wait,
                                       (void *)&occResult, sizeof(occResult));
         vresult->b = occResult.anySamplesRendered != 0;
      } else {
         uint64_t count = 0;
         ret = get_query_result_vgpu9(svga, sq, wait, &count);
         vresult->b = count != 0;
      }
      break;
   }
   case PIPE_QUERY_SO_STATISTICS: {
      SVGADXStreamOutStatisticsQueryResult sResult;
      struct pipe_query_data_so_statistics *pResult =
         (struct pipe_query_data_so_statistics *)vresult;

      assert(svga_have_vgpu10(svga));
      ret = get_query_result_vgpu10(svga, sq, wait,
                                    (void *)&sResult, sizeof(sResult));
      pResult->num_primitives_written = sResult.numPrimitivesWritten;
      pResult->primitives_storage_needed = sResult.numPrimitivesRequired;
      break;
   }
   case PIPE_QUERY_TIMESTAMP: {
      SVGADXTimestampQueryResult sResult;

      assert(svga_have_vgpu10(svga));
      ret = get_query_result_vgpu10(svga, sq, wait,
                                    (void *)&sResult, sizeof(sResult));
      *result = (uint64_t)sResult.timestamp;
      break;
   }
   case PIPE_QUERY_PRIMITIVES_GENERATED: {
      SVGADXStreamOutStatisticsQueryResult sResult;

      assert(svga_have_vgpu10(svga));
      ret = get_query_result_vgpu10(svga, sq, wait,
                                    (void *)&sResult, sizeof sResult);
      *result = (uint64_t)sResult.numPrimitivesRequired;
      break;
   }
   case PIPE_QUERY_PRIMITIVES_EMITTED: {
      SVGADXStreamOutStatisticsQueryResult sResult;

      assert(svga_have_vgpu10(svga));
      ret = get_query_result_vgpu10(svga, sq, wait,
                                    (void *)&sResult, sizeof sResult);
      *result = (uint64_t)sResult.numPrimitivesWritten;
      break;
   }
   /* These are per-frame counters */
   case SVGA_QUERY_NUM_DRAW_CALLS:
   case SVGA_QUERY_NUM_FALLBACKS:
   case SVGA_QUERY_NUM_FLUSHES:
   case SVGA_QUERY_NUM_VALIDATIONS:
   case SVGA_QUERY_MAP_BUFFER_TIME:
   case SVGA_QUERY_NUM_BUFFERS_MAPPED:
   case SVGA_QUERY_NUM_TEXTURES_MAPPED:
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
   case SVGA_QUERY_FLUSH_TIME:
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
   case SVGA_QUERY_NUM_READBACKS:
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
   case SVGA_QUERY_NUM_CONST_UPDATES:
      vresult->u64 = sq->end_count - sq->begin_count;
      break;
   /* These are running total counters */
   case SVGA_QUERY_MEMORY_USED:
      vresult->u64 = svgascreen->hud.total_resource_bytes;
      break;
   case SVGA_QUERY_NUM_SHADERS:
      vresult->u64 = svga->hud.num_shaders;
      break;
   case SVGA_QUERY_NUM_RESOURCES:
      vresult->u64 = svgascreen->hud.num_resources;
      break;
   case SVGA_QUERY_NUM_STATE_OBJECTS:
      vresult->u64 = (svga->hud.num_blend_objects +
                      svga->hud.num_depthstencil_objects +
                      svga->hud.num_rasterizer_objects +
                      svga->hud.num_sampler_objects +
                      svga->hud.num_samplerview_objects +
                      svga->hud.num_vertexelement_objects);
      break;
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
      vresult->u64 = svga->hud.num_surface_views;
      break;
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
      vresult->u64 = svga->hud.num_generate_mipmap;
      break;
   case SVGA_QUERY_NUM_FAILED_ALLOCATIONS:
      vresult->u64 = svgascreen->hud.num_failed_allocations;
      break;
   case SVGA_QUERY_NUM_COMMANDS_PER_DRAW:
      vresult->f = (float) svga->swc->num_commands
         / (float) svga->swc->num_draw_commands;
      break;
   default:
      assert(!"unexpected query type in svga_get_query_result");
   }

   SVGA_DBG(DEBUG_QUERY, "%s result %d\n", __FUNCTION__, *((uint64_t *)vresult));

   return ret;
}

static void
svga_render_condition(struct pipe_context *pipe, struct pipe_query *q,
                      boolean condition, enum pipe_render_cond_flag mode)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct svga_query *sq = svga_query(q);
   SVGA3dQueryId queryId;
   enum pipe_error ret;

   SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);

   assert(svga_have_vgpu10(svga));
   if (sq == NULL) {
      queryId = SVGA3D_INVALID_ID;
   }
   else {
      assert(sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSION ||
             sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE);

      if (sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSION) {
         assert(sq->predicate);
         /**
          * For conditional rendering, make sure to use the associated
          * predicate query.
          */
         sq = svga_query(sq->predicate);
      }
      queryId = sq->id;

      if ((mode == PIPE_RENDER_COND_WAIT ||
           mode == PIPE_RENDER_COND_BY_REGION_WAIT) && sq->fence) {
         sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
                           SVGA_FENCE_FLAG_QUERY);
      }
   }
   /*
    * If the kernel module doesn't support the predication command,
    * we'll just render unconditionally.
    * This is probably acceptable for the typical case of occlusion culling.
    */
   if (sws->have_set_predication_cmd) {
      ret = SVGA3D_vgpu10_SetPredication(svga->swc, queryId,
                                         (uint32) condition);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_vgpu10_SetPredication(svga->swc, queryId,
                                            (uint32) condition);
      }
      svga->pred.query_id = queryId;
      svga->pred.cond = condition;
   }

   svga->render_condition = (sq != NULL);
}


/*
 * This function is a workaround because we lack the ability to query
 * the renderer's time synchronously.
 */
static uint64_t
svga_get_timestamp(struct pipe_context *pipe)
{
   struct pipe_query *q = svga_create_query(pipe, PIPE_QUERY_TIMESTAMP, 0);
   union pipe_query_result result;

   svga_begin_query(pipe, q);
   svga_end_query(pipe, q);
   svga_get_query_result(pipe, q, TRUE, &result);
   svga_destroy_query(pipe, q);

   return result.u64;
}
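
/*
 * Minimal usage sketch (illustrative only): this function is installed as
 * pipe->get_timestamp in svga_init_query_functions() below, so a caller
 * holding a pipe_context would typically do
 *
 *   uint64_t now = pipe->get_timestamp(pipe);
 *
 * at the cost of a full create/begin/end/readback/destroy round trip.
 */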


static void
svga_set_active_query_state(struct pipe_context *pipe, boolean enable)
{
}


/**
 * \brief Toggle conditional rendering if already enabled
 *
 * \param svga[in]  The svga context
 * \param render_condition_enabled[in]  Whether to ignore requests to turn
 * conditional rendering off
 * \param on[in]  Whether to turn conditional rendering on or off
 */
void
svga_toggle_render_condition(struct svga_context *svga,
                             boolean render_condition_enabled,
                             boolean on)
{
   SVGA3dQueryId query_id;
   enum pipe_error ret;

   if (render_condition_enabled ||
       svga->pred.query_id == SVGA3D_INVALID_ID) {
      return;
   }

   /*
    * If we get here, it means that the system supports
    * conditional rendering since svga->pred.query_id has already been
    * modified for this context and thus support has already been
    * verified.
    */
   query_id = on ? svga->pred.query_id : SVGA3D_INVALID_ID;

   ret = SVGA3D_vgpu10_SetPredication(svga->swc, query_id,
                                      (uint32) svga->pred.cond);
   if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_SetPredication(svga->swc, query_id,
                                         (uint32) svga->pred.cond);
      assert(ret == PIPE_OK);
   }
}


void
svga_init_query_functions(struct svga_context *svga)
{
   svga->pipe.create_query = svga_create_query;
   svga->pipe.destroy_query = svga_destroy_query;
   svga->pipe.begin_query = svga_begin_query;
   svga->pipe.end_query = svga_end_query;
   svga->pipe.get_query_result = svga_get_query_result;
   svga->pipe.set_active_query_state = svga_set_active_query_state;
   svga->pipe.render_condition = svga_render_condition;
   svga->pipe.get_timestamp = svga_get_timestamp;
}