1/*
2 * Copyright © 2008-2009 Maciej Cencora <m.cencora@gmail.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 *    Maciej Cencora <m.cencora@gmail.com>
25 *
26 */
27#include "radeon_common.h"
28#include "radeon_queryobj.h"
29#include "radeon_debug.h"
30
31#include "main/imports.h"
32
33#include <inttypes.h>
34
35static void radeonQueryGetResult(struct gl_context *ctx, struct gl_query_object *q)
36{
37	struct radeon_query_object *query = (struct radeon_query_object *)q;
38        uint32_t *result;
39	int i;
40
41	radeon_print(RADEON_STATE, RADEON_VERBOSE,
42			"%s: query id %d, result %d\n",
43			__func__, query->Base.Id, (int) query->Base.Result);
44
45	radeon_bo_map(query->bo, GL_FALSE);
46        result = query->bo->ptr;
47
48	query->Base.Result = 0;
49	for (i = 0; i < query->curr_offset/sizeof(uint32_t); ++i) {
50		query->Base.Result += LE32_TO_CPU(result[i]);
51		radeon_print(RADEON_STATE, RADEON_TRACE, "result[%d] = %d\n", i, LE32_TO_CPU(result[i]));
52	}
53
54	radeon_bo_unmap(query->bo);
55}
56
57static struct gl_query_object * radeonNewQueryObject(struct gl_context *ctx, GLuint id)
58{
59	struct radeon_query_object *query;
60
61	query = calloc(1, sizeof(struct radeon_query_object));
62
63	query->Base.Id = id;
64	query->Base.Result = 0;
65	query->Base.Active = GL_FALSE;
66	query->Base.Ready = GL_TRUE;
67
68	radeon_print(RADEON_STATE, RADEON_VERBOSE,"%s: query id %d\n", __func__, query->Base.Id);
69
70	return &query->Base;
71}
72
73static void radeonDeleteQuery(struct gl_context *ctx, struct gl_query_object *q)
74{
75	struct radeon_query_object *query = (struct radeon_query_object *)q;
76
77	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __func__, q->Id);
78
79	if (query->bo) {
80		radeon_bo_unref(query->bo);
81	}
82
83	free(query);
84}
85
86static void radeonWaitQuery(struct gl_context *ctx, struct gl_query_object *q)
87{
88	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
89	struct radeon_query_object *query = (struct radeon_query_object *)q;
90
91	/* If the cmdbuf with packets for this query hasn't been flushed yet, do it now */
92	if (radeon_bo_is_referenced_by_cs(query->bo, radeon->cmdbuf.cs))
93		ctx->Driver.Flush(ctx);
94
95	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s: query id %d, bo %p, offset %d\n", __func__, q->Id, query->bo, query->curr_offset);
96
97	radeonQueryGetResult(ctx, q);
98
99	query->Base.Ready = GL_TRUE;
100}
101
102
103static void radeonBeginQuery(struct gl_context *ctx, struct gl_query_object *q)
104{
105	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
106	struct radeon_query_object *query = (struct radeon_query_object *)q;
107
108	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __func__, q->Id);
109
110	assert(radeon->query.current == NULL);
111
112	if (radeon->dma.flush)
113		radeon->dma.flush(&radeon->glCtx);
114
115	if (!query->bo) {
116		query->bo = radeon_bo_open(radeon->radeonScreen->bom, 0, RADEON_QUERY_PAGE_SIZE, RADEON_QUERY_PAGE_SIZE, RADEON_GEM_DOMAIN_GTT, 0);
117	}
118	query->curr_offset = 0;
119
120	radeon->query.current = query;
121
122	radeon->query.queryobj.dirty = GL_TRUE;
123	radeon->hw.is_dirty = GL_TRUE;
124}
125
126void radeonEmitQueryEnd(struct gl_context *ctx)
127{
128	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
129	struct radeon_query_object *query = radeon->query.current;
130
131	if (!query)
132		return;
133
134	if (query->emitted_begin == GL_FALSE)
135		return;
136
137	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d, bo %p, offset %d\n", __func__, query->Base.Id, query->bo, query->curr_offset);
138
139	radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
140				      query->bo,
141				      0, RADEON_GEM_DOMAIN_GTT);
142
143	radeon->vtbl.emit_query_finish(radeon);
144}
145
146static void radeonEndQuery(struct gl_context *ctx, struct gl_query_object *q)
147{
148	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
149
150	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __func__, q->Id);
151
152	if (radeon->dma.flush)
153		radeon->dma.flush(&radeon->glCtx);
154	radeonEmitQueryEnd(ctx);
155
156	radeon->query.current = NULL;
157}
158
159static void radeonCheckQuery(struct gl_context *ctx, struct gl_query_object *q)
160{
161	radeon_print(RADEON_STATE, RADEON_TRACE, "%s: query id %d\n", __func__, q->Id);
162\
163#ifdef DRM_RADEON_GEM_BUSY
164	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
165
166	struct radeon_query_object *query = (struct radeon_query_object *)q;
167	uint32_t domain;
168
169	/* Need to perform a flush, as per ARB_occlusion_query spec */
170	if (radeon_bo_is_referenced_by_cs(query->bo, radeon->cmdbuf.cs)) {
171		ctx->Driver.Flush(ctx);
172	}
173
174	if (radeon_bo_is_busy(query->bo, &domain) == 0) {
175		radeonQueryGetResult(ctx, q);
176		query->Base.Ready = GL_TRUE;
177	}
178#else
179	radeonWaitQuery(ctx, q);
180#endif
181}
182
183void radeonInitQueryObjFunctions(struct dd_function_table *functions)
184{
185	functions->NewQueryObject = radeonNewQueryObject;
186	functions->DeleteQuery = radeonDeleteQuery;
187	functions->BeginQuery = radeonBeginQuery;
188	functions->EndQuery = radeonEndQuery;
189	functions->CheckQuery = radeonCheckQuery;
190	functions->WaitQuery = radeonWaitQuery;
191}
192
193int radeon_check_query_active(struct gl_context *ctx, struct radeon_state_atom *atom)
194{
195	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
196	struct radeon_query_object *query = radeon->query.current;
197
198	if (!query || query->emitted_begin)
199		return 0;
200	return atom->cmd_size;
201}
202
/**
 * State-atom emit callback: copy the prebuilt query-begin command words
 * into the command stream and mark the current query's begin as emitted.
 *
 * NOTE(review): radeon->query.current is dereferenced unconditionally;
 * this presumably relies on the atom framework only calling emit when
 * atom->check (radeon_check_query_active) returned nonzero — confirm
 * against the state emission code.
 */
void radeon_emit_queryobj(struct gl_context *ctx, struct radeon_state_atom *atom)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	BATCH_LOCALS(radeon);
	int dwords;

	/* Number of command dwords this atom wants emitted right now. */
	dwords = atom->check(ctx, atom);

	BEGIN_BATCH(dwords);
	OUT_BATCH_TABLE(atom->cmd, dwords);
	END_BATCH();

	/* Begin packet is now in the stream; don't emit it again until
	 * the next BeginQuery resets this flag. */
	radeon->query.current->emitted_begin = GL_TRUE;
}
217