1/**************************************************************************
2
3Copyright 1998-1999 Precision Insight, Inc., Cedar Park, Texas.
4Copyright © 2002 David Dawes
5
6All Rights Reserved.
7
8Permission is hereby granted, free of charge, to any person obtaining a
9copy of this software and associated documentation files (the
10"Software"), to deal in the Software without restriction, including
11without limitation the rights to use, copy, modify, merge, publish,
12distribute, sub license, and/or sell copies of the Software, and to
13permit persons to whom the Software is furnished to do so, subject to
14the following conditions:
15
16The above copyright notice and this permission notice (including the
17next paragraph) shall be included in all copies or substantial portions
18of the Software.
19
20THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
24ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27
28**************************************************************************/
29
30#ifndef _INTEL_BATCHBUFFER_H
31#define _INTEL_BATCHBUFFER_H
32
33#define BATCH_RESERVED		16
34
35
36void intel_batch_init(ScrnInfoPtr scrn);
37void intel_batch_teardown(ScrnInfoPtr scrn);
38void intel_batch_emit_flush(ScrnInfoPtr scrn);
39void intel_batch_submit(ScrnInfoPtr scrn);
40
41static inline int intel_batch_space(intel_screen_private *intel)
42{
43	return (intel->batch_bo->size - BATCH_RESERVED) - (4*intel->batch_used);
44}
45
46static inline int intel_vertex_space(intel_screen_private *intel)
47{
48	return intel->vertex_bo ? intel->vertex_bo->size - (4*intel->vertex_used) : 0;
49}
50
51static inline void
52intel_batch_require_space(ScrnInfoPtr scrn, intel_screen_private *intel, int sz)
53{
54	assert(sz < intel->batch_bo->size - 8);
55	if (intel_batch_space(intel) < sz)
56		intel_batch_submit(scrn);
57}
58
59static inline void intel_batch_start_atomic(ScrnInfoPtr scrn, int sz)
60{
61	intel_screen_private *intel = intel_get_screen_private(scrn);
62
63	assert(!intel->in_batch_atomic);
64
65	if (intel->current_batch != RENDER_BATCH) {
66		if (intel->current_batch && intel->context_switch)
67			intel->context_switch(intel, RENDER_BATCH);
68	}
69
70	intel_batch_require_space(scrn, intel, sz * 4);
71	intel->current_batch = RENDER_BATCH;
72
73	intel->in_batch_atomic = TRUE;
74	intel->batch_atomic_limit = intel->batch_used + sz;
75}
76
77static inline void intel_batch_end_atomic(ScrnInfoPtr scrn)
78{
79	intel_screen_private *intel = intel_get_screen_private(scrn);
80
81	assert(intel->in_batch_atomic);
82	assert(intel->batch_used <= intel->batch_atomic_limit);
83	intel->in_batch_atomic = FALSE;
84}
85
86static inline void intel_batch_emit_dword(intel_screen_private *intel, uint32_t dword)
87{
88	intel->batch_ptr[intel->batch_used++] = dword;
89}
90
91static inline void intel_batch_align(intel_screen_private *intel, uint32_t align)
92{
93	uint32_t delta;
94
95	align /= 4;
96	assert(align);
97
98	if ((delta = intel->batch_used & (align - 1))) {
99		delta = align - delta;
100		memset (intel->batch_ptr + intel->batch_used, 0, 4*delta);
101		intel->batch_used += delta;
102	}
103}
104
/*
 * Emit a relocation entry for `bo` at the current batch position.
 *
 * The relocation must be recorded with libdrm *before* the address dword
 * is written, because both use intel->batch_used to locate the slot and
 * intel_batch_emit_dword() advances it.  `delta` is a byte offset into
 * `bo`; `needs_fence` requests a fence register for tiled access.
 */
static inline void
intel_batch_emit_reloc(intel_screen_private *intel,
		       dri_bo * bo,
		       uint32_t read_domains,
		       uint32_t write_domains, uint32_t delta, int needs_fence)
{
	uint64_t offset;

	/* Record the reloc at byte offset batch_used * 4 in the batch. */
	if (needs_fence)
		drm_intel_bo_emit_reloc_fence(intel->batch_bo,
					      intel->batch_used * 4,
					      bo, delta,
					      read_domains, write_domains);
	else
		drm_intel_bo_emit_reloc(intel->batch_bo, intel->batch_used * 4,
					bo, delta,
					read_domains, write_domains);

	/* Write the presumed GPU address; the kernel patches it at exec
	 * time if the buffer has moved. */
	offset = bo->offset64 + delta;

	intel_batch_emit_dword(intel, offset);
	/* 0100 is octal — gen8+ in this driver's gen encoding; those GPUs
	 * use addresses wider than 32 bits, so emit the high dword too.
	 * NOTE(review): encoding assumed from driver convention — confirm. */
	if (INTEL_INFO(intel)->gen >= 0100)
		intel_batch_emit_dword(intel, offset >> 32);
}
129
130static inline void
131intel_batch_mark_pixmap_domains(intel_screen_private *intel,
132				struct intel_uxa_pixmap *priv,
133				uint32_t read_domains, uint32_t write_domain)
134{
135	assert (read_domains);
136	assert (write_domain == 0 || write_domain == read_domains);
137
138	if (list_is_empty(&priv->batch))
139		list_add(&priv->batch, &intel->batch_pixmaps);
140
141	priv->dirty |= write_domain != 0;
142	priv->busy = 1;
143
144	intel->needs_flush |= write_domain != 0;
145}
146
147static inline void
148intel_batch_emit_reloc_pixmap(intel_screen_private *intel, PixmapPtr pixmap,
149			      uint32_t read_domains, uint32_t write_domain,
150			      uint32_t delta, int needs_fence)
151{
152	struct intel_uxa_pixmap *priv = intel_uxa_get_pixmap_private(pixmap);
153
154	intel_batch_mark_pixmap_domains(intel, priv, read_domains, write_domain);
155
156	intel_batch_emit_reloc(intel, priv->bo,
157			       read_domains, write_domain,
158			       delta, needs_fence);
159}
160
/* Emission shorthands; all expect a local `intel` in scope.
 * None of the expansions ends in a semicolon — the caller supplies it —
 * so `if (cond) ALIGN_BATCH(4); else ...` stays well-formed.  (The old
 * ALIGN_BATCH carried a trailing `;`, which broke that pattern.) */
#define ALIGN_BATCH(align) intel_batch_align(intel, align)
#define OUT_BATCH(dword) intel_batch_emit_dword(intel, dword)

#define OUT_RELOC(bo, read_domains, write_domains, delta) \
	intel_batch_emit_reloc(intel, bo, read_domains, write_domains, delta, 0)

#define OUT_RELOC_FENCED(bo, read_domains, write_domains, delta) \
	intel_batch_emit_reloc(intel, bo, read_domains, write_domains, delta, 1)

#define OUT_RELOC_PIXMAP(pixmap, reads, write, delta)	\
	intel_batch_emit_reloc_pixmap(intel, pixmap, reads, write, delta, 0)

#define OUT_RELOC_PIXMAP_FENCED(pixmap, reads, write, delta)	\
	intel_batch_emit_reloc_pixmap(intel, pixmap, reads, write, delta, 1)
175
/* Bit-exact reinterpretation of a float as a 32-bit integer via a union;
 * avoids the strict-aliasing violation of a pointer cast. */
union intfloat {
	float f;
	unsigned int ui;
};

/* Emit a float into the batch as its raw 32-bit IEEE-754 bit pattern. */
#define OUT_BATCH_F(x) do {			\
	union intfloat tmp;			\
	tmp.f = (float)(x);			\
	OUT_BATCH(tmp.ui);			\
} while(0)
186
/* Open a batch section of exactly (n) dwords on ring `batch_idx`.
 * Expects `scrn` and `intel` in the caller's scope.  The context switch
 * runs before space is reserved because the hook may itself emit
 * commands; batch_emit_start/batch_emitting let ADVANCE_BATCH verify
 * that precisely (n) dwords were written.  Sections must not nest. */
#define __BEGIN_BATCH(n,batch_idx)					\
do {									\
	if (intel->batch_emitting != 0)					\
		FatalError("%s: BEGIN_BATCH called without closing "	\
			   "ADVANCE_BATCH\n", __FUNCTION__);		\
	assert(!intel->in_batch_atomic);				\
	if (intel->current_batch != batch_idx) {			\
		if (intel->current_batch && intel->context_switch)	\
			intel->context_switch(intel, batch_idx);	\
	}								\
	intel_batch_require_space(scrn, intel, (n) * 4);		\
	intel->current_batch = batch_idx;				\
	intel->batch_emitting = (n);					\
	intel->batch_emit_start = intel->batch_used;			\
} while (0)

/* Ring-specific wrappers around __BEGIN_BATCH. */
#define BEGIN_BATCH(n)	__BEGIN_BATCH(n,RENDER_BATCH)
#define BEGIN_BATCH_BLT(n)	__BEGIN_BATCH(n,BLT_BATCH)
205
/* Close the section opened by BEGIN_BATCH.  Aborts the server if the
 * dword count emitted since BEGIN_BATCH is not exactly the amount
 * declared — both overruns and underruns indicate a coding error in
 * the emission sequence. */
#define ADVANCE_BATCH() do {						\
	if (intel->batch_emitting == 0)					\
		FatalError("%s: ADVANCE_BATCH called with no matching "	\
			   "BEGIN_BATCH\n", __FUNCTION__);		\
	if (intel->batch_used >						\
	    intel->batch_emit_start + intel->batch_emitting)		\
		FatalError("%s: ADVANCE_BATCH: exceeded allocation %d/%d\n ", \
			   __FUNCTION__,				\
			   intel->batch_used - intel->batch_emit_start,	\
			   intel->batch_emitting);			\
	if (intel->batch_used < intel->batch_emit_start +		\
	    intel->batch_emitting)					\
		FatalError("%s: ADVANCE_BATCH: under-used allocation %d/%d\n ", \
			   __FUNCTION__,				\
			   intel->batch_used - intel->batch_emit_start,	\
			   intel->batch_emitting);			\
	intel->batch_emitting = 0;					\
} while (0)
224
225void intel_next_vertex(intel_screen_private *intel);
226static inline void intel_vertex_emit(intel_screen_private *intel, float v)
227{
228	intel->vertex_ptr[intel->vertex_used++] = v;
229}
230#define OUT_VERTEX(v) intel_vertex_emit(intel, v)
231
232#endif /* _INTEL_BATCHBUFFER_H */
233