/**************************************************************************

Copyright 1998-1999 Precision Insight, Inc., Cedar Park, Texas.
Copyright © 2002 David Dawes

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sub license, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial portions
of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

#ifndef _INTEL_BATCHBUFFER_H
#define _INTEL_BATCHBUFFER_H

/* Bytes held in reserve at the end of every batch buffer. */
#define BATCH_RESERVED		16


void intel_batch_init(ScrnInfoPtr scrn);
void intel_batch_teardown(ScrnInfoPtr scrn);
void intel_batch_emit_flush(ScrnInfoPtr scrn);
void intel_batch_submit(ScrnInfoPtr scrn);

/* Bytes still free in the current batch buffer. Note that batch_used
 * counts dwords, while batch_bo->size and BATCH_RESERVED are bytes. */
static inline int intel_batch_space(intel_screen_private *intel)
{
	return (intel->batch_bo->size - BATCH_RESERVED) - (4*intel->batch_used);
}

/* Bytes still free in the current vertex buffer, or 0 if no vertex bo
 * is active. vertex_used counts floats (dwords). */
static inline int intel_vertex_space(intel_screen_private *intel)
{
	return intel->vertex_bo ? intel->vertex_bo->size - (4*intel->vertex_used) : 0;
}

/* Make sure at least sz bytes are free in the batch, submitting the
 * current batch first if they are not. */
static inline void
intel_batch_require_space(ScrnInfoPtr scrn, intel_screen_private *intel, int sz)
{
	assert(sz < intel->batch_bo->size - 8);
	if (intel_batch_space(intel) < sz)
		intel_batch_submit(scrn);
}
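
/*
 * Usage sketch (illustrative only): code that writes dwords into the batch
 * by hand should reserve the space for a whole command up front so that it
 * is never split across batch buffers, e.g.
 *
 *	intel_batch_require_space(scrn, intel, 6 * 4);
 *	... emit six dwords ...
 *
 * The BEGIN_BATCH()/ADVANCE_BATCH() macros below do this for you.
 */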

/* Begin an "atomic" region of sz dwords in the render batch: the space is
 * reserved now, so nothing emitted inside the region can trigger a flush. */
static inline void intel_batch_start_atomic(ScrnInfoPtr scrn, int sz)
{
	intel_screen_private *intel = intel_get_screen_private(scrn);

	assert(!intel->in_batch_atomic);

	if (intel->current_batch != RENDER_BATCH) {
		if (intel->current_batch && intel->context_switch)
			intel->context_switch(intel, RENDER_BATCH);
	}

	intel_batch_require_space(scrn, intel, sz * 4);
	intel->current_batch = RENDER_BATCH;

	intel->in_batch_atomic = TRUE;
	intel->batch_atomic_limit = intel->batch_used + sz;
}

/* Close an atomic region, checking that it stayed within the space
 * reserved by intel_batch_start_atomic(). */
static inline void intel_batch_end_atomic(ScrnInfoPtr scrn)
{
	intel_screen_private *intel = intel_get_screen_private(scrn);

	assert(intel->in_batch_atomic);
	assert(intel->batch_used <= intel->batch_atomic_limit);
	intel->in_batch_atomic = FALSE;
}
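
/*
 * Usage sketch (illustrative only, with made-up dword values): an atomic
 * region keeps state and the commands that depend on it in the same batch:
 *
 *	intel_batch_start_atomic(scrn, 4);
 *	OUT_BATCH(0x0);
 *	OUT_BATCH(0x0);
 *	OUT_BATCH(0x0);
 *	OUT_BATCH(0x0);
 *	intel_batch_end_atomic(scrn);
 */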

/* Append a single dword to the current batch. The caller is responsible
 * for having reserved enough space. */
static inline void intel_batch_emit_dword(intel_screen_private *intel, uint32_t dword)
{
	intel->batch_ptr[intel->batch_used++] = dword;
}

/* Pad the batch with zero dwords until batch_used is aligned to "align"
 * bytes (which must be a power of two). */
static inline void intel_batch_align(intel_screen_private *intel, uint32_t align)
{
	uint32_t delta;

	align /= 4;
	assert(align);

	if ((delta = intel->batch_used & (align - 1))) {
		delta = align - delta;
		memset(intel->batch_ptr + intel->batch_used, 0, 4*delta);
		intel->batch_used += delta;
	}
}
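
/*
 * Usage sketch (illustrative only): commands with stricter alignment rules
 * can pad the batch before being emitted, e.g. to start the next command on
 * a quadword boundary:
 *
 *	intel_batch_align(intel, 8);
 *	OUT_BATCH(...);
 */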

/* Record a relocation for bo at the current batch offset and emit the
 * presumed address (bo->offset + delta) as the next dword. */
static inline void
intel_batch_emit_reloc(intel_screen_private *intel,
		       dri_bo * bo,
		       uint32_t read_domains,
		       uint32_t write_domains, uint32_t delta, int needs_fence)
{
	if (needs_fence)
		drm_intel_bo_emit_reloc_fence(intel->batch_bo,
					      intel->batch_used * 4,
					      bo, delta,
					      read_domains, write_domains);
	else
		drm_intel_bo_emit_reloc(intel->batch_bo, intel->batch_used * 4,
					bo, delta,
					read_domains, write_domains);

	intel_batch_emit_dword(intel, bo->offset + delta);
}

/* Track a pixmap referenced by the current batch: add it to the batch's
 * pixmap list and mark it busy (and dirty if it will be written). */
static inline void
intel_batch_mark_pixmap_domains(intel_screen_private *intel,
				struct intel_pixmap *priv,
				uint32_t read_domains, uint32_t write_domain)
{
	assert(read_domains);
	assert(write_domain == 0 || write_domain == read_domains);

	if (list_is_empty(&priv->batch))
		list_add(&priv->batch, &intel->batch_pixmaps);

	priv->dirty |= write_domain != 0;
	priv->busy = 1;

	intel->needs_flush |= write_domain != 0;
}

/* Emit a relocation to a pixmap's bo, updating the pixmap's batch-tracking
 * state first. */
static inline void
intel_batch_emit_reloc_pixmap(intel_screen_private *intel, PixmapPtr pixmap,
			      uint32_t read_domains, uint32_t write_domain,
			      uint32_t delta, int needs_fence)
{
	struct intel_pixmap *priv = intel_get_pixmap_private(pixmap);

	intel_batch_mark_pixmap_domains(intel, priv, read_domains, write_domain);

	intel_batch_emit_reloc(intel, priv->bo,
			       read_domains, write_domain,
			       delta, needs_fence);
}

#define ALIGN_BATCH(align) intel_batch_align(intel, align)
#define OUT_BATCH(dword) intel_batch_emit_dword(intel, dword)

#define OUT_RELOC(bo, read_domains, write_domains, delta) \
	intel_batch_emit_reloc(intel, bo, read_domains, write_domains, delta, 0)

#define OUT_RELOC_FENCED(bo, read_domains, write_domains, delta) \
	intel_batch_emit_reloc(intel, bo, read_domains, write_domains, delta, 1)

#define OUT_RELOC_PIXMAP(pixmap, reads, write, delta)	\
	intel_batch_emit_reloc_pixmap(intel, pixmap, reads, write, delta, 0)

#define OUT_RELOC_PIXMAP_FENCED(pixmap, reads, write, delta)	\
	intel_batch_emit_reloc_pixmap(intel, pixmap, reads, write, delta, 1)

/* Used to push a float into the batch as its raw 32-bit pattern. */
union intfloat {
	float f;
	unsigned int ui;
};

#define OUT_BATCH_F(x) do {			\
	union intfloat tmp;			\
	tmp.f = (float)(x);			\
	OUT_BATCH(tmp.ui);			\
} while (0)
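
/*
 * Usage sketch (illustrative only; the variable names are made up): float
 * immediates, such as inline vertex data, are emitted with OUT_BATCH_F():
 *
 *	OUT_BATCH_F(dst_x);
 *	OUT_BATCH_F(dst_y);
 */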

/* Reserve room for n dwords in the given batch (switching batches if
 * necessary) and remember the allocation so ADVANCE_BATCH() can verify it. */
#define __BEGIN_BATCH(n,batch_idx)					\
do {									\
	if (intel->batch_emitting != 0)					\
		FatalError("%s: BEGIN_BATCH called without closing "	\
			   "ADVANCE_BATCH\n", __FUNCTION__);		\
	assert(!intel->in_batch_atomic);				\
	if (intel->current_batch != batch_idx) {			\
		if (intel->current_batch && intel->context_switch)	\
			intel->context_switch(intel, batch_idx);	\
	}								\
	intel_batch_require_space(scrn, intel, (n) * 4);		\
	intel->current_batch = batch_idx;				\
	intel->batch_emitting = (n);					\
	intel->batch_emit_start = intel->batch_used;			\
} while (0)

#define BEGIN_BATCH(n)		__BEGIN_BATCH(n,RENDER_BATCH)
#define BEGIN_BATCH_BLT(n)	__BEGIN_BATCH(n,BLT_BATCH)

/* Close a BEGIN_BATCH() block, verifying that exactly the reserved number
 * of dwords was emitted. */
#define ADVANCE_BATCH() do {						\
	if (intel->batch_emitting == 0)					\
		FatalError("%s: ADVANCE_BATCH called with no matching "	\
			   "BEGIN_BATCH\n", __FUNCTION__);		\
	if (intel->batch_used >						\
	    intel->batch_emit_start + intel->batch_emitting)		\
		FatalError("%s: ADVANCE_BATCH: exceeded allocation %d/%d\n", \
			   __FUNCTION__,				\
			   intel->batch_used - intel->batch_emit_start,	\
			   intel->batch_emitting);			\
	if (intel->batch_used < intel->batch_emit_start +		\
	    intel->batch_emitting)					\
		FatalError("%s: ADVANCE_BATCH: under-used allocation %d/%d\n", \
			   __FUNCTION__,				\
			   intel->batch_used - intel->batch_emit_start,	\
			   intel->batch_emitting);			\
	intel->batch_emitting = 0;					\
} while (0)
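
/*
 * Usage sketch (illustrative only; the command words and lengths are made
 * up): a typical emitter reserves its dwords, writes them with OUT_BATCH()
 * and OUT_RELOC*(), and then closes the block:
 *
 *	BEGIN_BATCH(4);
 *	OUT_BATCH(cmd);
 *	OUT_BATCH(pitch_and_flags);
 *	OUT_BATCH(0);
 *	OUT_RELOC_PIXMAP(pixmap, I915_GEM_DOMAIN_RENDER,
 *			 I915_GEM_DOMAIN_RENDER, 0);
 *	ADVANCE_BATCH();
 */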

void intel_next_vertex(intel_screen_private *intel);

/* Append one float of vertex data to the current vertex buffer. */
static inline void intel_vertex_emit(intel_screen_private *intel, float v)
{
	intel->vertex_ptr[intel->vertex_used++] = v;
}
#define OUT_VERTEX(v) intel_vertex_emit(intel, v)
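
/*
 * Usage sketch (illustrative only; the variable names are made up): callers
 * check intel_vertex_space() and switch to a fresh vertex bo with
 * intel_next_vertex() before emitting a primitive's worth of data:
 *
 *	if (intel_vertex_space(intel) < 3 * 4 * sizeof(float))
 *		intel_next_vertex(intel);
 *	OUT_VERTEX(dst_x);
 *	OUT_VERTEX(dst_y);
 *	...
 */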

#endif /* _INTEL_BATCHBUFFER_H */