103b705cfSriastradh/**************************************************************************
203b705cfSriastradh
303b705cfSriastradhCopyright 1998-1999 Precision Insight, Inc., Cedar Park, Texas.
403b705cfSriastradhCopyright © 2002 David Dawes
503b705cfSriastradh
603b705cfSriastradhAll Rights Reserved.
703b705cfSriastradh
803b705cfSriastradhPermission is hereby granted, free of charge, to any person obtaining a
903b705cfSriastradhcopy of this software and associated documentation files (the
1003b705cfSriastradh"Software"), to deal in the Software without restriction, including
1103b705cfSriastradhwithout limitation the rights to use, copy, modify, merge, publish,
1203b705cfSriastradhdistribute, sub license, and/or sell copies of the Software, and to
1303b705cfSriastradhpermit persons to whom the Software is furnished to do so, subject to
1403b705cfSriastradhthe following conditions:
1503b705cfSriastradh
1603b705cfSriastradhThe above copyright notice and this permission notice (including the
1703b705cfSriastradhnext paragraph) shall be included in all copies or substantial portions
1803b705cfSriastradhof the Software.
1903b705cfSriastradh
2003b705cfSriastradhTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
2103b705cfSriastradhOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
2203b705cfSriastradhMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
2303b705cfSriastradhIN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
2403b705cfSriastradhANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
2503b705cfSriastradhTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
2603b705cfSriastradhSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
2703b705cfSriastradh
2803b705cfSriastradh**************************************************************************/
2903b705cfSriastradh
#ifndef _INTEL_BATCHBUFFER_H
#define _INTEL_BATCHBUFFER_H

/* Bytes kept free at the tail of every batch buffer; intel_batch_space()
 * subtracts this from the buffer size so the final commands always fit. */
#define BATCH_RESERVED		64
3403b705cfSriastradh
3503b705cfSriastradh
/* Batch lifecycle entry points (defined in the corresponding .c file):
 * create/destroy per-screen batch state, emit an explicit flush into the
 * batch, and hand the accumulated batch to the kernel for execution. */
void intel_batch_init(ScrnInfoPtr scrn);
void intel_batch_teardown(ScrnInfoPtr scrn);
void intel_batch_emit_flush(ScrnInfoPtr scrn);
void intel_batch_submit(ScrnInfoPtr scrn);
4003b705cfSriastradh
4103b705cfSriastradhstatic inline int intel_batch_space(intel_screen_private *intel)
4203b705cfSriastradh{
4303b705cfSriastradh	return (intel->batch_bo->size - BATCH_RESERVED) - (4*intel->batch_used);
4403b705cfSriastradh}
4503b705cfSriastradh
4603b705cfSriastradhstatic inline int intel_vertex_space(intel_screen_private *intel)
4703b705cfSriastradh{
4803b705cfSriastradh	return intel->vertex_bo ? intel->vertex_bo->size - (4*intel->vertex_used) : 0;
4903b705cfSriastradh}
5003b705cfSriastradh
5103b705cfSriastradhstatic inline void
5203b705cfSriastradhintel_batch_require_space(ScrnInfoPtr scrn, intel_screen_private *intel, int sz)
5303b705cfSriastradh{
5403b705cfSriastradh	assert(sz < intel->batch_bo->size - 8);
5503b705cfSriastradh	if (intel_batch_space(intel) < sz)
5603b705cfSriastradh		intel_batch_submit(scrn);
5703b705cfSriastradh}
5803b705cfSriastradh
5903b705cfSriastradhstatic inline void intel_batch_start_atomic(ScrnInfoPtr scrn, int sz)
6003b705cfSriastradh{
6103b705cfSriastradh	intel_screen_private *intel = intel_get_screen_private(scrn);
6203b705cfSriastradh
6303b705cfSriastradh	assert(!intel->in_batch_atomic);
6403b705cfSriastradh
6503b705cfSriastradh	if (intel->current_batch != RENDER_BATCH) {
6603b705cfSriastradh		if (intel->current_batch && intel->context_switch)
6703b705cfSriastradh			intel->context_switch(intel, RENDER_BATCH);
6803b705cfSriastradh	}
6903b705cfSriastradh
7003b705cfSriastradh	intel_batch_require_space(scrn, intel, sz * 4);
7103b705cfSriastradh	intel->current_batch = RENDER_BATCH;
7203b705cfSriastradh
7303b705cfSriastradh	intel->in_batch_atomic = TRUE;
7403b705cfSriastradh	intel->batch_atomic_limit = intel->batch_used + sz;
7503b705cfSriastradh}
7603b705cfSriastradh
7703b705cfSriastradhstatic inline void intel_batch_end_atomic(ScrnInfoPtr scrn)
7803b705cfSriastradh{
7903b705cfSriastradh	intel_screen_private *intel = intel_get_screen_private(scrn);
8003b705cfSriastradh
8103b705cfSriastradh	assert(intel->in_batch_atomic);
8203b705cfSriastradh	assert(intel->batch_used <= intel->batch_atomic_limit);
8303b705cfSriastradh	intel->in_batch_atomic = FALSE;
8403b705cfSriastradh}
8503b705cfSriastradh
8603b705cfSriastradhstatic inline void intel_batch_emit_dword(intel_screen_private *intel, uint32_t dword)
8703b705cfSriastradh{
8803b705cfSriastradh	intel->batch_ptr[intel->batch_used++] = dword;
8903b705cfSriastradh}
9003b705cfSriastradh
9103b705cfSriastradhstatic inline void intel_batch_align(intel_screen_private *intel, uint32_t align)
9203b705cfSriastradh{
9303b705cfSriastradh	uint32_t delta;
9403b705cfSriastradh
9503b705cfSriastradh	align /= 4;
9603b705cfSriastradh	assert(align);
9703b705cfSriastradh
9803b705cfSriastradh	if ((delta = intel->batch_used & (align - 1))) {
9903b705cfSriastradh		delta = align - delta;
10003b705cfSriastradh		memset (intel->batch_ptr + intel->batch_used, 0, 4*delta);
10103b705cfSriastradh		intel->batch_used += delta;
10203b705cfSriastradh	}
10303b705cfSriastradh}
10403b705cfSriastradh
/*
 * Record a relocation for `bo` at the current batch position and emit the
 * buffer's presumed GPU address (last-known offset + delta) into the batch.
 * libdrm patches the emitted value at execbuffer time if the buffer moves
 * -- NOTE(review): confirm against libdrm semantics.
 *
 * `needs_fence` selects the fenced relocation variant.  Gen values in this
 * driver appear to be octal-encoded (0100 here, 060 used below), so the
 * second dword is the high half of a >32-bit address on gen8+ -- TODO
 * confirm the encoding.
 */
static inline void
intel_batch_emit_reloc(intel_screen_private *intel,
		       dri_bo * bo,
		       uint32_t read_domains,
		       uint32_t write_domains, uint32_t delta, int needs_fence)
{
	uint64_t offset;

	/* Register the reloc with libdrm at the current byte offset
	 * (batch_used is in dwords). */
	if (needs_fence)
		drm_intel_bo_emit_reloc_fence(intel->batch_bo,
					      intel->batch_used * 4,
					      bo, delta,
					      read_domains, write_domains);
	else
		drm_intel_bo_emit_reloc(intel->batch_bo, intel->batch_used * 4,
					bo, delta,
					read_domains, write_domains);

	offset = bo->offset64 + delta;

	/* Low dword always; high dword only when addresses exceed 32 bits. */
	intel_batch_emit_dword(intel, offset);
	if (INTEL_INFO(intel)->gen >= 0100)
		intel_batch_emit_dword(intel, offset >> 32);
}
12903b705cfSriastradh
13003b705cfSriastradhstatic inline void
13103b705cfSriastradhintel_batch_mark_pixmap_domains(intel_screen_private *intel,
13213496ba1Ssnj				struct intel_uxa_pixmap *priv,
13303b705cfSriastradh				uint32_t read_domains, uint32_t write_domain)
13403b705cfSriastradh{
13503b705cfSriastradh	assert (read_domains);
13603b705cfSriastradh	assert (write_domain == 0 || write_domain == read_domains);
13703b705cfSriastradh
13803b705cfSriastradh	if (list_is_empty(&priv->batch))
13903b705cfSriastradh		list_add(&priv->batch, &intel->batch_pixmaps);
14003b705cfSriastradh
14103b705cfSriastradh	priv->dirty |= write_domain != 0;
14203b705cfSriastradh	priv->busy = 1;
14303b705cfSriastradh
14403b705cfSriastradh	intel->needs_flush |= write_domain != 0;
14503b705cfSriastradh}
14603b705cfSriastradh
14703b705cfSriastradhstatic inline void
14803b705cfSriastradhintel_batch_emit_reloc_pixmap(intel_screen_private *intel, PixmapPtr pixmap,
14903b705cfSriastradh			      uint32_t read_domains, uint32_t write_domain,
15003b705cfSriastradh			      uint32_t delta, int needs_fence)
15103b705cfSriastradh{
15213496ba1Ssnj	struct intel_uxa_pixmap *priv = intel_uxa_get_pixmap_private(pixmap);
15303b705cfSriastradh
15403b705cfSriastradh	intel_batch_mark_pixmap_domains(intel, priv, read_domains, write_domain);
15503b705cfSriastradh
15603b705cfSriastradh	intel_batch_emit_reloc(intel, priv->bo,
15703b705cfSriastradh			       read_domains, write_domain,
15803b705cfSriastradh			       delta, needs_fence);
15903b705cfSriastradh}
16003b705cfSriastradh
/*
 * Emission shorthands: each expects a local variable named `intel` in
 * scope at the call site.
 *
 * BUGFIX: ALIGN_BATCH previously expanded with a trailing semicolon, so
 * `if (cond) ALIGN_BATCH(x); else ...` was a syntax error and every normal
 * use produced an empty extra statement.  The semicolon now belongs to the
 * caller, matching the other OUT_* macros.
 */
#define ALIGN_BATCH(align) intel_batch_align(intel, align)
#define OUT_BATCH(dword) intel_batch_emit_dword(intel, dword)

#define OUT_RELOC(bo, read_domains, write_domains, delta) \
	intel_batch_emit_reloc(intel, bo, read_domains, write_domains, delta, 0)

#define OUT_RELOC_FENCED(bo, read_domains, write_domains, delta) \
	intel_batch_emit_reloc(intel, bo, read_domains, write_domains, delta, 1)

#define OUT_RELOC_PIXMAP(pixmap, reads, write, delta)	\
	intel_batch_emit_reloc_pixmap(intel, pixmap, reads, write, delta, 0)

#define OUT_RELOC_PIXMAP_FENCED(pixmap, reads, write, delta)	\
	intel_batch_emit_reloc_pixmap(intel, pixmap, reads, write, delta, 1)
17503b705cfSriastradh
/* Type-punning helper: view a float's raw bit pattern as a 32-bit
 * unsigned integer (union punning is well-defined in C). */
union intfloat {
	float f;
	unsigned int ui;
};

/* Emit a float into the batch by storing its IEEE-754 bit pattern as a
 * dword. */
#define OUT_BATCH_F(x) do {			\
	union intfloat tmp;			\
	tmp.f = (float)(x);			\
	OUT_BATCH(tmp.ui);			\
} while(0)
18603b705cfSriastradh
/*
 * Open an emission region of exactly `n` dwords on ring `batch_idx`
 * (requires locals `scrn` and `intel` in scope).  Fatal if a previous
 * BEGIN_BATCH was not closed with ADVANCE_BATCH; must not be used inside
 * an atomic section.  Switches batch context when changing rings, then
 * reserves the space up front so the region cannot straddle a submit.
 * The recorded start/size let ADVANCE_BATCH verify that exactly `n`
 * dwords were emitted.
 */
#define __BEGIN_BATCH(n,batch_idx)					\
do {									\
	if (intel->batch_emitting != 0)					\
		FatalError("%s: BEGIN_BATCH called without closing "	\
			   "ADVANCE_BATCH\n", __FUNCTION__);		\
	assert(!intel->in_batch_atomic);				\
	if (intel->current_batch != batch_idx) {			\
		if (intel->current_batch && intel->context_switch)	\
			intel->context_switch(intel, batch_idx);	\
	}								\
	intel_batch_require_space(scrn, intel, (n) * 4);		\
	intel->current_batch = batch_idx;				\
	intel->batch_emitting = (n);					\
	intel->batch_emit_start = intel->batch_used;			\
} while (0)

#define BEGIN_BATCH(n)	__BEGIN_BATCH(n,RENDER_BATCH)
#define BEGIN_BATCH_BLT(n)	__BEGIN_BATCH(n,BLT_BATCH)
/*
 * Like BEGIN_BATCH_BLT, but on newer hardware (gen >= 060, octal) it first
 * flushes the blitter and programs BCS_SWCTRL via MI_LOAD_REGISTER_IMM so
 * that source/destination are treated as Y-tiled according to
 * intel->BR_tiling[0]/[1] (the high 16 bits are the write-enable mask for
 * the two control bits).  Those setup commands take 7 extra dwords, hence
 * the n+7 reservation.
 */
#define BEGIN_BATCH_BLT_TILED(n) \
do { \
	if (INTEL_INFO(intel)->gen < 060) { \
		__BEGIN_BATCH(n, BLT_BATCH); \
	} else { \
		__BEGIN_BATCH(n+7, BLT_BATCH); \
		OUT_BATCH(MI_FLUSH_DW); \
		OUT_BATCH(0); \
		OUT_BATCH(0); \
		OUT_BATCH(0); \
		OUT_BATCH(MI_LOAD_REGISTER_IMM); \
		OUT_BATCH(BCS_SWCTRL); \
		OUT_BATCH((BCS_SWCTRL_DST_Y | BCS_SWCTRL_SRC_Y) << 16 | \
			  ((intel->BR_tiling[0] == I915_TILING_Y) ? BCS_SWCTRL_DST_Y : 0) | \
			  ((intel->BR_tiling[1] == I915_TILING_Y) ? BCS_SWCTRL_SRC_Y : 0)); \
	} \
} while (0)
22203b705cfSriastradh
/*
 * Close a region opened by BEGIN_BATCH*.  Fatal if no region is open, or
 * if the number of dwords actually emitted differs (in either direction)
 * from the size declared at BEGIN time -- both over- and under-filling a
 * region indicate a mis-sized command sequence.
 */
#define ADVANCE_BATCH() do {						\
	if (intel->batch_emitting == 0)					\
		FatalError("%s: ADVANCE_BATCH called with no matching "	\
			   "BEGIN_BATCH\n", __FUNCTION__);		\
	if (intel->batch_used >						\
	    intel->batch_emit_start + intel->batch_emitting)		\
		FatalError("%s: ADVANCE_BATCH: exceeded allocation %d/%d\n ", \
			   __FUNCTION__,				\
			   intel->batch_used - intel->batch_emit_start,	\
			   intel->batch_emitting);			\
	if (intel->batch_used < intel->batch_emit_start +		\
	    intel->batch_emitting)					\
		FatalError("%s: ADVANCE_BATCH: under-used allocation %d/%d\n ", \
			   __FUNCTION__,				\
			   intel->batch_used - intel->batch_emit_start,	\
			   intel->batch_emitting);			\
	intel->batch_emitting = 0;					\
} while (0)
24103b705cfSriastradh
24203b705cfSriastradhvoid intel_next_vertex(intel_screen_private *intel);
24303b705cfSriastradhstatic inline void intel_vertex_emit(intel_screen_private *intel, float v)
24403b705cfSriastradh{
24503b705cfSriastradh	intel->vertex_ptr[intel->vertex_used++] = v;
24603b705cfSriastradh}
24703b705cfSriastradh#define OUT_VERTEX(v) intel_vertex_emit(intel, v)
24803b705cfSriastradh
24903b705cfSriastradh#endif /* _INTEL_BATCHBUFFER_H */
250