1/**************************************************************************
2
3Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
4
5The Weather Channel (TM) funded Tungsten Graphics to develop the
6initial release of the Radeon 8500 driver under the XFree86 license.
7This notice must be preserved.
8
9Permission is hereby granted, free of charge, to any person obtaining
10a copy of this software and associated documentation files (the
11"Software"), to deal in the Software without restriction, including
12without limitation the rights to use, copy, modify, merge, publish,
13distribute, sublicense, and/or sell copies of the Software, and to
14permit persons to whom the Software is furnished to do so, subject to
15the following conditions:
16
17The above copyright notice and this permission notice (including the
18next paragraph) shall be included in all copies or substantial
19portions of the Software.
20
21THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29**************************************************************************/
30
31/*
32 * Authors:
33 *   Keith Whitwell <keithw@vmware.com>
34 */
35
36/*
37   - Scissor implementation
38   - buffer swap/copy ioctls
39   - finish/flush
40   - state emission
41   - cmdbuffer management
42*/
43
44#include <errno.h>
45#include "main/glheader.h"
46#include "main/context.h"
47#include "main/enums.h"
48#include "main/fbobject.h"
49#include "main/framebuffer.h"
50#include "main/renderbuffer.h"
51#include "drivers/common/meta.h"
52
53#include "radeon_common.h"
54#include "radeon_drm.h"
55#include "radeon_queryobj.h"
56
57/**
58 * Enable verbose debug output for emit code.
59 * 0 no output
60 * 1 most output
 * 2 also print state values
62 */
63#define RADEON_CMDBUF         0
64
65/* =============================================================
66 * Scissoring
67 */
68
69/**
70 * Update cliprects and scissors.
71 */
72void radeonSetCliprects(radeonContextPtr radeon)
73{
74	__DRIdrawable *const drawable = radeon_get_drawable(radeon);
75	__DRIdrawable *const readable = radeon_get_readable(radeon);
76
77	if(drawable == NULL && readable == NULL)
78		return;
79
80	struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
81	struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
82
83	if ((draw_rfb->base.Width != drawable->w) ||
84	    (draw_rfb->base.Height != drawable->h)) {
85		_mesa_resize_framebuffer(&radeon->glCtx, &draw_rfb->base,
86					 drawable->w, drawable->h);
87	}
88
89	if (drawable != readable) {
90		if ((read_rfb->base.Width != readable->w) ||
91		    (read_rfb->base.Height != readable->h)) {
92			_mesa_resize_framebuffer(&radeon->glCtx, &read_rfb->base,
93						 readable->w, readable->h);
94		}
95	}
96
97	if (radeon->state.scissor.enabled)
98		radeonUpdateScissor(&radeon->glCtx);
99
100}
101
102
103
104void radeonUpdateScissor( struct gl_context *ctx )
105{
106	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
107	GLint x = ctx->Scissor.ScissorArray[0].X, y = ctx->Scissor.ScissorArray[0].Y;
108	GLsizei w = ctx->Scissor.ScissorArray[0].Width, h = ctx->Scissor.ScissorArray[0].Height;
109	int x1, y1, x2, y2;
110	int min_x, min_y, max_x, max_y;
111
112	if (!ctx->DrawBuffer)
113	    return;
114	min_x = min_y = 0;
115	max_x = ctx->DrawBuffer->Width - 1;
116	max_y = ctx->DrawBuffer->Height - 1;
117
118	if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
119		x1 = x;
120		y1 = ctx->DrawBuffer->Height - (y + h);
121		x2 = x + w - 1;
122		y2 = y1 + h - 1;
123	} else {
124		x1 = x;
125		y1 = y;
126		x2 = x + w - 1;
127		y2 = y + h - 1;
128
129	}
130
131	rmesa->state.scissor.rect.x1 = CLAMP(x1,  min_x, max_x);
132	rmesa->state.scissor.rect.y1 = CLAMP(y1,  min_y, max_y);
133	rmesa->state.scissor.rect.x2 = CLAMP(x2,  min_x, max_x);
134	rmesa->state.scissor.rect.y2 = CLAMP(y2,  min_y, max_y);
135
136	if (rmesa->vtbl.update_scissor)
137	   rmesa->vtbl.update_scissor(ctx);
138}
139
140/* =============================================================
141 * Scissoring
142 */
143
144void radeonScissor(struct gl_context *ctx)
145{
146	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
147	if (ctx->Scissor.EnableFlags) {
148		/* We don't pipeline cliprect changes */
149		radeon_firevertices(radeon);
150		radeonUpdateScissor(ctx);
151	}
152}
153
154/* ================================================================
155 * SwapBuffers with client-side throttling
156 */
157
158uint32_t radeonGetAge(radeonContextPtr radeon)
159{
160	drm_radeon_getparam_t gp;
161	int ret;
162	uint32_t age;
163
164	gp.param = RADEON_PARAM_LAST_CLEAR;
165	gp.value = (int *)&age;
166	ret = drmCommandWriteRead(radeon->radeonScreen->driScreen->fd, DRM_RADEON_GETPARAM,
167				  &gp, sizeof(gp));
168	if (ret) {
169		fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __func__,
170			ret);
171		exit(1);
172	}
173
174	return age;
175}
176
177void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
178{
179	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
180	struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
181		*rrbColor = NULL;
182	uint32_t offset = 0;
183
184
185	if (!fb) {
186		/* this can happen during the initial context initialization */
187		return;
188	}
189
190	/* radeons only handle 1 color draw so far */
191	if (fb->_NumColorDrawBuffers != 1) {
192		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
193		return;
194	}
195
196	/* Do this here, note core Mesa, since this function is called from
197	 * many places within the driver.
198	 */
199	if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
200		/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
201		_mesa_update_framebuffer(ctx, ctx->ReadBuffer, ctx->DrawBuffer);
202		/* this updates the DrawBuffer's Width/Height if it's a FBO */
203		_mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);
204	}
205
206	if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
207		/* this may occur when we're called by glBindFrameBuffer() during
208		 * the process of someone setting up renderbuffers, etc.
209		 */
210		/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
211		return;
212	}
213
214	if (fb->Name) {
215		;/* do something depthy/stencily TODO */
216        }
217
218		/* none */
219	if (fb->Name == 0) {
220		if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
221			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
222			radeon->front_cliprects = GL_TRUE;
223		} else {
224			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
225			radeon->front_cliprects = GL_FALSE;
226		}
227	} else {
228		/* user FBO in theory */
229		struct radeon_renderbuffer *rrb;
230		rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
231		if (rrb) {
232			offset = rrb->draw_offset;
233			rrbColor = rrb;
234		}
235	}
236
237	if (rrbColor == NULL)
238		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
239	else
240		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
241
242
243	if (fb->Attachment[BUFFER_DEPTH].Renderbuffer) {
244		rrbDepth = radeon_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
245		if (rrbDepth && rrbDepth->bo) {
246			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
247		} else {
248			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
249		}
250	} else {
251		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
252		rrbDepth = NULL;
253	}
254
255	if (fb->Attachment[BUFFER_STENCIL].Renderbuffer) {
256		rrbStencil = radeon_renderbuffer(fb->Attachment[BUFFER_STENCIL].Renderbuffer);
257		if (rrbStencil && rrbStencil->bo) {
258			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
259			/* need to re-compute stencil hw state */
260			if (!rrbDepth)
261				rrbDepth = rrbStencil;
262		} else {
263			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
264		}
265	} else {
266		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
267		if (ctx->Driver.Enable != NULL)
268			ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
269		else
270			ctx->NewState |= _NEW_STENCIL;
271	}
272
273	/* Update culling direction which changes depending on the
274	 * orientation of the buffer:
275	 */
276	if (ctx->Driver.FrontFace)
277		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
278	else
279		ctx->NewState |= _NEW_POLYGON;
280
281	/*
282	 * Update depth test state
283	 */
284	if (ctx->Driver.Enable) {
285		ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
286				   (ctx->Depth.Test && fb->Visual.depthBits > 0));
287		ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
288				   (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
289	} else {
290		ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
291	}
292
293	_mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base.Base);
294	_mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base.Base);
295	radeon->state.color.draw_offset = offset;
296
297	ctx->NewState |= _NEW_VIEWPORT;
298
299	/* Set state we know depends on drawable parameters:
300	 */
301	radeonUpdateScissor(ctx);
302	radeon->NewGLState |= _NEW_SCISSOR;
303
304	if (ctx->Driver.DepthRange)
305		ctx->Driver.DepthRange(ctx);
306
307	/* Update culling direction which changes depending on the
308	 * orientation of the buffer:
309	 */
310	if (ctx->Driver.FrontFace)
311		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
312	else
313		ctx->NewState |= _NEW_POLYGON;
314}
315
316/**
317 * Called via glDrawBuffer.
318 */
319void radeonDrawBuffer(struct gl_context *ctx)
320{
321	if (RADEON_DEBUG & RADEON_DRI)
322		fprintf(stderr, "%s\n", __func__);
323
324	if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer)) {
325		radeonContextPtr radeon = RADEON_CONTEXT(ctx);
326
327		/* If we might be front-buffer rendering on this buffer for
328		 * the first time, invalidate our DRI drawable so we'll ask
329		 * for new buffers (including the fake front) before we start
330		 * rendering again.
331		 */
332		radeon_update_renderbuffers(radeon->driContext,
333					    radeon->driContext->driDrawablePriv,
334					    GL_FALSE);
335	}
336
337	radeon_draw_buffer(ctx, ctx->DrawBuffer);
338}
339
340void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
341{
342	if (_mesa_is_front_buffer_reading(ctx->ReadBuffer)) {
343		struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
344		radeon_update_renderbuffers(rmesa->driContext,
345					    rmesa->driContext->driReadablePriv, GL_FALSE);
346	}
347	/* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
348	if (ctx->ReadBuffer == ctx->DrawBuffer) {
349		/* This will update FBO completeness status.
350		 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
351		 * refers to a missing renderbuffer.  Calling glReadBuffer can set
352		 * that straight and can make the drawing buffer complete.
353		 */
354		radeon_draw_buffer(ctx, ctx->DrawBuffer);
355	}
356}
357
/* Window-move notification: only cliprect/scissor state depends on window
 * position, so refreshing it via radeonSetCliprects() is all that's needed. */
void radeon_window_moved(radeonContextPtr radeon)
{
	/* Cliprects has to be updated before doing anything else */
	radeonSetCliprects(radeon);
}
363
364void radeon_viewport(struct gl_context *ctx)
365{
366	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
367	__DRIcontext *driContext = radeon->driContext;
368	void (*old_viewport)(struct gl_context *ctx);
369
370	if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
371		if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer)) {
372			ctx->Driver.Flush(ctx, 0);
373		}
374		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
375		if (driContext->driDrawablePriv != driContext->driReadablePriv)
376			radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
377	}
378
379	old_viewport = ctx->Driver.Viewport;
380	ctx->Driver.Viewport = NULL;
381	radeon_window_moved(radeon);
382	radeon_draw_buffer(ctx, radeon->glCtx.DrawBuffer);
383	ctx->Driver.Viewport = old_viewport;
384}
385
386static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
387{
388	int i, j, reg, count;
389	int dwords;
390	uint32_t packet0;
391	if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
392		return;
393
394	dwords = state->check(&radeon->glCtx, state);
395
396	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);
397
398	if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
399		if (dwords > state->cmd_size)
400			dwords = state->cmd_size;
401		for (i = 0; i < dwords;) {
402			packet0 = state->cmd[i];
403			reg = (packet0 & 0x1FFF) << 2;
404			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
405			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
406					state->name, i, reg, count);
407			++i;
408			for (j = 0; j < count && i < dwords; j++) {
409				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
410						state->name, i, reg, state->cmd[i]);
411				reg += 4;
412				++i;
413			}
414		}
415	}
416}
417
418/**
419 * Count total size for next state emit.
420 **/
421GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
422{
423	struct radeon_state_atom *atom;
424	GLuint dwords = 0;
425	/* check if we are going to emit full state */
426
427	if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
428		if (!radeon->hw.is_dirty)
429			goto out;
430		foreach(atom, &radeon->hw.atomlist) {
431			if (atom->dirty) {
432				const GLuint atom_size = atom->check(&radeon->glCtx, atom);
433				dwords += atom_size;
434				if (RADEON_CMDBUF && atom_size) {
435					radeon_print_state_atom(radeon, atom);
436				}
437			}
438		}
439	} else {
440		foreach(atom, &radeon->hw.atomlist) {
441			const GLuint atom_size = atom->check(&radeon->glCtx, atom);
442			dwords += atom_size;
443			if (RADEON_CMDBUF && atom_size) {
444				radeon_print_state_atom(radeon, atom);
445			}
446
447		}
448	}
449out:
450	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
451	return dwords;
452}
453
454static inline void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
455{
456	BATCH_LOCALS(radeon);
457	int dwords;
458
459	dwords = atom->check(&radeon->glCtx, atom);
460	if (dwords) {
461
462		radeon_print_state_atom(radeon, atom);
463
464		if (atom->emit) {
465			atom->emit(&radeon->glCtx, atom);
466		} else {
467			BEGIN_BATCH(dwords);
468			OUT_BATCH_TABLE(atom->cmd, dwords);
469			END_BATCH();
470		}
471		atom->dirty = GL_FALSE;
472
473	} else {
474		radeon_print(RADEON_STATE, RADEON_VERBOSE, "  skip state %s\n", atom->name);
475	}
476
477}
478
479static inline void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
480{
481	struct radeon_state_atom *atom;
482
483	/* Emit actual atoms */
484	if (radeon->hw.all_dirty || emitAll) {
485		foreach(atom, &radeon->hw.atomlist)
486			radeon_emit_atom( radeon, atom );
487	} else {
488		foreach(atom, &radeon->hw.atomlist) {
489			if ( atom->dirty )
490				radeon_emit_atom( radeon, atom );
491		}
492	}
493
494	COMMIT_BATCH();
495}
496
497void radeonEmitState(radeonContextPtr radeon)
498{
499	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __func__);
500
501	if (radeon->vtbl.pre_emit_state)
502		radeon->vtbl.pre_emit_state(radeon);
503
504	/* this code used to return here but now it emits zbs */
505	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
506		return;
507
508	if (!radeon->cmdbuf.cs->cdw) {
509		if (RADEON_DEBUG & RADEON_STATE)
510			fprintf(stderr, "Begin reemit state\n");
511
512		radeonEmitAtoms(radeon, GL_TRUE);
513	} else {
514
515		if (RADEON_DEBUG & RADEON_STATE)
516			fprintf(stderr, "Begin dirty state\n");
517
518		radeonEmitAtoms(radeon, GL_FALSE);
519	}
520
521	radeon->hw.is_dirty = GL_FALSE;
522	radeon->hw.all_dirty = GL_FALSE;
523}
524
525
526void radeonFlush(struct gl_context *ctx, unsigned gallium_flush_flags)
527{
528	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
529	if (RADEON_DEBUG & RADEON_IOCTL)
530		fprintf(stderr, "%s %d\n", __func__, radeon->cmdbuf.cs->cdw);
531
532	/* okay if we have no cmds in the buffer &&
533	   we have no DMA flush &&
534	   we have no DMA buffer allocated.
535	   then no point flushing anything at all.
536	*/
537	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
538		goto flush_front;
539
540	if (radeon->dma.flush)
541		radeon->dma.flush( ctx );
542
543	if (radeon->cmdbuf.cs->cdw)
544		rcommonFlushCmdBuf(radeon, __func__);
545
546flush_front:
547	if (ctx->DrawBuffer && _mesa_is_winsys_fbo(ctx->DrawBuffer) &&
548	    radeon->front_buffer_dirty) {
549		__DRIscreen *const screen = radeon->radeonScreen->driScreen;
550
551		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
552			&& (screen->dri2.loader->flushFrontBuffer != NULL)) {
553			__DRIdrawable * drawable = radeon_get_drawable(radeon);
554
555			/* We set the dirty bit in radeon_prepare_render() if we're
556			 * front buffer rendering once we get there.
557			 */
558			radeon->front_buffer_dirty = GL_FALSE;
559
560			screen->dri2.loader->flushFrontBuffer(drawable, drawable->loaderPrivate);
561		}
562	}
563}
564
565/* Make sure all commands have been sent to the hardware and have
566 * completed processing.
567 */
568void radeonFinish(struct gl_context * ctx)
569{
570	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
571	struct gl_framebuffer *fb = ctx->DrawBuffer;
572	struct radeon_renderbuffer *rrb;
573	int i;
574
575	if (ctx->Driver.Flush)
576		ctx->Driver.Flush(ctx, 0); /* +r6/r7 */
577
578	for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
579		struct radeon_renderbuffer *rrb;
580		rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
581		if (rrb && rrb->bo)
582			radeon_bo_wait(rrb->bo);
583	}
584	rrb = radeon_get_depthbuffer(radeon);
585	if (rrb && rrb->bo)
586		radeon_bo_wait(rrb->bo);
587}
588
589/* cmdbuffer */
590/**
591 * Send the current command buffer via ioctl to the hardware.
592 */
593int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
594{
595	int ret = 0;
596
597	if (rmesa->cmdbuf.flushing) {
598		fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
599		exit(-1);
600	}
601	rmesa->cmdbuf.flushing = 1;
602
603	if (RADEON_DEBUG & RADEON_IOCTL) {
604		fprintf(stderr, "%s from %s\n", __func__, caller);
605	}
606
607	radeonEmitQueryEnd(&rmesa->glCtx);
608
609	if (rmesa->cmdbuf.cs->cdw) {
610		ret = radeon_cs_emit(rmesa->cmdbuf.cs);
611		rmesa->hw.all_dirty = GL_TRUE;
612	}
613	radeon_cs_erase(rmesa->cmdbuf.cs);
614	rmesa->cmdbuf.flushing = 0;
615
616	if (!rmesa->vtbl.revalidate_all_buffers(&rmesa->glCtx))
617		fprintf(stderr,"failed to revalidate buffers\n");
618
619	return ret;
620}
621
622int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
623{
624	int ret;
625
626	radeonReleaseDmaRegions(rmesa);
627
628	ret = rcommonFlushCmdBufLocked(rmesa, caller);
629
630	if (ret) {
631		fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
632				"parse or rejected command stream. See dmesg "
633				"for more info.\n", ret);
634		exit(ret);
635	}
636
637	return ret;
638}
639
640/**
641 * Make sure that enough space is available in the command buffer
642 * by flushing if necessary.
643 *
644 * \param dwords The number of dwords we need to be free on the command buffer
645 */
646GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
647{
648   if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
649	 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
650      /* If we try to flush empty buffer there is too big rendering operation. */
651      assert(rmesa->cmdbuf.cs->cdw);
652      rcommonFlushCmdBuf(rmesa, caller);
653      return GL_TRUE;
654   }
655   return GL_FALSE;
656}
657
/**
 * Allocate and configure the GEM command-stream manager and command
 * buffer for this context, and set VRAM/GTT space limits from the
 * kernel's GEM info when available.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	struct drm_radeon_gem_info mminfo = { 0 };
	int fd = rmesa->radeonScreen->driScreen->fd;

	/* Initialize command buffer */
	/* Size (in dwords) comes from the driconf option, but must hold at
	 * least twice the maximum state emission, and is capped at 64*256. */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	if (size > 64 * 256)
		size = 64 * 256;

	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);

	rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Let the CS layer trigger a GL flush when it runs out of space. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx.Driver.Flush, &rmesa->glCtx);


	/* Constrain CS space checking to the real VRAM/GTT sizes when the
	 * kernel reports them; silently skipped on older kernels. */
	if (!drmCommandWriteRead(fd, DRM_RADEON_GEM_INFO,
				 &mminfo, sizeof(mminfo))) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
				    mminfo.vram_visible);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
				    mminfo.gart_size);
	}
}
702
703/**
704 * Destroy the command buffer
705 */
706void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
707{
708	radeon_cs_destroy(rmesa->cmdbuf.cs);
709	radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
710}
711
712void rcommonBeginBatch(radeonContextPtr rmesa, int n,
713		       const char *file,
714		       const char *function,
715		       int line)
716{
717	radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
718
719    radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
720                        n, rmesa->cmdbuf.cs->cdw, function, line);
721
722}
723
/**
 * glClear driver hook: delegate entirely to the shared meta-ops
 * implementation.
 */
void radeonUserClear(struct gl_context *ctx, GLuint mask)
{
   _mesa_meta_Clear(ctx, mask);
}
728