r600_state.c revision b8e80941
1/*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23#include "r600_formats.h"
24#include "r600_shader.h"
25#include "r600d.h"
26
27#include "pipe/p_shader_tokens.h"
28#include "util/u_pack_color.h"
29#include "util/u_memory.h"
30#include "util/u_framebuffer.h"
31#include "util/u_dual_blend.h"
32
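/*
 * Translate Gallium PIPE_BLEND_* / PIPE_BLENDFACTOR_* enums into the
 * CB_BLEND_CONTROL field encodings (the V_028804_* values from r600d.h).
 * Unknown values are treated as programming errors and assert.
 */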
33static uint32_t r600_translate_blend_function(int blend_func)
34{
35	switch (blend_func) {
36	case PIPE_BLEND_ADD:
37		return V_028804_COMB_DST_PLUS_SRC;
38	case PIPE_BLEND_SUBTRACT:
39		return V_028804_COMB_SRC_MINUS_DST;
40	case PIPE_BLEND_REVERSE_SUBTRACT:
41		return V_028804_COMB_DST_MINUS_SRC;
42	case PIPE_BLEND_MIN:
43		return V_028804_COMB_MIN_DST_SRC;
44	case PIPE_BLEND_MAX:
45		return V_028804_COMB_MAX_DST_SRC;
46	default:
47		R600_ERR("Unknown blend function %d\n", blend_func);
48		assert(0);
49		break;
50	}
51	return 0;
52}
53
54static uint32_t r600_translate_blend_factor(int blend_fact)
55{
56	switch (blend_fact) {
57	case PIPE_BLENDFACTOR_ONE:
58		return V_028804_BLEND_ONE;
59	case PIPE_BLENDFACTOR_SRC_COLOR:
60		return V_028804_BLEND_SRC_COLOR;
61	case PIPE_BLENDFACTOR_SRC_ALPHA:
62		return V_028804_BLEND_SRC_ALPHA;
63	case PIPE_BLENDFACTOR_DST_ALPHA:
64		return V_028804_BLEND_DST_ALPHA;
65	case PIPE_BLENDFACTOR_DST_COLOR:
66		return V_028804_BLEND_DST_COLOR;
67	case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
68		return V_028804_BLEND_SRC_ALPHA_SATURATE;
69	case PIPE_BLENDFACTOR_CONST_COLOR:
70		return V_028804_BLEND_CONST_COLOR;
71	case PIPE_BLENDFACTOR_CONST_ALPHA:
72		return V_028804_BLEND_CONST_ALPHA;
73	case PIPE_BLENDFACTOR_ZERO:
74		return V_028804_BLEND_ZERO;
75	case PIPE_BLENDFACTOR_INV_SRC_COLOR:
76		return V_028804_BLEND_ONE_MINUS_SRC_COLOR;
77	case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
78		return V_028804_BLEND_ONE_MINUS_SRC_ALPHA;
79	case PIPE_BLENDFACTOR_INV_DST_ALPHA:
80		return V_028804_BLEND_ONE_MINUS_DST_ALPHA;
81	case PIPE_BLENDFACTOR_INV_DST_COLOR:
82		return V_028804_BLEND_ONE_MINUS_DST_COLOR;
83	case PIPE_BLENDFACTOR_INV_CONST_COLOR:
84		return V_028804_BLEND_ONE_MINUS_CONST_COLOR;
85	case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
86		return V_028804_BLEND_ONE_MINUS_CONST_ALPHA;
87	case PIPE_BLENDFACTOR_SRC1_COLOR:
88		return V_028804_BLEND_SRC1_COLOR;
89	case PIPE_BLENDFACTOR_SRC1_ALPHA:
90		return V_028804_BLEND_SRC1_ALPHA;
91	case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
92		return V_028804_BLEND_INV_SRC1_COLOR;
93	case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
94		return V_028804_BLEND_INV_SRC1_ALPHA;
95	default:
96		R600_ERR("Bad blend factor %d not supported!\n", blend_fact);
97		assert(0);
98		break;
99	}
100	return 0;
101}
102
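/*
 * Map a pipe_texture_target (plus sample count) to the SQ_TEX_RESOURCE DIM
 * field.  Note that cube-map arrays are programmed as plain cube maps here.
 */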
103static unsigned r600_tex_dim(unsigned dim, unsigned nr_samples)
104{
105	switch (dim) {
106	default:
107	case PIPE_TEXTURE_1D:
108		return V_038000_SQ_TEX_DIM_1D;
109	case PIPE_TEXTURE_1D_ARRAY:
110		return V_038000_SQ_TEX_DIM_1D_ARRAY;
111	case PIPE_TEXTURE_2D:
112	case PIPE_TEXTURE_RECT:
113		return nr_samples > 1 ? V_038000_SQ_TEX_DIM_2D_MSAA :
114					V_038000_SQ_TEX_DIM_2D;
115	case PIPE_TEXTURE_2D_ARRAY:
116		return nr_samples > 1 ? V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA :
117					V_038000_SQ_TEX_DIM_2D_ARRAY;
118	case PIPE_TEXTURE_3D:
119		return V_038000_SQ_TEX_DIM_3D;
120	case PIPE_TEXTURE_CUBE:
121	case PIPE_TEXTURE_CUBE_ARRAY:
122		return V_038000_SQ_TEX_DIM_CUBEMAP;
123	}
124}
125
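/* Returns the DB_DEPTH_INFO format encoding, or ~0U for unsupported formats
 * (~0U doubles as the "unsupported" sentinel in r600_is_zs_format_supported). */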
126static uint32_t r600_translate_dbformat(enum pipe_format format)
127{
128	switch (format) {
129	case PIPE_FORMAT_Z16_UNORM:
130		return V_028010_DEPTH_16;
131	case PIPE_FORMAT_Z24X8_UNORM:
132		return V_028010_DEPTH_X8_24;
133	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
134		return V_028010_DEPTH_8_24;
135	case PIPE_FORMAT_Z32_FLOAT:
136		return V_028010_DEPTH_32_FLOAT;
137	case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
138		return V_028010_DEPTH_X24_8_32_FLOAT;
139	default:
140		return ~0U;
141	}
142}
143
144static bool r600_is_sampler_format_supported(struct pipe_screen *screen, enum pipe_format format)
145{
146	return r600_translate_texformat(screen, format, NULL, NULL, NULL,
147                                   FALSE) != ~0U;
148}
149
150static bool r600_is_colorbuffer_format_supported(enum chip_class chip, enum pipe_format format)
151{
152	return r600_translate_colorformat(chip, format, FALSE) != ~0U &&
153	       r600_translate_colorswap(format, FALSE) != ~0U;
154}
155
156static bool r600_is_zs_format_supported(enum pipe_format format)
157{
158	return r600_translate_dbformat(format) != ~0U;
159}
160
161boolean r600_is_format_supported(struct pipe_screen *screen,
162				 enum pipe_format format,
163				 enum pipe_texture_target target,
164				 unsigned sample_count,
165				 unsigned storage_sample_count,
166				 unsigned usage)
167{
168	struct r600_screen *rscreen = (struct r600_screen*)screen;
169	unsigned retval = 0;
170
171	if (target >= PIPE_MAX_TEXTURE_TYPES) {
172		R600_ERR("r600: unsupported texture type %d\n", target);
173		return FALSE;
174	}
175
176	if (MAX2(1, sample_count) != MAX2(1, storage_sample_count))
177		return false;
178
179	if (sample_count > 1) {
180		if (!rscreen->has_msaa)
181			return FALSE;
182
183		/* R11G11B10 is broken on R6xx. */
184		if (rscreen->b.chip_class == R600 &&
185		    format == PIPE_FORMAT_R11G11B10_FLOAT)
186			return FALSE;
187
188		/* MSAA integer colorbuffers hang. */
189		if (util_format_is_pure_integer(format) &&
190		    !util_format_is_depth_or_stencil(format))
191			return FALSE;
192
193		switch (sample_count) {
194		case 2:
195		case 4:
196		case 8:
197			break;
198		default:
199			return FALSE;
200		}
201	}
202
203	if (usage & PIPE_BIND_SAMPLER_VIEW) {
204		if (target == PIPE_BUFFER) {
205			if (r600_is_vertex_format_supported(format))
206				retval |= PIPE_BIND_SAMPLER_VIEW;
207		} else {
208			if (r600_is_sampler_format_supported(screen, format))
209				retval |= PIPE_BIND_SAMPLER_VIEW;
210		}
211	}
212
213	if ((usage & (PIPE_BIND_RENDER_TARGET |
214		      PIPE_BIND_DISPLAY_TARGET |
215		      PIPE_BIND_SCANOUT |
216		      PIPE_BIND_SHARED |
217		      PIPE_BIND_BLENDABLE)) &&
218	    r600_is_colorbuffer_format_supported(rscreen->b.chip_class, format)) {
219		retval |= usage &
220			  (PIPE_BIND_RENDER_TARGET |
221			   PIPE_BIND_DISPLAY_TARGET |
222			   PIPE_BIND_SCANOUT |
223			   PIPE_BIND_SHARED);
224		if (!util_format_is_pure_integer(format) &&
225		    !util_format_is_depth_or_stencil(format))
226			retval |= usage & PIPE_BIND_BLENDABLE;
227	}
228
229	if ((usage & PIPE_BIND_DEPTH_STENCIL) &&
230	    r600_is_zs_format_supported(format)) {
231		retval |= PIPE_BIND_DEPTH_STENCIL;
232	}
233
234	if ((usage & PIPE_BIND_VERTEX_BUFFER) &&
235	    r600_is_vertex_format_supported(format)) {
236		retval |= PIPE_BIND_VERTEX_BUFFER;
237	}
238
239	if ((usage & PIPE_BIND_LINEAR) &&
240	    !util_format_is_compressed(format) &&
241	    !(usage & PIPE_BIND_DEPTH_STENCIL))
242		retval |= PIPE_BIND_LINEAR;
243
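	/* Succeed only if every requested bind flag was accepted above. */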
244	return retval == usage;
245}
246
247static void r600_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a)
248{
249	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
250	struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a;
251	float offset_units = state->offset_units;
252	float offset_scale = state->offset_scale;
253	uint32_t pa_su_poly_offset_db_fmt_cntl = 0;
254
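	/* PA_SU_POLY_OFFSET_DB_FMT_CNTL describes the bound depth format
	 * (number of Z bits, or float) so the hardware can scale offset_units
	 * appropriately; offset_units itself is also adjusted per format below. */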
255	if (!state->offset_units_unscaled) {
256		switch (state->zs_format) {
257		case PIPE_FORMAT_Z24X8_UNORM:
258		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
259			offset_units *= 2.0f;
260			pa_su_poly_offset_db_fmt_cntl =
261				S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-24);
262			break;
263		case PIPE_FORMAT_Z16_UNORM:
264			offset_units *= 4.0f;
265			pa_su_poly_offset_db_fmt_cntl =
266				S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-16);
267			break;
268		default:
269			pa_su_poly_offset_db_fmt_cntl =
270				S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-23) |
271				S_028DF8_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
272		}
273	}
274
275	radeon_set_context_reg_seq(cs, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
276	radeon_emit(cs, fui(offset_scale));
277	radeon_emit(cs, fui(offset_units));
278	radeon_emit(cs, fui(offset_scale));
279	radeon_emit(cs, fui(offset_units));
280
281	radeon_set_context_reg(cs, R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
282			       pa_su_poly_offset_db_fmt_cntl);
283}
284
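/*
 * Build the CB_BLEND[i]_CONTROL dword for render target i (or target 0 when
 * independent blending is disabled).  Returns 0 (blending off) when the
 * target has blend_enable unset; SEPARATE_ALPHA_BLEND is only set when the
 * alpha equation or factors differ from the RGB ones.
 */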
285static uint32_t r600_get_blend_control(const struct pipe_blend_state *state, unsigned i)
286{
287	int j = state->independent_blend_enable ? i : 0;
288
289	unsigned eqRGB = state->rt[j].rgb_func;
290	unsigned srcRGB = state->rt[j].rgb_src_factor;
291	unsigned dstRGB = state->rt[j].rgb_dst_factor;
292
293	unsigned eqA = state->rt[j].alpha_func;
294	unsigned srcA = state->rt[j].alpha_src_factor;
295	unsigned dstA = state->rt[j].alpha_dst_factor;
296	uint32_t bc = 0;
297
298	if (!state->rt[j].blend_enable)
299		return 0;
300
301	bc |= S_028804_COLOR_COMB_FCN(r600_translate_blend_function(eqRGB));
302	bc |= S_028804_COLOR_SRCBLEND(r600_translate_blend_factor(srcRGB));
303	bc |= S_028804_COLOR_DESTBLEND(r600_translate_blend_factor(dstRGB));
304
305	if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
306		bc |= S_028804_SEPARATE_ALPHA_BLEND(1);
307		bc |= S_028804_ALPHA_COMB_FCN(r600_translate_blend_function(eqA));
308		bc |= S_028804_ALPHA_SRCBLEND(r600_translate_blend_factor(srcA));
309		bc |= S_028804_ALPHA_DESTBLEND(r600_translate_blend_factor(dstA));
310	}
311	return bc;
312}
313
314static void *r600_create_blend_state_mode(struct pipe_context *ctx,
315					  const struct pipe_blend_state *state,
316					  int mode)
317{
318	struct r600_context *rctx = (struct r600_context *)ctx;
319	uint32_t color_control = 0, target_mask = 0;
320	struct r600_blend_state *blend = CALLOC_STRUCT(r600_blend_state);
321
322	if (!blend) {
323		return NULL;
324	}
325
326	r600_init_command_buffer(&blend->buffer, 20);
327	r600_init_command_buffer(&blend->buffer_no_blend, 20);
328
329	/* The original R600 does not support per-MRT blends */
330	if (rctx->b.family > CHIP_R600)
331		color_control |= S_028808_PER_MRT_BLEND(1);
332
333	if (state->logicop_enable) {
334		color_control |= (state->logicop_func << 16) | (state->logicop_func << 20);
335	} else {
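		/* 0xcc replicates PIPE_LOGICOP_COPY (0xC) into both nibbles of
		 * the ROP field, i.e. a plain source copy when logic ops are
		 * disabled */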
336		color_control |= (0xcc << 16);
337	}
338	/* we pretend 8 buffers are used; CB_SHADER_MASK will disable the unused ones */
339	if (state->independent_blend_enable) {
340		for (int i = 0; i < 8; i++) {
341			if (state->rt[i].blend_enable) {
342				color_control |= S_028808_TARGET_BLEND_ENABLE(1 << i);
343			}
344			target_mask |= (state->rt[i].colormask << (4 * i));
345		}
346	} else {
347		for (int i = 0; i < 8; i++) {
348			if (state->rt[0].blend_enable) {
349				color_control |= S_028808_TARGET_BLEND_ENABLE(1 << i);
350			}
351			target_mask |= (state->rt[0].colormask << (4 * i));
352		}
353	}
354
355	if (target_mask)
356		color_control |= S_028808_SPECIAL_OP(mode);
357	else
358		color_control |= S_028808_SPECIAL_OP(V_028808_DISABLE);
359
360	/* only MRT0 has dual src blend */
361	blend->dual_src_blend = util_blend_state_is_dual(state, 0);
362	blend->cb_target_mask = target_mask;
363	blend->cb_color_control = color_control;
364	blend->cb_color_control_no_blend = color_control & C_028808_TARGET_BLEND_ENABLE;
365	blend->alpha_to_one = state->alpha_to_one;
366
367	r600_store_context_reg(&blend->buffer, R_028D44_DB_ALPHA_TO_MASK,
368			       S_028D44_ALPHA_TO_MASK_ENABLE(state->alpha_to_coverage) |
369			       S_028D44_ALPHA_TO_MASK_OFFSET0(2) |
370			       S_028D44_ALPHA_TO_MASK_OFFSET1(2) |
371			       S_028D44_ALPHA_TO_MASK_OFFSET2(2) |
372			       S_028D44_ALPHA_TO_MASK_OFFSET3(2));
373
374	/* Copy over the registers set so far into buffer_no_blend. */
375	memcpy(blend->buffer_no_blend.buf, blend->buffer.buf, blend->buffer.num_dw * 4);
376	blend->buffer_no_blend.num_dw = blend->buffer.num_dw;
377
378	/* Only add blend registers if blending is enabled. */
379	if (!G_028808_TARGET_BLEND_ENABLE(color_control)) {
380		return blend;
381	}
382
383	/* The first R600 does not support per-MRT blends */
384	r600_store_context_reg(&blend->buffer, R_028804_CB_BLEND_CONTROL,
385			       r600_get_blend_control(state, 0));
386
387	if (rctx->b.family > CHIP_R600) {
388		r600_store_context_reg_seq(&blend->buffer, R_028780_CB_BLEND0_CONTROL, 8);
389		for (int i = 0; i < 8; i++) {
390			r600_store_value(&blend->buffer, r600_get_blend_control(state, i));
391		}
392	}
393	return blend;
394}
395
396static void *r600_create_blend_state(struct pipe_context *ctx,
397				     const struct pipe_blend_state *state)
398{
399	return r600_create_blend_state_mode(ctx, state, V_028808_SPECIAL_NORMAL);
400}
401
402static void *r600_create_dsa_state(struct pipe_context *ctx,
403				   const struct pipe_depth_stencil_alpha_state *state)
404{
405	unsigned db_depth_control, alpha_test_control, alpha_ref;
406	struct r600_dsa_state *dsa = CALLOC_STRUCT(r600_dsa_state);
407
408	if (!dsa) {
409		return NULL;
410	}
411
412	r600_init_command_buffer(&dsa->buffer, 3);
413
414	dsa->valuemask[0] = state->stencil[0].valuemask;
415	dsa->valuemask[1] = state->stencil[1].valuemask;
416	dsa->writemask[0] = state->stencil[0].writemask;
417	dsa->writemask[1] = state->stencil[1].writemask;
418	dsa->zwritemask = state->depth.writemask;
419
420	db_depth_control = S_028800_Z_ENABLE(state->depth.enabled) |
421		S_028800_Z_WRITE_ENABLE(state->depth.writemask) |
422		S_028800_ZFUNC(state->depth.func);
423
424	/* stencil */
425	if (state->stencil[0].enabled) {
426		db_depth_control |= S_028800_STENCIL_ENABLE(1);
427		db_depth_control |= S_028800_STENCILFUNC(state->stencil[0].func); /* translates straight */
428		db_depth_control |= S_028800_STENCILFAIL(r600_translate_stencil_op(state->stencil[0].fail_op));
429		db_depth_control |= S_028800_STENCILZPASS(r600_translate_stencil_op(state->stencil[0].zpass_op));
430		db_depth_control |= S_028800_STENCILZFAIL(r600_translate_stencil_op(state->stencil[0].zfail_op));
431
432		if (state->stencil[1].enabled) {
433			db_depth_control |= S_028800_BACKFACE_ENABLE(1);
434			db_depth_control |= S_028800_STENCILFUNC_BF(state->stencil[1].func); /* translates straight */
435			db_depth_control |= S_028800_STENCILFAIL_BF(r600_translate_stencil_op(state->stencil[1].fail_op));
436			db_depth_control |= S_028800_STENCILZPASS_BF(r600_translate_stencil_op(state->stencil[1].zpass_op));
437			db_depth_control |= S_028800_STENCILZFAIL_BF(r600_translate_stencil_op(state->stencil[1].zfail_op));
438		}
439	}
440
441	/* alpha */
442	alpha_test_control = 0;
443	alpha_ref = 0;
444	if (state->alpha.enabled) {
445		alpha_test_control = S_028410_ALPHA_FUNC(state->alpha.func);
446		alpha_test_control |= S_028410_ALPHA_TEST_ENABLE(1);
447		alpha_ref = fui(state->alpha.ref_value);
448	}
449	dsa->sx_alpha_test_control = alpha_test_control & 0xff;
450	dsa->alpha_ref = alpha_ref;
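	/* alpha_ref holds the raw IEEE-754 bit pattern of the reference value
	 * (see fui() above) */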
451
452	r600_store_context_reg(&dsa->buffer, R_028800_DB_DEPTH_CONTROL, db_depth_control);
453	return dsa;
454}
455
456static void *r600_create_rs_state(struct pipe_context *ctx,
457				  const struct pipe_rasterizer_state *state)
458{
459	struct r600_context *rctx = (struct r600_context *)ctx;
460	unsigned tmp, sc_mode_cntl, spi_interp;
461	float psize_min, psize_max;
462	struct r600_rasterizer_state *rs = CALLOC_STRUCT(r600_rasterizer_state);
463
464	if (!rs) {
465		return NULL;
466	}
467
468	r600_init_command_buffer(&rs->buffer, 30);
469
470	rs->scissor_enable = state->scissor;
471	rs->clip_halfz = state->clip_halfz;
472	rs->flatshade = state->flatshade;
473	rs->sprite_coord_enable = state->sprite_coord_enable;
474	rs->rasterizer_discard = state->rasterizer_discard;
475	rs->two_side = state->light_twoside;
476	rs->clip_plane_enable = state->clip_plane_enable;
477	rs->pa_sc_line_stipple = state->line_stipple_enable ?
478				S_028A0C_LINE_PATTERN(state->line_stipple_pattern) |
479				S_028A0C_REPEAT_COUNT(state->line_stipple_factor) : 0;
480	rs->pa_cl_clip_cntl =
481		S_028810_DX_CLIP_SPACE_DEF(state->clip_halfz) |
482		S_028810_ZCLIP_NEAR_DISABLE(!state->depth_clip_near) |
483		S_028810_ZCLIP_FAR_DISABLE(!state->depth_clip_far) |
484		S_028810_DX_LINEAR_ATTR_CLIP_ENA(1);
485	if (rctx->b.chip_class == R700) {
486		rs->pa_cl_clip_cntl |=
487			S_028810_DX_RASTERIZATION_KILL(state->rasterizer_discard);
488	}
489	rs->multisample_enable = state->multisample;
490
491	/* offset */
492	rs->offset_units = state->offset_units;
493	rs->offset_scale = state->offset_scale * 16.0f;
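	/* offset_scale is pre-multiplied by 16, presumably because the
	 * POLY_OFFSET scale is applied in 1/16th sub-pixel units */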
494	rs->offset_enable = state->offset_point || state->offset_line || state->offset_tri;
495	rs->offset_units_unscaled = state->offset_units_unscaled;
496
497	if (state->point_size_per_vertex) {
498		psize_min = util_get_min_point_size(state);
499		psize_max = 8192;
500	} else {
501		/* Force the point size to be as if the vertex output was disabled. */
502		psize_min = state->point_size;
503		psize_max = state->point_size;
504	}
505
506	sc_mode_cntl = S_028A4C_MSAA_ENABLE(state->multisample) |
507		       S_028A4C_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
508		       S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
509		       S_028A4C_PS_ITER_SAMPLE(state->multisample && rctx->ps_iter_samples > 1);
510	if (rctx->b.family == CHIP_RV770) {
511		/* work around possible rendering corruption on RV770 when hyperz is used together with sample shading */
512		sc_mode_cntl |= S_028A4C_TILE_COVER_DISABLE(state->multisample && rctx->ps_iter_samples > 1);
513	}
514	if (rctx->b.chip_class >= R700) {
515		sc_mode_cntl |= S_028A4C_FORCE_EOV_REZ_ENABLE(1) |
516				S_028A4C_R700_ZMM_LINE_OFFSET(1) |
517				S_028A4C_R700_VPORT_SCISSOR_ENABLE(1);
518	} else {
519		sc_mode_cntl |= S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1);
520	}
521
522	spi_interp = S_0286D4_FLAT_SHADE_ENA(1);
523	if (state->sprite_coord_enable) {
524		spi_interp |= S_0286D4_PNT_SPRITE_ENA(1) |
525			      S_0286D4_PNT_SPRITE_OVRD_X(2) |
526			      S_0286D4_PNT_SPRITE_OVRD_Y(3) |
527			      S_0286D4_PNT_SPRITE_OVRD_Z(0) |
528			      S_0286D4_PNT_SPRITE_OVRD_W(1);
529		if (state->sprite_coord_mode != PIPE_SPRITE_COORD_UPPER_LEFT) {
530			spi_interp |= S_0286D4_PNT_SPRITE_TOP_1(1);
531		}
532	}
533
534	r600_store_context_reg_seq(&rs->buffer, R_028A00_PA_SU_POINT_SIZE, 3);
535	/* point size 12.4 fixed point (divide by two, because 0.5 = 1 pixel). */
536	tmp = r600_pack_float_12p4(state->point_size/2);
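	/* e.g. a 10.0 px point becomes 5.0, i.e. 80 (0x050) in 12.4 fixed point */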
537	r600_store_value(&rs->buffer, /* R_028A00_PA_SU_POINT_SIZE */
538			 S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
539	r600_store_value(&rs->buffer, /* R_028A04_PA_SU_POINT_MINMAX */
540			 S_028A04_MIN_SIZE(r600_pack_float_12p4(psize_min/2)) |
541			 S_028A04_MAX_SIZE(r600_pack_float_12p4(psize_max/2)));
542	r600_store_value(&rs->buffer, /* R_028A08_PA_SU_LINE_CNTL */
543			 S_028A08_WIDTH(r600_pack_float_12p4(state->line_width/2)));
544
545	r600_store_context_reg(&rs->buffer, R_0286D4_SPI_INTERP_CONTROL_0, spi_interp);
546	r600_store_context_reg(&rs->buffer, R_028A4C_PA_SC_MODE_CNTL, sc_mode_cntl);
547	r600_store_context_reg(&rs->buffer, R_028C08_PA_SU_VTX_CNTL,
548			       S_028C08_PIX_CENTER_HALF(state->half_pixel_center) |
549			       S_028C08_QUANT_MODE(V_028C08_X_1_256TH));
550	r600_store_context_reg(&rs->buffer, R_028DFC_PA_SU_POLY_OFFSET_CLAMP, fui(state->offset_clamp));
551
552	rs->pa_su_sc_mode_cntl = S_028814_PROVOKING_VTX_LAST(!state->flatshade_first) |
553				 S_028814_CULL_FRONT(state->cull_face & PIPE_FACE_FRONT ? 1 : 0) |
554				 S_028814_CULL_BACK(state->cull_face & PIPE_FACE_BACK ? 1 : 0) |
555				 S_028814_FACE(!state->front_ccw) |
556				 S_028814_POLY_OFFSET_FRONT_ENABLE(util_get_offset(state, state->fill_front)) |
557				 S_028814_POLY_OFFSET_BACK_ENABLE(util_get_offset(state, state->fill_back)) |
558				 S_028814_POLY_OFFSET_PARA_ENABLE(state->offset_point || state->offset_line) |
559				 S_028814_POLY_MODE(state->fill_front != PIPE_POLYGON_MODE_FILL ||
560									 state->fill_back != PIPE_POLYGON_MODE_FILL) |
561				 S_028814_POLYMODE_FRONT_PTYPE(r600_translate_fill(state->fill_front)) |
562				 S_028814_POLYMODE_BACK_PTYPE(r600_translate_fill(state->fill_back));
563	if (rctx->b.chip_class == R700) {
564		r600_store_context_reg(&rs->buffer, R_028814_PA_SU_SC_MODE_CNTL, rs->pa_su_sc_mode_cntl);
565	}
566	if (rctx->b.chip_class == R600) {
567		r600_store_context_reg(&rs->buffer, R_028350_SX_MISC,
568				       S_028350_MULTIPASS(state->rasterizer_discard));
569	}
570	return rs;
571}
572
573static unsigned r600_tex_filter(unsigned filter, unsigned max_aniso)
574{
575	if (filter == PIPE_TEX_FILTER_LINEAR)
576		return max_aniso > 1 ? V_03C000_SQ_TEX_XY_FILTER_ANISO_BILINEAR
577				     : V_03C000_SQ_TEX_XY_FILTER_BILINEAR;
578	else
579		return max_aniso > 1 ? V_03C000_SQ_TEX_XY_FILTER_ANISO_POINT
580				     : V_03C000_SQ_TEX_XY_FILTER_POINT;
581}
582
583static void *r600_create_sampler_state(struct pipe_context *ctx,
584					const struct pipe_sampler_state *state)
585{
586	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
587	struct r600_pipe_sampler_state *ss = CALLOC_STRUCT(r600_pipe_sampler_state);
588	unsigned max_aniso = rscreen->force_aniso >= 0 ? rscreen->force_aniso
589						       : state->max_anisotropy;
590	unsigned max_aniso_ratio = r600_tex_aniso_filter(max_aniso);
591
592	if (!ss) {
593		return NULL;
594	}
595
596	ss->seamless_cube_map = state->seamless_cube_map;
597	ss->border_color_use = sampler_state_needs_border_color(state);
598
599	/* R_03C000_SQ_TEX_SAMPLER_WORD0_0 */
600	ss->tex_sampler_words[0] =
601		S_03C000_CLAMP_X(r600_tex_wrap(state->wrap_s)) |
602		S_03C000_CLAMP_Y(r600_tex_wrap(state->wrap_t)) |
603		S_03C000_CLAMP_Z(r600_tex_wrap(state->wrap_r)) |
604		S_03C000_XY_MAG_FILTER(r600_tex_filter(state->mag_img_filter, max_aniso)) |
605		S_03C000_XY_MIN_FILTER(r600_tex_filter(state->min_img_filter, max_aniso)) |
606		S_03C000_MIP_FILTER(r600_tex_mipfilter(state->min_mip_filter)) |
607		S_03C000_MAX_ANISO_RATIO(max_aniso_ratio) |
608		S_03C000_DEPTH_COMPARE_FUNCTION(r600_tex_compare(state->compare_func)) |
609		S_03C000_BORDER_COLOR_TYPE(ss->border_color_use ? V_03C000_SQ_TEX_BORDER_COLOR_REGISTER : 0);
610	/* R_03C004_SQ_TEX_SAMPLER_WORD1_0 */
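	/* LOD values are converted to fixed point with 6 fractional bits
	 * (S_FIXED(x, 6)), clamped to the range the register fields can hold */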
611	ss->tex_sampler_words[1] =
612		S_03C004_MIN_LOD(S_FIXED(CLAMP(state->min_lod, 0, 15), 6)) |
613		S_03C004_MAX_LOD(S_FIXED(CLAMP(state->max_lod, 0, 15), 6)) |
614		S_03C004_LOD_BIAS(S_FIXED(CLAMP(state->lod_bias, -16, 16), 6));
615	/* R_03C008_SQ_TEX_SAMPLER_WORD2_0 */
616	ss->tex_sampler_words[2] = S_03C008_TYPE(1);
617
618	if (ss->border_color_use) {
619		memcpy(&ss->border_color, &state->border_color, sizeof(state->border_color));
620	}
621	return ss;
622}
623
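/*
 * Buffer resources are set up much like vertex fetch constants: the stride is
 * the block size of the view format, the data/number formats come from
 * r600_vertex_data_type(), and word 1 holds the size in bytes minus one.
 */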
624static struct pipe_sampler_view *
625texture_buffer_sampler_view(struct r600_pipe_sampler_view *view,
626			    unsigned width0, unsigned height0)
627
628{
629	struct r600_texture *tmp = (struct r600_texture*)view->base.texture;
630	int stride = util_format_get_blocksize(view->base.format);
631	unsigned format, num_format, format_comp, endian;
632	uint64_t offset = view->base.u.buf.offset;
633	unsigned size = view->base.u.buf.size;
634
635	r600_vertex_data_type(view->base.format,
636			      &format, &num_format, &format_comp,
637			      &endian);
638
639	view->tex_resource = &tmp->resource;
640	view->skip_mip_address_reloc = true;
641
642	view->tex_resource_words[0] = offset;
643	view->tex_resource_words[1] = size - 1;
644	view->tex_resource_words[2] = S_038008_BASE_ADDRESS_HI(offset >> 32UL) |
645		S_038008_STRIDE(stride) |
646		S_038008_DATA_FORMAT(format) |
647		S_038008_NUM_FORMAT_ALL(num_format) |
648		S_038008_FORMAT_COMP_ALL(format_comp) |
649		S_038008_ENDIAN_SWAP(endian);
650	view->tex_resource_words[3] = 0;
651	/*
652	 * In theory dword 4 is for the number of elements, for use with resinfo,
653	 * but it seems to utterly fail to work; the AMD GPU shader analyser
654	 * uses a const buffer to store the element sizes for buffer txq instead.
655	 */
656	view->tex_resource_words[4] = 0;
657	view->tex_resource_words[5] = 0;
658	view->tex_resource_words[6] = S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_BUFFER);
659	return &view->base;
660}
661
662struct pipe_sampler_view *
663r600_create_sampler_view_custom(struct pipe_context *ctx,
664				struct pipe_resource *texture,
665				const struct pipe_sampler_view *state,
666				unsigned width_first_level, unsigned height_first_level)
667{
668	struct r600_pipe_sampler_view *view = CALLOC_STRUCT(r600_pipe_sampler_view);
669	struct r600_texture *tmp = (struct r600_texture*)texture;
670	unsigned format, endian;
671	uint32_t word4 = 0, yuv_format = 0, pitch = 0;
672	unsigned char swizzle[4], array_mode = 0;
673	unsigned width, height, depth, offset_level, last_level;
674	bool do_endian_swap = FALSE;
675
676	if (!view)
677		return NULL;
678
679	/* initialize base object */
680	view->base = *state;
681	view->base.texture = NULL;
682	pipe_reference(NULL, &texture->reference);
683	view->base.texture = texture;
684	view->base.reference.count = 1;
685	view->base.context = ctx;
686
687	if (texture->target == PIPE_BUFFER)
688		return texture_buffer_sampler_view(view, texture->width0, 1);
689
690	swizzle[0] = state->swizzle_r;
691	swizzle[1] = state->swizzle_g;
692	swizzle[2] = state->swizzle_b;
693	swizzle[3] = state->swizzle_a;
694
695	if (R600_BIG_ENDIAN)
696		do_endian_swap = !tmp->db_compatible;
697
698	format = r600_translate_texformat(ctx->screen, state->format,
699					  swizzle,
700					  &word4, &yuv_format, do_endian_swap);
701	assert(format != ~0);
702	if (format == ~0) {
703		FREE(view);
704		return NULL;
705	}
706
707	if (state->format == PIPE_FORMAT_X24S8_UINT ||
708	    state->format == PIPE_FORMAT_S8X24_UINT ||
709	    state->format == PIPE_FORMAT_X32_S8X24_UINT ||
710	    state->format == PIPE_FORMAT_S8_UINT)
711		view->is_stencil_sampler = true;
712
713	if (tmp->is_depth && !r600_can_sample_zs(tmp, view->is_stencil_sampler)) {
714		if (!r600_init_flushed_depth_texture(ctx, texture, NULL)) {
715			FREE(view);
716			return NULL;
717		}
718		tmp = tmp->flushed_depth_texture;
719	}
720
721	endian = r600_colorformat_endian_swap(format, do_endian_swap);
722
723	offset_level = state->u.tex.first_level;
724	last_level = state->u.tex.last_level - offset_level;
725	width = width_first_level;
726	height = height_first_level;
727	depth = u_minify(texture->depth0, offset_level);
728	pitch = tmp->surface.u.legacy.level[offset_level].nblk_x * util_format_get_blockwidth(state->format);
729
730	if (texture->target == PIPE_TEXTURE_1D_ARRAY) {
731		height = 1;
732		depth = texture->array_size;
733	} else if (texture->target == PIPE_TEXTURE_2D_ARRAY) {
734		depth = texture->array_size;
735	} else if (texture->target == PIPE_TEXTURE_CUBE_ARRAY)
736		depth = texture->array_size / 6;
737
738	switch (tmp->surface.u.legacy.level[offset_level].mode) {
739	default:
740	case RADEON_SURF_MODE_LINEAR_ALIGNED:
741		array_mode = V_038000_ARRAY_LINEAR_ALIGNED;
742		break;
743	case RADEON_SURF_MODE_1D:
744		array_mode = V_038000_ARRAY_1D_TILED_THIN1;
745		break;
746	case RADEON_SURF_MODE_2D:
747		array_mode = V_038000_ARRAY_2D_TILED_THIN1;
748		break;
749	}
750
751	view->tex_resource = &tmp->resource;
752	view->tex_resource_words[0] = (S_038000_DIM(r600_tex_dim(texture->target, texture->nr_samples)) |
753				       S_038000_TILE_MODE(array_mode) |
754				       S_038000_TILE_TYPE(tmp->non_disp_tiling) |
755				       S_038000_PITCH((pitch / 8) - 1) |
756				       S_038000_TEX_WIDTH(width - 1));
757	view->tex_resource_words[1] = (S_038004_TEX_HEIGHT(height - 1) |
758				       S_038004_TEX_DEPTH(depth - 1) |
759				       S_038004_DATA_FORMAT(format));
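	/* words[2]/[3] hold the 256-byte-aligned offsets of the base level and
	 * of the next mip level (or the base level again when none follows) */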
760	view->tex_resource_words[2] = tmp->surface.u.legacy.level[offset_level].offset >> 8;
761	if (offset_level >= tmp->resource.b.b.last_level) {
762		view->tex_resource_words[3] = tmp->surface.u.legacy.level[offset_level].offset >> 8;
763	} else {
764		view->tex_resource_words[3] = tmp->surface.u.legacy.level[offset_level + 1].offset >> 8;
765	}
766	view->tex_resource_words[4] = (word4 |
767				       S_038010_REQUEST_SIZE(1) |
768				       S_038010_ENDIAN_SWAP(endian) |
769				       S_038010_BASE_LEVEL(0));
770	view->tex_resource_words[5] = (S_038014_BASE_ARRAY(state->u.tex.first_layer) |
771				       S_038014_LAST_ARRAY(state->u.tex.last_layer));
772	if (texture->nr_samples > 1) {
773		/* LAST_LEVEL holds log2(nr_samples) for multisample textures */
774		view->tex_resource_words[5] |= S_038014_LAST_LEVEL(util_logbase2(texture->nr_samples));
775	} else {
776		view->tex_resource_words[5] |= S_038014_LAST_LEVEL(last_level);
777	}
778	view->tex_resource_words[6] = (S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_TEXTURE) |
779				       S_038018_MAX_ANISO(4 /* max 16:1 anisotropy */));
780	return &view->base;
781}
782
783static struct pipe_sampler_view *
784r600_create_sampler_view(struct pipe_context *ctx,
785			 struct pipe_resource *tex,
786			 const struct pipe_sampler_view *state)
787{
788	return r600_create_sampler_view_custom(ctx, tex, state,
789                                               u_minify(tex->width0, state->u.tex.first_level),
790                                               u_minify(tex->height0, state->u.tex.first_level));
791}
792
793static void r600_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom)
794{
795	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
796	struct pipe_clip_state *state = &rctx->clip_state.state;
797
798	radeon_set_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4);
799	radeon_emit_array(cs, (unsigned*)state, 6*4);
800}
801
802static void r600_set_polygon_stipple(struct pipe_context *ctx,
803					 const struct pipe_poly_stipple *state)
804{
805}
806
807static void r600_init_color_surface(struct r600_context *rctx,
808				    struct r600_surface *surf,
809				    bool force_cmask_fmask)
810{
811	struct r600_screen *rscreen = rctx->screen;
812	struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
813	unsigned level = surf->base.u.tex.level;
814	unsigned pitch, slice;
815	unsigned color_info;
816	unsigned color_view;
817	unsigned format, swap, ntype, endian;
818	unsigned offset;
819	const struct util_format_description *desc;
820	int i;
821	bool blend_bypass = 0, blend_clamp = 0, do_endian_swap = FALSE;
822
823	if (rtex->db_compatible && !r600_can_sample_zs(rtex, false)) {
824		r600_init_flushed_depth_texture(&rctx->b.b, surf->base.texture, NULL);
825		rtex = rtex->flushed_depth_texture;
826		assert(rtex);
827	}
828
829	offset = rtex->surface.u.legacy.level[level].offset;
830	color_view = S_028080_SLICE_START(surf->base.u.tex.first_layer) |
831		     S_028080_SLICE_MAX(surf->base.u.tex.last_layer);
832
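	/* PITCH_TILE_MAX / SLICE_TILE_MAX are expressed in 8x8-block tiles, minus one */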
833	pitch = rtex->surface.u.legacy.level[level].nblk_x / 8 - 1;
834	slice = (rtex->surface.u.legacy.level[level].nblk_x * rtex->surface.u.legacy.level[level].nblk_y) / 64;
835	if (slice) {
836		slice = slice - 1;
837	}
838	color_info = 0;
839	switch (rtex->surface.u.legacy.level[level].mode) {
840	default:
841	case RADEON_SURF_MODE_LINEAR_ALIGNED:
842		color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_LINEAR_ALIGNED);
843		break;
844	case RADEON_SURF_MODE_1D:
845		color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_1D_TILED_THIN1);
846		break;
847	case RADEON_SURF_MODE_2D:
848		color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_2D_TILED_THIN1);
849		break;
850	}
851
852	desc = util_format_description(surf->base.format);
853
854	for (i = 0; i < 4; i++) {
855		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
856			break;
857		}
858	}
859
860	ntype = V_0280A0_NUMBER_UNORM;
861	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
862		ntype = V_0280A0_NUMBER_SRGB;
863	else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
864		if (desc->channel[i].normalized)
865			ntype = V_0280A0_NUMBER_SNORM;
866		else if (desc->channel[i].pure_integer)
867			ntype = V_0280A0_NUMBER_SINT;
868	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
869		if (desc->channel[i].normalized)
870			ntype = V_0280A0_NUMBER_UNORM;
871		else if (desc->channel[i].pure_integer)
872			ntype = V_0280A0_NUMBER_UINT;
873	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT) {
874		ntype = V_0280A0_NUMBER_FLOAT;
875	}
876
877	if (R600_BIG_ENDIAN)
878		do_endian_swap = !rtex->db_compatible;
879
880	format = r600_translate_colorformat(rctx->b.chip_class, surf->base.format,
881			                              do_endian_swap);
882	assert(format != ~0);
883
884	swap = r600_translate_colorswap(surf->base.format, do_endian_swap);
885	assert(swap != ~0);
886
887	endian = r600_colorformat_endian_swap(format, do_endian_swap);
888
889	/* blend clamp should be set for all NORM/SRGB types */
890	if (ntype == V_0280A0_NUMBER_UNORM || ntype == V_0280A0_NUMBER_SNORM ||
891	    ntype == V_0280A0_NUMBER_SRGB)
892		blend_clamp = 1;
893
894	/* set blend bypass according to docs if SINT/UINT or
895	   8/24 COLOR variants */
896	if (ntype == V_0280A0_NUMBER_UINT || ntype == V_0280A0_NUMBER_SINT ||
897	    format == V_0280A0_COLOR_8_24 || format == V_0280A0_COLOR_24_8 ||
898	    format == V_0280A0_COLOR_X24_8_32_FLOAT) {
899		blend_clamp = 0;
900		blend_bypass = 1;
901	}
902
903	surf->alphatest_bypass = ntype == V_0280A0_NUMBER_UINT || ntype == V_0280A0_NUMBER_SINT;
904
905	color_info |= S_0280A0_FORMAT(format) |
906		S_0280A0_COMP_SWAP(swap) |
907		S_0280A0_BLEND_BYPASS(blend_bypass) |
908		S_0280A0_BLEND_CLAMP(blend_clamp) |
909		S_0280A0_SIMPLE_FLOAT(1) |
910		S_0280A0_NUMBER_TYPE(ntype) |
911		S_0280A0_ENDIAN(endian);
912
913	/* EXPORT_NORM is an optimization that can be enabled for better
914	 * performance in certain cases.
915	 */
916	if (rctx->b.chip_class == R600) {
917		/* EXPORT_NORM can be enabled if:
918		 * - 11-bit or smaller UNORM/SNORM/SRGB
919		 * - BLEND_CLAMP is enabled
920		 * - BLEND_FLOAT32 is disabled
921		 */
922		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS &&
923		    (desc->channel[i].size < 12 &&
924		     desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT &&
925		     ntype != V_0280A0_NUMBER_UINT &&
926		     ntype != V_0280A0_NUMBER_SINT) &&
927		    G_0280A0_BLEND_CLAMP(color_info) &&
928		    /* XXX this condition is always true since BLEND_FLOAT32 is never set (bug?). */
929		    !G_0280A0_BLEND_FLOAT32(color_info)) {
930			color_info |= S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM);
931			surf->export_16bpc = true;
932		}
933	} else {
934		/* EXPORT_NORM can be enabled if:
935		 * - 11-bit or smaller UNORM/SNORM/SRGB
936		 * - 16-bit or smaller FLOAT
937		 */
938		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS &&
939		    ((desc->channel[i].size < 12 &&
940		      desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT &&
941		      ntype != V_0280A0_NUMBER_UINT && ntype != V_0280A0_NUMBER_SINT) ||
942		    (desc->channel[i].size < 17 &&
943		     desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT))) {
944			color_info |= S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM);
945			surf->export_16bpc = true;
946		}
947	}
948
949	/* These might not always be initialized to zero. */
950	surf->cb_color_base = offset >> 8;
951	surf->cb_color_size = S_028060_PITCH_TILE_MAX(pitch) |
952			      S_028060_SLICE_TILE_MAX(slice);
953	surf->cb_color_fmask = surf->cb_color_base;
954	surf->cb_color_cmask = surf->cb_color_base;
955	surf->cb_color_mask = 0;
956
957	r600_resource_reference(&surf->cb_buffer_cmask, &rtex->resource);
958	r600_resource_reference(&surf->cb_buffer_fmask, &rtex->resource);
959
960	if (rtex->cmask.size) {
961		surf->cb_color_cmask = rtex->cmask.offset >> 8;
962		surf->cb_color_mask |= S_028100_CMASK_BLOCK_MAX(rtex->cmask.slice_tile_max);
963
964		if (rtex->fmask.size) {
965			color_info |= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE);
966			surf->cb_color_fmask = rtex->fmask.offset >> 8;
967			surf->cb_color_mask |= S_028100_FMASK_TILE_MAX(rtex->fmask.slice_tile_max);
968		} else { /* cmask only */
969			color_info |= S_0280A0_TILE_MODE(V_0280A0_CLEAR_ENABLE);
970		}
971	} else if (force_cmask_fmask) {
972		/* Allocate dummy FMASK and CMASK if they aren't allocated already.
973		 *
974		 * R6xx needs FMASK and CMASK for the destination buffer of color resolve,
975		 * otherwise it hangs. We don't have FMASK and CMASK pre-allocated,
976		 * because it's not an MSAA buffer.
977		 */
978		struct r600_cmask_info cmask;
979		struct r600_fmask_info fmask;
980
981		r600_texture_get_cmask_info(&rscreen->b, rtex, &cmask);
982		r600_texture_get_fmask_info(&rscreen->b, rtex, 8, &fmask);
983
984		/* CMASK. */
985		if (!rctx->dummy_cmask ||
986		    rctx->dummy_cmask->b.b.width0 < cmask.size ||
987		    rctx->dummy_cmask->buf->alignment % cmask.alignment != 0) {
988			struct pipe_transfer *transfer;
989			void *ptr;
990
991			r600_resource_reference(&rctx->dummy_cmask, NULL);
992			rctx->dummy_cmask = (struct r600_resource*)
993				r600_aligned_buffer_create(&rscreen->b.b, 0,
994							   PIPE_USAGE_DEFAULT,
995							   cmask.size, cmask.alignment);
996
997			if (unlikely(!rctx->dummy_cmask)) {
998				surf->color_initialized = false;
999				return;
1000			}
1001
1002			/* Set the contents to 0xCC. */
1003			ptr = pipe_buffer_map(&rctx->b.b, &rctx->dummy_cmask->b.b, PIPE_TRANSFER_WRITE, &transfer);
1004			memset(ptr, 0xCC, cmask.size);
1005			pipe_buffer_unmap(&rctx->b.b, transfer);
1006		}
1007		r600_resource_reference(&surf->cb_buffer_cmask, rctx->dummy_cmask);
1008
1009		/* FMASK. */
1010		if (!rctx->dummy_fmask ||
1011		    rctx->dummy_fmask->b.b.width0 < fmask.size ||
1012		    rctx->dummy_fmask->buf->alignment % fmask.alignment != 0) {
1013			r600_resource_reference(&rctx->dummy_fmask, NULL);
1014			rctx->dummy_fmask = (struct r600_resource*)
1015				r600_aligned_buffer_create(&rscreen->b.b, 0,
1016							   PIPE_USAGE_DEFAULT,
1017							   fmask.size, fmask.alignment);
1018
1019			if (unlikely(!rctx->dummy_fmask)) {
1020				surf->color_initialized = false;
1021				return;
1022			}
1023		}
1024		r600_resource_reference(&surf->cb_buffer_fmask, rctx->dummy_fmask);
1025
1026		/* Init the registers. */
1027		color_info |= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE);
1028		surf->cb_color_cmask = 0;
1029		surf->cb_color_fmask = 0;
1030		surf->cb_color_mask = S_028100_CMASK_BLOCK_MAX(cmask.slice_tile_max) |
1031				      S_028100_FMASK_TILE_MAX(fmask.slice_tile_max);
1032	}
1033
1034	surf->cb_color_info = color_info;
1035	surf->cb_color_view = color_view;
1036	surf->color_initialized = true;
1037}
1038
1039static void r600_init_depth_surface(struct r600_context *rctx,
1040				    struct r600_surface *surf)
1041{
1042	struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
1043	unsigned level, pitch, slice, format, offset, array_mode;
1044
1045	level = surf->base.u.tex.level;
1046	offset = rtex->surface.u.legacy.level[level].offset;
1047	pitch = rtex->surface.u.legacy.level[level].nblk_x / 8 - 1;
1048	slice = (rtex->surface.u.legacy.level[level].nblk_x * rtex->surface.u.legacy.level[level].nblk_y) / 64;
1049	if (slice) {
1050		slice = slice - 1;
1051	}
1052	switch (rtex->surface.u.legacy.level[level].mode) {
1053	case RADEON_SURF_MODE_2D:
1054		array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
1055		break;
1056	case RADEON_SURF_MODE_1D:
1057	case RADEON_SURF_MODE_LINEAR_ALIGNED:
1058	default:
1059		array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
1060		break;
1061	}
1062
1063	format = r600_translate_dbformat(surf->base.format);
1064	assert(format != ~0);
1065
1066	surf->db_depth_info = S_028010_ARRAY_MODE(array_mode) | S_028010_FORMAT(format);
1067	surf->db_depth_base = offset >> 8;
1068	surf->db_depth_view = S_028004_SLICE_START(surf->base.u.tex.first_layer) |
1069			      S_028004_SLICE_MAX(surf->base.u.tex.last_layer);
1070	surf->db_depth_size = S_028000_PITCH_TILE_MAX(pitch) | S_028000_SLICE_TILE_MAX(slice);
1071	surf->db_prefetch_limit = (rtex->surface.u.legacy.level[level].nblk_y / 8) - 1;
1072
1073	if (r600_htile_enabled(rtex, level)) {
1074		surf->db_htile_data_base = rtex->htile_offset >> 8;
1075		surf->db_htile_surface = S_028D24_HTILE_WIDTH(1) |
1076					 S_028D24_HTILE_HEIGHT(1) |
1077					 S_028D24_FULL_CACHE(1);
1078		/* preload is not working properly on r6xx/r7xx */
1079		surf->db_depth_info |= S_028010_TILE_SURFACE_ENABLE(1);
1080	}
1081
1082	surf->depth_initialized = true;
1083}
1084
1085static void r600_set_framebuffer_state(struct pipe_context *ctx,
1086					const struct pipe_framebuffer_state *state)
1087{
1088	struct r600_context *rctx = (struct r600_context *)ctx;
1089	struct r600_surface *surf;
1090	struct r600_texture *rtex;
1091	unsigned i;
1092	uint32_t target_mask = 0;
1093
1094	/* Flush TC when changing the framebuffer state, because the only
1095	 * client not using TC that can change textures is the framebuffer.
1096	 * Other places don't typically have to flush TC.
1097	 */
1098	rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE |
1099			 R600_CONTEXT_FLUSH_AND_INV |
1100			 R600_CONTEXT_FLUSH_AND_INV_CB |
1101			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
1102			 R600_CONTEXT_FLUSH_AND_INV_DB |
1103			 R600_CONTEXT_FLUSH_AND_INV_DB_META |
1104			 R600_CONTEXT_INV_TEX_CACHE;
1105
1106	/* Set the new state. */
1107	util_copy_framebuffer_state(&rctx->framebuffer.state, state);
1108
1109	rctx->framebuffer.export_16bpc = state->nr_cbufs != 0;
1110	rctx->framebuffer.cb0_is_integer = state->nr_cbufs && state->cbufs[0] &&
1111			       util_format_is_pure_integer(state->cbufs[0]->format);
1112	rctx->framebuffer.compressed_cb_mask = 0;
1113	rctx->framebuffer.is_msaa_resolve = state->nr_cbufs == 2 &&
1114					    state->cbufs[0] && state->cbufs[1] &&
1115					    state->cbufs[0]->texture->nr_samples > 1 &&
1116				            state->cbufs[1]->texture->nr_samples <= 1;
1117	rctx->framebuffer.nr_samples = util_framebuffer_get_num_samples(state);
1118
1119	/* Colorbuffers. */
1120	for (i = 0; i < state->nr_cbufs; i++) {
1121		/* The resolve buffer must have CMASK and FMASK to prevent hardlocks on R6xx. */
1122		bool force_cmask_fmask = rctx->b.chip_class == R600 &&
1123					 rctx->framebuffer.is_msaa_resolve &&
1124					 i == 1;
1125
1126		surf = (struct r600_surface*)state->cbufs[i];
1127		if (!surf)
1128			continue;
1129
1130		rtex = (struct r600_texture*)surf->base.texture;
1131		r600_context_add_resource_size(ctx, state->cbufs[i]->texture);
1132
1133		target_mask |= (0xf << (i * 4));
1134
1135		if (!surf->color_initialized || force_cmask_fmask) {
1136			r600_init_color_surface(rctx, surf, force_cmask_fmask);
1137			if (force_cmask_fmask) {
1138				/* re-initialize later without compression */
1139				surf->color_initialized = false;
1140			}
1141		}
1142
1143		if (!surf->export_16bpc) {
1144			rctx->framebuffer.export_16bpc = false;
1145		}
1146
1147		if (rtex->fmask.size) {
1148			rctx->framebuffer.compressed_cb_mask |= 1 << i;
1149		}
1150	}
1151
1152	/* Update alpha-test state dependencies.
1153	 * Alpha-test is done on the first colorbuffer only. */
1154	if (state->nr_cbufs) {
1155		bool alphatest_bypass = false;
1156
1157		surf = (struct r600_surface*)state->cbufs[0];
1158		if (surf) {
1159			alphatest_bypass = surf->alphatest_bypass;
1160		}
1161
1162		if (rctx->alphatest_state.bypass != alphatest_bypass) {
1163			rctx->alphatest_state.bypass = alphatest_bypass;
1164			r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
1165		}
1166	}
1167
1168	/* ZS buffer. */
1169	if (state->zsbuf) {
1170		surf = (struct r600_surface*)state->zsbuf;
1171
1172		r600_context_add_resource_size(ctx, state->zsbuf->texture);
1173
1174		if (!surf->depth_initialized) {
1175			r600_init_depth_surface(rctx, surf);
1176		}
1177
1178		if (state->zsbuf->format != rctx->poly_offset_state.zs_format) {
1179			rctx->poly_offset_state.zs_format = state->zsbuf->format;
1180			r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
1181		}
1182
1183		if (rctx->db_state.rsurf != surf) {
1184			rctx->db_state.rsurf = surf;
1185			r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
1186			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
1187		}
1188	} else if (rctx->db_state.rsurf) {
1189		rctx->db_state.rsurf = NULL;
1190		r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
1191		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
1192	}
1193
1194	if (rctx->cb_misc_state.nr_cbufs != state->nr_cbufs ||
1195	    rctx->cb_misc_state.bound_cbufs_target_mask != target_mask) {
1196		rctx->cb_misc_state.bound_cbufs_target_mask = target_mask;
1197		rctx->cb_misc_state.nr_cbufs = state->nr_cbufs;
1198		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
1199	}
1200
1201	if (state->nr_cbufs == 0 && rctx->alphatest_state.bypass) {
1202		rctx->alphatest_state.bypass = false;
1203		r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
1204	}
1205
1206	/* Calculate the CS size. */
1207	rctx->framebuffer.atom.num_dw =
1208		10 /*COLOR_INFO*/ + 4 /*SCISSOR*/ + 3 /*SHADER_CONTROL*/ + 8 /*MSAA*/;
1209
1210	if (rctx->framebuffer.state.nr_cbufs) {
1211		rctx->framebuffer.atom.num_dw += 15 * rctx->framebuffer.state.nr_cbufs;
1212		rctx->framebuffer.atom.num_dw += 3 * (2 + rctx->framebuffer.state.nr_cbufs);
1213	}
1214	if (rctx->framebuffer.state.zsbuf) {
1215		rctx->framebuffer.atom.num_dw += 16;
1216	} else if (rctx->screen->b.info.drm_minor >= 18) {
1217		rctx->framebuffer.atom.num_dw += 3;
1218	}
1219	if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770) {
1220		rctx->framebuffer.atom.num_dw += 2;
1221	}
1222
1223	r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
1224
1225	r600_set_sample_locations_constant_buffer(rctx);
1226	rctx->framebuffer.do_update_surf_dirtiness = true;
1227}
1228
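/*
 * MSAA sample locations: each FILL_SREG() dword packs signed 4-bit X/Y sample
 * offsets in 1/16th-pixel units around the pixel center, and max_dist_* is the
 * largest such offset (programmed as MAX_SAMPLE_DIST).  r600_get_sample_position()
 * decodes the same tables back into [0,1) positions.
 */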
1229static const uint32_t sample_locs_2x[] = {
1230	FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4),
1231	FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4),
1232};
1233static const unsigned max_dist_2x = 4;
1234
1235static const uint32_t sample_locs_4x[] = {
1236	FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6),
1237	FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6),
1238};
1239static const unsigned max_dist_4x = 6;
1240static const uint32_t sample_locs_8x[] = {
1241	FILL_SREG(-1,  1,  1,  5,  3, -5,  5,  3),
1242	FILL_SREG(-7, -1, -3, -7,  7, -3, -5,  7),
1243};
1244static const unsigned max_dist_8x = 7;
1245
1246static void r600_get_sample_position(struct pipe_context *ctx,
1247				     unsigned sample_count,
1248				     unsigned sample_index,
1249				     float *out_value)
1250{
1251	int offset, index;
1252	struct {
1253		int idx:4;
1254	} val;
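	/* the signed 4-bit bitfield sign-extends each packed sample coordinate */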
1255	switch (sample_count) {
1256	case 1:
1257	default:
1258		out_value[0] = out_value[1] = 0.5;
1259		break;
1260	case 2:
1261		offset = 4 * (sample_index * 2);
1262		val.idx = (sample_locs_2x[0] >> offset) & 0xf;
1263		out_value[0] = (float)(val.idx + 8) / 16.0f;
1264		val.idx = (sample_locs_2x[0] >> (offset + 4)) & 0xf;
1265		out_value[1] = (float)(val.idx + 8) / 16.0f;
1266		break;
1267	case 4:
1268		offset = 4 * (sample_index * 2);
1269		val.idx = (sample_locs_4x[0] >> offset) & 0xf;
1270		out_value[0] = (float)(val.idx + 8) / 16.0f;
1271		val.idx = (sample_locs_4x[0] >> (offset + 4)) & 0xf;
1272		out_value[1] = (float)(val.idx + 8) / 16.0f;
1273		break;
1274	case 8:
1275		offset = 4 * (sample_index % 4 * 2);
1276		index = (sample_index / 4);
1277		val.idx = (sample_locs_8x[index] >> offset) & 0xf;
1278		out_value[0] = (float)(val.idx + 8) / 16.0f;
1279		val.idx = (sample_locs_8x[index] >> (offset + 4)) & 0xf;
1280		out_value[1] = (float)(val.idx + 8) / 16.0f;
1281		break;
1282	}
1283}
1284
1285static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples)
1286{
1287	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1288	unsigned max_dist = 0;
1289
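	/* The original R600 programs sample locations through config registers
	 * (PA_SC_AA_SAMPLE_LOCS_*S); later chips use the per-context
	 * PA_SC_AA_SAMPLE_LOCS_MCTX pair. */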
1290	if (rctx->b.family == CHIP_R600) {
1291		switch (nr_samples) {
1292		default:
1293			nr_samples = 0;
1294			break;
1295		case 2:
1296			radeon_set_config_reg(cs, R_008B40_PA_SC_AA_SAMPLE_LOCS_2S, sample_locs_2x[0]);
1297			max_dist = max_dist_2x;
1298			break;
1299		case 4:
1300			radeon_set_config_reg(cs, R_008B44_PA_SC_AA_SAMPLE_LOCS_4S, sample_locs_4x[0]);
1301			max_dist = max_dist_4x;
1302			break;
1303		case 8:
1304			radeon_set_config_reg_seq(cs, R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0, 2);
1305			radeon_emit(cs, sample_locs_8x[0]); /* R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0 */
1306			radeon_emit(cs, sample_locs_8x[1]); /* R_008B4C_PA_SC_AA_SAMPLE_LOCS_8S_WD1 */
1307			max_dist = max_dist_8x;
1308			break;
1309		}
1310	} else {
1311		switch (nr_samples) {
1312		default:
1313			radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
1314			radeon_emit(cs, 0); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
1315			radeon_emit(cs, 0); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
1316			nr_samples = 0;
1317			break;
1318		case 2:
1319			radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
1320			radeon_emit(cs, sample_locs_2x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
1321			radeon_emit(cs, sample_locs_2x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
1322			max_dist = max_dist_2x;
1323			break;
1324		case 4:
1325			radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
1326			radeon_emit(cs, sample_locs_4x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
1327			radeon_emit(cs, sample_locs_4x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
1328			max_dist = max_dist_4x;
1329			break;
1330		case 8:
1331			radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
1332			radeon_emit(cs, sample_locs_8x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
1333			radeon_emit(cs, sample_locs_8x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
1334			max_dist = max_dist_8x;
1335			break;
1336		}
1337	}
1338
1339	if (nr_samples > 1) {
1340		radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
1341		radeon_emit(cs, S_028C00_LAST_PIXEL(1) |
1342				     S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */
1343		radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) |
1344				     S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */
1345	} else {
1346		radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
1347		radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */
1348		radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */
1349	}
1350}
1351
1352static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom)
1353{
1354	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1355	struct pipe_framebuffer_state *state = &rctx->framebuffer.state;
1356	unsigned nr_cbufs = state->nr_cbufs;
1357	struct r600_surface **cb = (struct r600_surface**)&state->cbufs[0];
1358	unsigned i, sbu = 0;
1359
1360	/* Colorbuffers. */
1361	radeon_set_context_reg_seq(cs, R_0280A0_CB_COLOR0_INFO, 8);
1362	for (i = 0; i < nr_cbufs; i++) {
1363		radeon_emit(cs, cb[i] ? cb[i]->cb_color_info : 0);
1364	}
1365	/* set CB_COLOR1_INFO for possible dual-src blending */
1366	if (rctx->framebuffer.dual_src_blend && i == 1 && cb[0]) {
1367		radeon_emit(cs, cb[0]->cb_color_info);
1368		i++;
1369	}
1370	for (; i < 8; i++) {
1371		radeon_emit(cs, 0);
1372	}
1373
1374	if (nr_cbufs) {
1375		for (i = 0; i < nr_cbufs; i++) {
1376			unsigned reloc;
1377
1378			if (!cb[i])
1379				continue;
1380
1381			/* COLOR_BASE */
1382			radeon_set_context_reg(cs, R_028040_CB_COLOR0_BASE + i*4, cb[i]->cb_color_base);
1383
1384			reloc = radeon_add_to_buffer_list(&rctx->b,
1385						      &rctx->b.gfx,
1386						      (struct r600_resource*)cb[i]->base.texture,
1387						      RADEON_USAGE_READWRITE,
1388						      cb[i]->base.texture->nr_samples > 1 ?
1389							      RADEON_PRIO_COLOR_BUFFER_MSAA :
1390							      RADEON_PRIO_COLOR_BUFFER);
1391			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1392			radeon_emit(cs, reloc);
1393
1394			/* FMASK */
1395			radeon_set_context_reg(cs, R_0280E0_CB_COLOR0_FRAG + i*4, cb[i]->cb_color_fmask);
1396
1397			reloc = radeon_add_to_buffer_list(&rctx->b,
1398						      &rctx->b.gfx,
1399						      cb[i]->cb_buffer_fmask,
1400						      RADEON_USAGE_READWRITE,
1401						      cb[i]->base.texture->nr_samples > 1 ?
1402							      RADEON_PRIO_COLOR_BUFFER_MSAA :
1403							      RADEON_PRIO_COLOR_BUFFER);
1404			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1405			radeon_emit(cs, reloc);
1406
1407			/* CMASK */
1408			radeon_set_context_reg(cs, R_0280C0_CB_COLOR0_TILE + i*4, cb[i]->cb_color_cmask);
1409
1410			reloc = radeon_add_to_buffer_list(&rctx->b,
1411						      &rctx->b.gfx,
1412						      cb[i]->cb_buffer_cmask,
1413						      RADEON_USAGE_READWRITE,
1414						      cb[i]->base.texture->nr_samples > 1 ?
1415							      RADEON_PRIO_COLOR_BUFFER_MSAA :
1416							      RADEON_PRIO_COLOR_BUFFER);
1417			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1418			radeon_emit(cs, reloc);
1419		}
1420
1421		radeon_set_context_reg_seq(cs, R_028060_CB_COLOR0_SIZE, nr_cbufs);
1422		for (i = 0; i < nr_cbufs; i++) {
1423			radeon_emit(cs, cb[i] ? cb[i]->cb_color_size : 0);
1424		}
1425
1426		radeon_set_context_reg_seq(cs, R_028080_CB_COLOR0_VIEW, nr_cbufs);
1427		for (i = 0; i < nr_cbufs; i++) {
1428			radeon_emit(cs, cb[i] ? cb[i]->cb_color_view : 0);
1429		}
1430
1431		radeon_set_context_reg_seq(cs, R_028100_CB_COLOR0_MASK, nr_cbufs);
1432		for (i = 0; i < nr_cbufs; i++) {
1433			radeon_emit(cs, cb[i] ? cb[i]->cb_color_mask : 0);
1434		}
1435
1436		sbu |= SURFACE_BASE_UPDATE_COLOR_NUM(nr_cbufs);
1437	}
1438
1439	/* SURFACE_BASE_UPDATE */
1440	if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770 && sbu) {
1441		radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0));
1442		radeon_emit(cs, sbu);
1443		sbu = 0;
1444	}
1445
1446	/* Zbuffer. */
1447	if (state->zsbuf) {
1448		struct r600_surface *surf = (struct r600_surface*)state->zsbuf;
1449		unsigned reloc = radeon_add_to_buffer_list(&rctx->b,
1450						       &rctx->b.gfx,
1451						       (struct r600_resource*)state->zsbuf->texture,
1452						       RADEON_USAGE_READWRITE,
1453						       surf->base.texture->nr_samples > 1 ?
1454							       RADEON_PRIO_DEPTH_BUFFER_MSAA :
1455							       RADEON_PRIO_DEPTH_BUFFER);
1456
1457		radeon_set_context_reg_seq(cs, R_028000_DB_DEPTH_SIZE, 2);
1458		radeon_emit(cs, surf->db_depth_size); /* R_028000_DB_DEPTH_SIZE */
1459		radeon_emit(cs, surf->db_depth_view); /* R_028004_DB_DEPTH_VIEW */
1460		radeon_set_context_reg_seq(cs, R_02800C_DB_DEPTH_BASE, 2);
1461		radeon_emit(cs, surf->db_depth_base); /* R_02800C_DB_DEPTH_BASE */
1462		radeon_emit(cs, surf->db_depth_info); /* R_028010_DB_DEPTH_INFO */
1463
1464		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1465		radeon_emit(cs, reloc);
1466
1467		radeon_set_context_reg(cs, R_028D34_DB_PREFETCH_LIMIT, surf->db_prefetch_limit);
1468
1469		sbu |= SURFACE_BASE_UPDATE_DEPTH;
1470	} else if (rctx->screen->b.info.drm_minor >= 18) {
1471		/* DRM 2.6.18 allows the INVALID format to disable depth/stencil.
1472		 * Older kernels are out of luck. */
1473		radeon_set_context_reg(cs, R_028010_DB_DEPTH_INFO, S_028010_FORMAT(V_028010_DEPTH_INVALID));
1474	}
1475
1476	/* SURFACE_BASE_UPDATE */
1477	if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770 && sbu) {
1478		radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0));
1479		radeon_emit(cs, sbu);
1480		sbu = 0;
1481	}
1482
1483	/* Framebuffer dimensions. */
1484	radeon_set_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2);
1485	radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) |
1486			     S_028240_WINDOW_OFFSET_DISABLE(1)); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */
1487	radeon_emit(cs, S_028244_BR_X(state->width) |
1488			     S_028244_BR_Y(state->height)); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */
1489
1490	if (rctx->framebuffer.is_msaa_resolve) {
1491		radeon_set_context_reg(cs, R_0287A0_CB_SHADER_CONTROL, 1);
1492	} else {
1493		/* Always enable the first colorbuffer in CB_SHADER_CONTROL. This
1494		 * ensures that the alpha-test works even if there is
1495		 * no colorbuffer bound. */
1496		radeon_set_context_reg(cs, R_0287A0_CB_SHADER_CONTROL,
1497				       (1ull << MAX2(nr_cbufs, 1)) - 1);
1498	}
1499
1500	r600_emit_msaa_state(rctx, rctx->framebuffer.nr_samples);
1501}
1502
1503static void r600_set_min_samples(struct pipe_context *ctx, unsigned min_samples)
1504{
1505	struct r600_context *rctx = (struct r600_context *)ctx;
1506
1507	if (rctx->ps_iter_samples == min_samples)
1508		return;
1509
1510	rctx->ps_iter_samples = min_samples;
1511	if (rctx->framebuffer.nr_samples > 1) {
1512		r600_mark_atom_dirty(rctx, &rctx->rasterizer_state.atom);
1513		if (rctx->b.chip_class == R600)
1514			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
1515	}
1516}
1517
1518static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom)
1519{
1520	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1521	struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom;
1522
1523	if (G_028808_SPECIAL_OP(a->cb_color_control) == V_028808_SPECIAL_RESOLVE_BOX) {
1524		radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
1525		if (rctx->b.chip_class == R600) {
1526			radeon_emit(cs, 0xff); /* R_028238_CB_TARGET_MASK */
1527			radeon_emit(cs, 0xff); /* R_02823C_CB_SHADER_MASK */
1528		} else {
1529			radeon_emit(cs, 0xf); /* R_028238_CB_TARGET_MASK */
1530			radeon_emit(cs, 0xf); /* R_02823C_CB_SHADER_MASK */
1531		}
1532		radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control);
1533	} else {
1534		unsigned fb_colormask = a->bound_cbufs_target_mask;
1535		unsigned ps_colormask = a->ps_color_export_mask;
1536		unsigned multiwrite = a->multiwrite && a->nr_cbufs > 1;
1537
1538		radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
1539		radeon_emit(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */
1540		/* Always enable the first color output to make sure alpha-test works even without one. */
1541		radeon_emit(cs, 0xf | (multiwrite ? fb_colormask : ps_colormask)); /* R_02823C_CB_SHADER_MASK */
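		/* Worked example (hypothetical values, for illustration only):
		 * two bound colorbuffers give fb_colormask = 0xff (4 channel
		 * bits per target); if the blend state only writes RGB of
		 * target 0, blend_colormask = 0x07 and TARGET_MASK becomes
		 * 0x07, while SHADER_MASK always keeps 0xf for MRT0. */
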
1542		radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL,
1543				       a->cb_color_control |
1544				       S_028808_MULTIWRITE_ENABLE(multiwrite));
1545	}
1546}
1547
1548static void r600_emit_db_state(struct r600_context *rctx, struct r600_atom *atom)
1549{
1550	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1551	struct r600_db_state *a = (struct r600_db_state*)atom;
1552
1553	if (a->rsurf && a->rsurf->db_htile_surface) {
1554		struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture;
1555		unsigned reloc_idx;
1556
1557		radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
1558		radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
1559		radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
1560		reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, &rtex->resource,
1561						  RADEON_USAGE_READWRITE, RADEON_PRIO_SEPARATE_META);
1562		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1563		radeon_emit(cs, reloc_idx);
1564	} else {
1565		radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, 0);
1566	}
1567}
1568
1569static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom)
1570{
1571	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1572	struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom;
1573	unsigned db_render_control = 0;
1574	unsigned db_render_override =
1575		S_028D10_FORCE_HIS_ENABLE0(V_028D10_FORCE_DISABLE) |
1576		S_028D10_FORCE_HIS_ENABLE1(V_028D10_FORCE_DISABLE);
1577
1578	if (rctx->b.chip_class >= R700) {
1579		switch (a->ps_conservative_z) {
1580		default: /* fall through */
1581		case TGSI_FS_DEPTH_LAYOUT_ANY:
1582			db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_ANY_Z);
1583			break;
1584		case TGSI_FS_DEPTH_LAYOUT_GREATER:
1585			db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_GREATER_THAN_Z);
1586			break;
1587		case TGSI_FS_DEPTH_LAYOUT_LESS:
1588			db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_LESS_THAN_Z);
1589			break;
1590		}
1591	}
1592
1593	if (rctx->b.num_occlusion_queries > 0 &&
1594	    !a->occlusion_queries_disabled) {
1595		if (rctx->b.chip_class >= R700) {
1596			db_render_control |= S_028D0C_R700_PERFECT_ZPASS_COUNTS(1);
1597		}
1598		db_render_override |= S_028D10_NOOP_CULL_DISABLE(1);
1599	} else {
1600		db_render_control |= S_028D0C_ZPASS_INCREMENT_DISABLE(1);
1601	}
1602
1603	if (rctx->db_state.rsurf && rctx->db_state.rsurf->db_htile_surface) {
1604		/* FORCE_OFF means HiZ/HiS are determined by DB_SHADER_CONTROL */
1605		db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_OFF);
1606		/* This fixes a lockup when hyperz and alpha test are enabled at
1607		 * the same time; the GPU somehow gets confused about which order
1608		 * to run the Z test in.
1609		 */
1610		if (rctx->alphatest_state.sx_alpha_test_control) {
1611			db_render_override |= S_028D10_FORCE_SHADER_Z_ORDER(1);
1612		}
1613	} else {
1614		db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE);
1615	}
1616	if (rctx->b.chip_class == R600 && rctx->framebuffer.nr_samples > 1 && rctx->ps_iter_samples > 0) {
1617		/* sample shading and hyperz cause lockups on R6xx chips */
1618		db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE);
1619	}
1620	if (a->flush_depthstencil_through_cb) {
1621		assert(a->copy_depth || a->copy_stencil);
1622
1623		db_render_control |= S_028D0C_DEPTH_COPY_ENABLE(a->copy_depth) |
1624				     S_028D0C_STENCIL_COPY_ENABLE(a->copy_stencil) |
1625				     S_028D0C_COPY_CENTROID(1) |
1626				     S_028D0C_COPY_SAMPLE(a->copy_sample);
1627
1628		if (rctx->b.chip_class == R600)
1629			db_render_override |= S_028D10_NOOP_CULL_DISABLE(1);
1630
1631		if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 ||
1632		    rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635)
1633			db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE);
1634	} else if (a->flush_depth_inplace || a->flush_stencil_inplace) {
1635		db_render_control |= S_028D0C_DEPTH_COMPRESS_DISABLE(a->flush_depth_inplace) |
1636				     S_028D0C_STENCIL_COMPRESS_DISABLE(a->flush_stencil_inplace);
1637		db_render_override |= S_028D10_NOOP_CULL_DISABLE(1);
1638	}
1639	if (a->htile_clear) {
1640		db_render_control |= S_028D0C_DEPTH_CLEAR_ENABLE(1);
1641	}
1642
1643	/* RV770 workaround for a hang with 8x MSAA. */
1644	if (rctx->b.family == CHIP_RV770 && a->log_samples == 3) {
1645		db_render_override |= S_028D10_MAX_TILES_IN_DTT(6);
1646	}
1647
1648	radeon_set_context_reg_seq(cs, R_028D0C_DB_RENDER_CONTROL, 2);
1649	radeon_emit(cs, db_render_control); /* R_028D0C_DB_RENDER_CONTROL */
1650	radeon_emit(cs, db_render_override); /* R_028D10_DB_RENDER_OVERRIDE */
1651	radeon_set_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control);
1652}
1653
1654static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom *atom)
1655{
1656	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1657	struct r600_config_state *a = (struct r600_config_state*)atom;
1658
1659	radeon_set_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1);
1660	radeon_set_config_reg(cs, R_008C08_SQ_GPR_RESOURCE_MGMT_2, a->sq_gpr_resource_mgmt_2);
1661}
1662
1663static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom)
1664{
1665	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1666	uint32_t dirty_mask = rctx->vertex_buffer_state.dirty_mask;
1667
1668	while (dirty_mask) {
1669		struct pipe_vertex_buffer *vb;
1670		struct r600_resource *rbuffer;
1671		unsigned offset;
1672		unsigned buffer_index = u_bit_scan(&dirty_mask);
1673
1674		vb = &rctx->vertex_buffer_state.vb[buffer_index];
1675		rbuffer = (struct r600_resource*)vb->buffer.resource;
1676		assert(rbuffer);
1677
1678		offset = vb->buffer_offset;
1679
1680		/* fetch resources start at index 320 (OFFSET_FS) */
1681		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
1682		radeon_emit(cs, (R600_FETCH_CONSTANTS_OFFSET_FS + buffer_index) * 7);
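		/* Illustrative example: each fetch resource takes 7 dwords, so
		 * for buffer_index = 2 this selects resource slot 320 + 2 and
		 * emits the dword offset (320 + 2) * 7 = 2254. */
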
1683		radeon_emit(cs, offset); /* RESOURCEi_WORD0 */
1684		radeon_emit(cs, rbuffer->b.b.width0 - offset - 1); /* RESOURCEi_WORD1 */
1685		radeon_emit(cs, /* RESOURCEi_WORD2 */
1686				 S_038008_ENDIAN_SWAP(r600_endian_swap(32)) |
1687				 S_038008_STRIDE(vb->stride));
1688		radeon_emit(cs, 0); /* RESOURCEi_WORD3 */
1689		radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
1690		radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
1691		radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */
1692
1693		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1694		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
1695						      RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER));
1696	}
1697}
1698
1699static void r600_emit_constant_buffers(struct r600_context *rctx,
1700				       struct r600_constbuf_state *state,
1701				       unsigned buffer_id_base,
1702				       unsigned reg_alu_constbuf_size,
1703				       unsigned reg_alu_const_cache)
1704{
1705	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1706	uint32_t dirty_mask = state->dirty_mask;
1707
1708	while (dirty_mask) {
1709		struct pipe_constant_buffer *cb;
1710		struct r600_resource *rbuffer;
1711		unsigned offset;
1712		unsigned buffer_index = ffs(dirty_mask) - 1;
1713		unsigned gs_ring_buffer = (buffer_index == R600_GS_RING_CONST_BUFFER);
1714		cb = &state->cb[buffer_index];
1715		rbuffer = (struct r600_resource*)cb->buffer;
1716		assert(rbuffer);
1717
1718		offset = cb->buffer_offset;
1719
1720		if (!gs_ring_buffer) {
1721			assert(buffer_index < R600_MAX_HW_CONST_BUFFERS);
1722			radeon_set_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4,
1723					       DIV_ROUND_UP(cb->buffer_size, 256));
1724			radeon_set_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8);
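			/* Illustrative example: the size register is in units of
			 * 256 bytes and the cache base in 256-byte blocks, so a
			 * 1000-byte constant buffer stores DIV_ROUND_UP(1000, 256)
			 * = 4, and a buffer_offset of 0x2000 stores 0x20. */
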
1725			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1726			radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
1727								  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
1728		}
1729
1730		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
1731		radeon_emit(cs, (buffer_id_base + buffer_index) * 7);
1732		radeon_emit(cs, offset); /* RESOURCEi_WORD0 */
1733		radeon_emit(cs, cb->buffer_size - 1); /* RESOURCEi_WORD1 */
1734		radeon_emit(cs, /* RESOURCEi_WORD2 */
1735			    S_038008_ENDIAN_SWAP(gs_ring_buffer ? ENDIAN_NONE : r600_endian_swap(32)) |
1736			    S_038008_STRIDE(gs_ring_buffer ? 4 : 16));
1737		radeon_emit(cs, 0); /* RESOURCEi_WORD3 */
1738		radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
1739		radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
1740		radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */
1741
1742		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1743		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
1744						      RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
1745
1746		dirty_mask &= ~(1 << buffer_index);
1747	}
1748	state->dirty_mask = 0;
1749}
1750
1751static void r600_emit_vs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
1752{
1753	r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX],
1754				   R600_FETCH_CONSTANTS_OFFSET_VS,
1755				   R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
1756				   R_028980_ALU_CONST_CACHE_VS_0);
1757}
1758
1759static void r600_emit_gs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
1760{
1761	r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY],
1762				   R600_FETCH_CONSTANTS_OFFSET_GS,
1763				   R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0,
1764				   R_0289C0_ALU_CONST_CACHE_GS_0);
1765}
1766
1767static void r600_emit_ps_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
1768{
1769	r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT],
1770				   R600_FETCH_CONSTANTS_OFFSET_PS,
1771				   R_028140_ALU_CONST_BUFFER_SIZE_PS_0,
1772				   R_028940_ALU_CONST_CACHE_PS_0);
1773}
1774
1775static void r600_emit_sampler_views(struct r600_context *rctx,
1776				    struct r600_samplerview_state *state,
1777				    unsigned resource_id_base)
1778{
1779	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1780	uint32_t dirty_mask = state->dirty_mask;
1781
1782	while (dirty_mask) {
1783		struct r600_pipe_sampler_view *rview;
1784		unsigned resource_index = u_bit_scan(&dirty_mask);
1785		unsigned reloc;
1786
1787		rview = state->views[resource_index];
1788		assert(rview);
1789
1790		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
1791		radeon_emit(cs, (resource_id_base + resource_index) * 7);
1792		radeon_emit_array(cs, rview->tex_resource_words, 7);
1793
1794		reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rview->tex_resource,
1795					      RADEON_USAGE_READ,
1796					      r600_get_sampler_view_priority(rview->tex_resource));
1797		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1798		radeon_emit(cs, reloc);
1799		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1800		radeon_emit(cs, reloc);
1801	}
1802	state->dirty_mask = 0;
1803}
1804
1805
1806static void r600_emit_vs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
1807{
1808	r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views, R600_FETCH_CONSTANTS_OFFSET_VS + R600_MAX_CONST_BUFFERS);
1809}
1810
1811static void r600_emit_gs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
1812{
1813	r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views, R600_FETCH_CONSTANTS_OFFSET_GS + R600_MAX_CONST_BUFFERS);
1814}
1815
1816static void r600_emit_ps_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
1817{
1818	r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views, R600_FETCH_CONSTANTS_OFFSET_PS + R600_MAX_CONST_BUFFERS);
1819}
1820
1821static void r600_emit_sampler_states(struct r600_context *rctx,
1822				struct r600_textures_info *texinfo,
1823				unsigned resource_id_base,
1824				unsigned border_color_reg)
1825{
1826	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1827	uint32_t dirty_mask = texinfo->states.dirty_mask;
1828
1829	while (dirty_mask) {
1830		struct r600_pipe_sampler_state *rstate;
1831		struct r600_pipe_sampler_view *rview;
1832		unsigned i = u_bit_scan(&dirty_mask);
1833
1834		rstate = texinfo->states.states[i];
1835		assert(rstate);
1836		rview = texinfo->views.views[i];
1837
1838		/* TEX_ARRAY_OVERRIDE must be set for array textures to disable
1839		 * filtering between layers.
1840		 */
1841		enum pipe_texture_target target = PIPE_BUFFER;
1842		if (rview)
1843			target = rview->base.texture->target;
1844		if (target == PIPE_TEXTURE_1D_ARRAY ||
1845		    target == PIPE_TEXTURE_2D_ARRAY) {
1846			rstate->tex_sampler_words[0] |= S_03C000_TEX_ARRAY_OVERRIDE(1);
1847			texinfo->is_array_sampler[i] = true;
1848		} else {
1849			rstate->tex_sampler_words[0] &= C_03C000_TEX_ARRAY_OVERRIDE;
1850			texinfo->is_array_sampler[i] = false;
1851		}
1852
1853		radeon_emit(cs, PKT3(PKT3_SET_SAMPLER, 3, 0));
1854		radeon_emit(cs, (resource_id_base + i) * 3);
1855		radeon_emit_array(cs, rstate->tex_sampler_words, 3);
1856
1857		if (rstate->border_color_use) {
1858			unsigned offset;
1859
1860			offset = border_color_reg;
1861			offset += i * 16;
1862			radeon_set_config_reg_seq(cs, offset, 4);
1863			radeon_emit_array(cs, rstate->border_color.ui, 4);
1864		}
1865	}
1866	texinfo->states.dirty_mask = 0;
1867}
1868
1869static void r600_emit_vs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
1870{
1871	r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_VERTEX], 18, R_00A600_TD_VS_SAMPLER0_BORDER_RED);
1872}
1873
1874static void r600_emit_gs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
1875{
1876	r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY], 36, R_00A800_TD_GS_SAMPLER0_BORDER_RED);
1877}
1878
1879static void r600_emit_ps_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
1880{
1881	r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT], 0, R_00A400_TD_PS_SAMPLER0_BORDER_RED);
1882}
1883
1884static void r600_emit_seamless_cube_map(struct r600_context *rctx, struct r600_atom *atom)
1885{
1886	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1887	unsigned tmp;
1888
1889	tmp = S_009508_DISABLE_CUBE_ANISO(1) |
1890		S_009508_SYNC_GRADIENT(1) |
1891		S_009508_SYNC_WALKER(1) |
1892		S_009508_SYNC_ALIGNER(1);
1893	if (!rctx->seamless_cube_map.enabled) {
1894		tmp |= S_009508_DISABLE_CUBE_WRAP(1);
1895	}
1896	radeon_set_config_reg(cs, R_009508_TA_CNTL_AUX, tmp);
1897}
1898
1899static void r600_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
1900{
1901	struct r600_sample_mask *s = (struct r600_sample_mask*)a;
1902	uint8_t mask = s->sample_mask;
1903
1904	radeon_set_context_reg(rctx->b.gfx.cs, R_028C48_PA_SC_AA_MASK,
1905			       mask | (mask << 8) | (mask << 16) | (mask << 24));
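	/* Worked example (illustration only): for sample_mask = 0x3 the value
	 * written is 0x3 | 0x3<<8 | 0x3<<16 | 0x3<<24 = 0x03030303, i.e. the
	 * same 8-bit mask replicated into all four bytes of PA_SC_AA_MASK. */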
1906}
1907
1908static void r600_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a)
1909{
1910	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1911	struct r600_cso_state *state = (struct r600_cso_state*)a;
1912	struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso;
1913
1914	if (!shader)
1915		return;
1916
1917	radeon_set_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8);
1918	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1919	radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->buffer,
1920                                                  RADEON_USAGE_READ,
1921                                                  RADEON_PRIO_SHADER_BINARY));
1922}
1923
1924static void r600_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a)
1925{
1926	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1927	struct r600_shader_stages_state *state = (struct r600_shader_stages_state*)a;
1928
1929	uint32_t v2 = 0, primid = 0;
1930
1931	if (rctx->vs_shader->current->shader.vs_as_gs_a) {
1932		v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
1933		primid = 1;
1934	}
1935
1936	if (state->geom_enable) {
1937		uint32_t cut_val;
1938
1939		if (rctx->gs_shader->gs_max_out_vertices <= 128)
1940			cut_val = V_028A40_GS_CUT_128;
1941		else if (rctx->gs_shader->gs_max_out_vertices <= 256)
1942			cut_val = V_028A40_GS_CUT_256;
1943		else if (rctx->gs_shader->gs_max_out_vertices <= 512)
1944			cut_val = V_028A40_GS_CUT_512;
1945		else
1946			cut_val = V_028A40_GS_CUT_1024;
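		/* Illustrative example: a geometry shader declaring
		 * gs_max_out_vertices = 200 fails the first test and picks
		 * V_028A40_GS_CUT_256, the smallest cut value that still
		 * covers 200 output vertices. */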
1947
1948		v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
1949			S_028A40_CUT_MODE(cut_val);
1950
1951		if (rctx->gs_shader->current->shader.gs_prim_id_input)
1952			primid = 1;
1953	}
1954
1955	radeon_set_context_reg(cs, R_028A40_VGT_GS_MODE, v2);
1956	radeon_set_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid);
1957}
1958
1959static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
1960{
1961	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1962	struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a;
1963	struct r600_resource *rbuffer;
1964
1965	radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
1966	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1967	radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
1968
1969	if (state->enable) {
1970		rbuffer = (struct r600_resource*)state->esgs_ring.buffer;
1971		radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, 0);
1972		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1973		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
1974						      RADEON_USAGE_READWRITE,
1975						      RADEON_PRIO_SHADER_RINGS));
1976		radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
1977				state->esgs_ring.buffer_size >> 8);
1978
1979		rbuffer = (struct r600_resource*)state->gsvs_ring.buffer;
1980		radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, 0);
1981		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1982		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
1983						      RADEON_USAGE_READWRITE,
1984						      RADEON_PRIO_SHADER_RINGS));
1985		radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
1986				state->gsvs_ring.buffer_size >> 8);
1987	} else {
1988		radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0);
1989		radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0);
1990	}
1991
1992	radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
1993	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1994	radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
1995}
1996
1997/* Adjust GPR allocation on R6xx/R7xx */
1998bool r600_adjust_gprs(struct r600_context *rctx)
1999{
2000	unsigned num_gprs[R600_NUM_HW_STAGES];
2001	unsigned new_gprs[R600_NUM_HW_STAGES];
2002	unsigned cur_gprs[R600_NUM_HW_STAGES];
2003	unsigned def_gprs[R600_NUM_HW_STAGES];
2004	unsigned def_num_clause_temp_gprs = rctx->r6xx_num_clause_temp_gprs;
2005	unsigned max_gprs;
2006	unsigned tmp, tmp2;
2007	unsigned i;
2008	bool need_recalc = false, use_default = true;
2009
2010	/* hardware will reserve twice num_clause_temp_gprs */
2011	max_gprs = def_num_clause_temp_gprs * 2;
2012	for (i = 0; i < R600_NUM_HW_STAGES; i++) {
2013		def_gprs[i] = rctx->default_gprs[i];
2014		max_gprs += def_gprs[i];
2015	}
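	/* Worked example (using the R600 defaults set up in
	 * r600_init_atom_start_cs below, for illustration only): PS 192 +
	 * VS 56 + GS 0 + ES 0 plus 2 * 4 clause temp GPRs gives
	 * max_gprs = 256. */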
2016
2017	cur_gprs[R600_HW_STAGE_PS] = G_008C04_NUM_PS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
2018	cur_gprs[R600_HW_STAGE_VS] = G_008C04_NUM_VS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
2019	cur_gprs[R600_HW_STAGE_GS] = G_008C08_NUM_GS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
2020	cur_gprs[R600_HW_STAGE_ES] = G_008C08_NUM_ES_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
2021
2022	num_gprs[R600_HW_STAGE_PS] = rctx->ps_shader->current->shader.bc.ngpr;
2023	if (rctx->gs_shader) {
2024		num_gprs[R600_HW_STAGE_ES] = rctx->vs_shader->current->shader.bc.ngpr;
2025		num_gprs[R600_HW_STAGE_GS] = rctx->gs_shader->current->shader.bc.ngpr;
2026		num_gprs[R600_HW_STAGE_VS] = rctx->gs_shader->current->gs_copy_shader->shader.bc.ngpr;
2027	} else {
2028		num_gprs[R600_HW_STAGE_ES] = 0;
2029		num_gprs[R600_HW_STAGE_GS] = 0;
2030		num_gprs[R600_HW_STAGE_VS] = rctx->vs_shader->current->shader.bc.ngpr;
2031	}
2032
2033	for (i = 0; i < R600_NUM_HW_STAGES; i++) {
2034		new_gprs[i] = num_gprs[i];
2035		if (new_gprs[i] > cur_gprs[i])
2036			need_recalc = true;
2037		if (new_gprs[i] > def_gprs[i])
2038			use_default = false;
2039	}
2040
2041	/* the sum of all SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS must be <= max_gprs */
2042	if (!need_recalc)
2043		return true;
2044
2045	/* try to switch back to the defaults */
2046	if (!use_default) {
2047		/* always privilege the VS stage so that at worst the
2048		 * pixel stage produces wrong output (not the vertex
2049		 * stage) */
2050		new_gprs[R600_HW_STAGE_PS] = max_gprs - def_num_clause_temp_gprs * 2;
2051		for (i = R600_HW_STAGE_VS; i < R600_NUM_HW_STAGES; i++)
2052			new_gprs[R600_HW_STAGE_PS] -= new_gprs[i];
2053	} else {
2054		for (i = 0; i < R600_NUM_HW_STAGES; i++)
2055			new_gprs[i] = def_gprs[i];
2056	}
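	/* Illustrative example with hypothetical demands: if VS needs 64,
	 * GS 40 and ES 40 GPRs on a part with max_gprs = 256 and 4 clause
	 * temp GPRs, the PS is left with 256 - 8 - 64 - 40 - 40 = 104. */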
2057
2058	/* SQ_PGM_RESOURCES_*.NUM_GPRS must always be programmed to a value <=
2059	 * SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS, otherwise the GPU will lock up.
2060	 * Likewise, if a shader uses more GPRs than SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS,
2061	 * it will lock up. So in that case just discard the draw command
2062	 * and don't change the current GPR partitioning.
2063	 */
2064	for (i = 0; i < R600_NUM_HW_STAGES; i++) {
2065		if (num_gprs[i] > new_gprs[i]) {
2066			R600_ERR("shaders require too many registers (%d + %d + %d + %d) "
2067				 "for a combined maximum of %d\n",
2068				 num_gprs[R600_HW_STAGE_PS], num_gprs[R600_HW_STAGE_VS], num_gprs[R600_HW_STAGE_ES], num_gprs[R600_HW_STAGE_GS], max_gprs);
2069			return false;
2070		}
2071	}
2072
2073	/* in some cases we end up recomputing the current value */
2074	tmp = S_008C04_NUM_PS_GPRS(new_gprs[R600_HW_STAGE_PS]) |
2075		S_008C04_NUM_VS_GPRS(new_gprs[R600_HW_STAGE_VS]) |
2076		S_008C04_NUM_CLAUSE_TEMP_GPRS(def_num_clause_temp_gprs);
2077
2078	tmp2 = S_008C08_NUM_ES_GPRS(new_gprs[R600_HW_STAGE_ES]) |
2079		S_008C08_NUM_GS_GPRS(new_gprs[R600_HW_STAGE_GS]);
2080	if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp || rctx->config_state.sq_gpr_resource_mgmt_2 != tmp2) {
2081		rctx->config_state.sq_gpr_resource_mgmt_1 = tmp;
2082		rctx->config_state.sq_gpr_resource_mgmt_2 = tmp2;
2083		r600_mark_atom_dirty(rctx, &rctx->config_state.atom);
2084		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
2085	}
2086	return true;
2087}
2088
2089void r600_init_atom_start_cs(struct r600_context *rctx)
2090{
2091	int ps_prio;
2092	int vs_prio;
2093	int gs_prio;
2094	int es_prio;
2095	int num_ps_gprs;
2096	int num_vs_gprs;
2097	int num_gs_gprs;
2098	int num_es_gprs;
2099	int num_temp_gprs;
2100	int num_ps_threads;
2101	int num_vs_threads;
2102	int num_gs_threads;
2103	int num_es_threads;
2104	int num_ps_stack_entries;
2105	int num_vs_stack_entries;
2106	int num_gs_stack_entries;
2107	int num_es_stack_entries;
2108	enum radeon_family family;
2109	struct r600_command_buffer *cb = &rctx->start_cs_cmd;
2110	uint32_t tmp, i;
2111
2112	r600_init_command_buffer(cb, 256);
2113
2114	/* R6xx requires this packet at the start of each command buffer */
2115	if (rctx->b.chip_class == R600) {
2116		r600_store_value(cb, PKT3(PKT3_START_3D_CMDBUF, 0, 0));
2117		r600_store_value(cb, 0);
2118	}
2119	/* All asics require this one */
2120	r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
2121	r600_store_value(cb, 0x80000000);
2122	r600_store_value(cb, 0x80000000);
2123
2124	/* We're setting config registers here. */
2125	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
2126	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
2127
2128	/* This enables pipeline stat & streamout queries.
2129	 * They are only disabled by blits.
2130	 */
2131	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
2132	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) | EVENT_INDEX(0));
2133
2134	family = rctx->b.family;
2135	ps_prio = 0;
2136	vs_prio = 1;
2137	gs_prio = 2;
2138	es_prio = 3;
2139	switch (family) {
2140	case CHIP_R600:
2141		num_ps_gprs = 192;
2142		num_vs_gprs = 56;
2143		num_temp_gprs = 4;
2144		num_gs_gprs = 0;
2145		num_es_gprs = 0;
2146		num_ps_threads = 136;
2147		num_vs_threads = 48;
2148		num_gs_threads = 4;
2149		num_es_threads = 4;
2150		num_ps_stack_entries = 128;
2151		num_vs_stack_entries = 128;
2152		num_gs_stack_entries = 0;
2153		num_es_stack_entries = 0;
2154		break;
2155	case CHIP_RV630:
2156	case CHIP_RV635:
2157		num_ps_gprs = 84;
2158		num_vs_gprs = 36;
2159		num_temp_gprs = 4;
2160		num_gs_gprs = 0;
2161		num_es_gprs = 0;
2162		num_ps_threads = 144;
2163		num_vs_threads = 40;
2164		num_gs_threads = 4;
2165		num_es_threads = 4;
2166		num_ps_stack_entries = 40;
2167		num_vs_stack_entries = 40;
2168		num_gs_stack_entries = 32;
2169		num_es_stack_entries = 16;
2170		break;
2171	case CHIP_RV610:
2172	case CHIP_RV620:
2173	case CHIP_RS780:
2174	case CHIP_RS880:
2175	default:
2176		num_ps_gprs = 84;
2177		num_vs_gprs = 36;
2178		num_temp_gprs = 4;
2179		num_gs_gprs = 0;
2180		num_es_gprs = 0;
2181		/* limit to 40 VS threads and at least 16 ES/GS threads */
2182		num_ps_threads = 120;
2183		num_vs_threads = 40;
2184		num_gs_threads = 16;
2185		num_es_threads = 16;
2186		num_ps_stack_entries = 40;
2187		num_vs_stack_entries = 40;
2188		num_gs_stack_entries = 32;
2189		num_es_stack_entries = 16;
2190		break;
2191	case CHIP_RV670:
2192		num_ps_gprs = 144;
2193		num_vs_gprs = 40;
2194		num_temp_gprs = 4;
2195		num_gs_gprs = 0;
2196		num_es_gprs = 0;
2197		num_ps_threads = 136;
2198		num_vs_threads = 48;
2199		num_gs_threads = 4;
2200		num_es_threads = 4;
2201		num_ps_stack_entries = 40;
2202		num_vs_stack_entries = 40;
2203		num_gs_stack_entries = 32;
2204		num_es_stack_entries = 16;
2205		break;
2206	case CHIP_RV770:
2207		num_ps_gprs = 130;
2208		num_vs_gprs = 56;
2209		num_temp_gprs = 4;
2210		num_gs_gprs = 31;
2211		num_es_gprs = 31;
2212		num_ps_threads = 180;
2213		num_vs_threads = 60;
2214		num_gs_threads = 4;
2215		num_es_threads = 4;
2216		num_ps_stack_entries = 128;
2217		num_vs_stack_entries = 128;
2218		num_gs_stack_entries = 128;
2219		num_es_stack_entries = 128;
2220		break;
2221	case CHIP_RV730:
2222	case CHIP_RV740:
2223		num_ps_gprs = 84;
2224		num_vs_gprs = 36;
2225		num_temp_gprs = 4;
2226		num_gs_gprs = 0;
2227		num_es_gprs = 0;
2228		num_ps_threads = 180;
2229		num_vs_threads = 60;
2230		num_gs_threads = 4;
2231		num_es_threads = 4;
2232		num_ps_stack_entries = 128;
2233		num_vs_stack_entries = 128;
2234		num_gs_stack_entries = 0;
2235		num_es_stack_entries = 0;
2236		break;
2237	case CHIP_RV710:
2238		num_ps_gprs = 192;
2239		num_vs_gprs = 56;
2240		num_temp_gprs = 4;
2241		num_gs_gprs = 0;
2242		num_es_gprs = 0;
2243		num_ps_threads = 136;
2244		num_vs_threads = 48;
2245		num_gs_threads = 4;
2246		num_es_threads = 4;
2247		num_ps_stack_entries = 128;
2248		num_vs_stack_entries = 128;
2249		num_gs_stack_entries = 0;
2250		num_es_stack_entries = 0;
2251		break;
2252	}
2253
2254	rctx->default_gprs[R600_HW_STAGE_PS] = num_ps_gprs;
2255	rctx->default_gprs[R600_HW_STAGE_VS] = num_vs_gprs;
2256	rctx->default_gprs[R600_HW_STAGE_GS] = 0;
2257	rctx->default_gprs[R600_HW_STAGE_ES] = 0;
2258
2259	rctx->r6xx_num_clause_temp_gprs = num_temp_gprs;
2260
2261	/* SQ_CONFIG */
2262	tmp = 0;
2263	switch (family) {
2264	case CHIP_RV610:
2265	case CHIP_RV620:
2266	case CHIP_RS780:
2267	case CHIP_RS880:
2268	case CHIP_RV710:
2269		break;
2270	default:
2271		tmp |= S_008C00_VC_ENABLE(1);
2272		break;
2273	}
2274	tmp |= S_008C00_DX9_CONSTS(0);
2275	tmp |= S_008C00_ALU_INST_PREFER_VECTOR(1);
2276	tmp |= S_008C00_PS_PRIO(ps_prio);
2277	tmp |= S_008C00_VS_PRIO(vs_prio);
2278	tmp |= S_008C00_GS_PRIO(gs_prio);
2279	tmp |= S_008C00_ES_PRIO(es_prio);
2280	r600_store_config_reg(cb, R_008C00_SQ_CONFIG, tmp);
2281
2282	/* SQ_GPR_RESOURCE_MGMT_2 */
2283	tmp = S_008C08_NUM_GS_GPRS(num_gs_gprs);
2284	tmp |= S_008C08_NUM_ES_GPRS(num_es_gprs);
2285	r600_store_config_reg_seq(cb, R_008C08_SQ_GPR_RESOURCE_MGMT_2, 4);
2286	r600_store_value(cb, tmp);
2287
2288	/* SQ_THREAD_RESOURCE_MGMT */
2289	tmp = S_008C0C_NUM_PS_THREADS(num_ps_threads);
2290	tmp |= S_008C0C_NUM_VS_THREADS(num_vs_threads);
2291	tmp |= S_008C0C_NUM_GS_THREADS(num_gs_threads);
2292	tmp |= S_008C0C_NUM_ES_THREADS(num_es_threads);
2293	r600_store_value(cb, tmp); /* R_008C0C_SQ_THREAD_RESOURCE_MGMT */
2294
2295	/* SQ_STACK_RESOURCE_MGMT_1 */
2296	tmp = S_008C10_NUM_PS_STACK_ENTRIES(num_ps_stack_entries);
2297	tmp |= S_008C10_NUM_VS_STACK_ENTRIES(num_vs_stack_entries);
2298	r600_store_value(cb, tmp); /* R_008C10_SQ_STACK_RESOURCE_MGMT_1 */
2299
2300	/* SQ_STACK_RESOURCE_MGMT_2 */
2301	tmp = S_008C14_NUM_GS_STACK_ENTRIES(num_gs_stack_entries);
2302	tmp |= S_008C14_NUM_ES_STACK_ENTRIES(num_es_stack_entries);
2303	r600_store_value(cb, tmp); /* R_008C14_SQ_STACK_RESOURCE_MGMT_2 */
2304
2305	r600_store_config_reg(cb, R_009714_VC_ENHANCE, 0);
2306
2307	if (rctx->b.chip_class >= R700) {
2308		r600_store_context_reg(cb, R_028A50_VGT_ENHANCE, 4);
2309		r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0x00004000);
2310		r600_store_config_reg(cb, R_009830_DB_DEBUG, 0);
2311		r600_store_config_reg(cb, R_009838_DB_WATERMARKS, 0x00420204);
2312		r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 0);
2313	} else {
2314		r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
2315		r600_store_config_reg(cb, R_009830_DB_DEBUG, 0x82000000);
2316		r600_store_config_reg(cb, R_009838_DB_WATERMARKS, 0x01020204);
2317		r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 1);
2318	}
2319	r600_store_context_reg_seq(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE, 9);
2320	r600_store_value(cb, 0); /* R_0288A8_SQ_ESGS_RING_ITEMSIZE */
2321	r600_store_value(cb, 0); /* R_0288AC_SQ_GSVS_RING_ITEMSIZE */
2322	r600_store_value(cb, 0); /* R_0288B0_SQ_ESTMP_RING_ITEMSIZE */
2323	r600_store_value(cb, 0); /* R_0288B4_SQ_GSTMP_RING_ITEMSIZE */
2324	r600_store_value(cb, 0); /* R_0288B8_SQ_VSTMP_RING_ITEMSIZE */
2325	r600_store_value(cb, 0); /* R_0288BC_SQ_PSTMP_RING_ITEMSIZE */
2326	r600_store_value(cb, 0); /* R_0288C0_SQ_FBUF_RING_ITEMSIZE */
2327	r600_store_value(cb, 0); /* R_0288C4_SQ_REDUC_RING_ITEMSIZE */
2328	r600_store_value(cb, 0); /* R_0288C8_SQ_GS_VERT_ITEMSIZE */
2329
2330	/* to avoid the GPU preloading constants from a random address */
2331	r600_store_context_reg_seq(cb, R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 16);
2332	for (i = 0; i < 16; i++)
2333		r600_store_value(cb, 0);
2334
2335	r600_store_context_reg_seq(cb, R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 16);
2336	for (i = 0; i < 16; i++)
2337		r600_store_value(cb, 0);
2338
2339	r600_store_context_reg_seq(cb, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 16);
2340	for (i = 0; i < 16; i++)
2341		r600_store_value(cb, 0);
2342
2343	r600_store_context_reg_seq(cb, R_028A10_VGT_OUTPUT_PATH_CNTL, 13);
2344	r600_store_value(cb, 0); /* R_028A10_VGT_OUTPUT_PATH_CNTL */
2345	r600_store_value(cb, 0); /* R_028A14_VGT_HOS_CNTL */
2346	r600_store_value(cb, 0); /* R_028A18_VGT_HOS_MAX_TESS_LEVEL */
2347	r600_store_value(cb, 0); /* R_028A1C_VGT_HOS_MIN_TESS_LEVEL */
2348	r600_store_value(cb, 0); /* R_028A20_VGT_HOS_REUSE_DEPTH */
2349	r600_store_value(cb, 0); /* R_028A24_VGT_GROUP_PRIM_TYPE */
2350	r600_store_value(cb, 0); /* R_028A28_VGT_GROUP_FIRST_DECR */
2351	r600_store_value(cb, 0); /* R_028A2C_VGT_GROUP_DECR */
2352	r600_store_value(cb, 0); /* R_028A30_VGT_GROUP_VECT_0_CNTL */
2353	r600_store_value(cb, 0); /* R_028A34_VGT_GROUP_VECT_1_CNTL */
2354	r600_store_value(cb, 0); /* R_028A38_VGT_GROUP_VECT_0_FMT_CNTL */
2355	r600_store_value(cb, 0); /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */
2356	r600_store_value(cb, 0); /* R_028A40_VGT_GS_MODE */
2357
2358	r600_store_context_reg(cb, R_028A84_VGT_PRIMITIVEID_EN, 0);
2359	r600_store_context_reg(cb, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 0);
2360	r600_store_context_reg(cb, R_028AA4_VGT_INSTANCE_STEP_RATE_1, 0);
2361
2362	r600_store_context_reg_seq(cb, R_028AB4_VGT_REUSE_OFF, 2);
2363	r600_store_value(cb, 1); /* R_028AB4_VGT_REUSE_OFF */
2364	r600_store_value(cb, 0); /* R_028AB8_VGT_VTX_CNT_EN */
2365
2366	r600_store_context_reg(cb, R_028B20_VGT_STRMOUT_BUFFER_EN, 0);
2367
2368	r600_store_ctl_const(cb, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);
2369
2370	r600_store_context_reg(cb, R_028028_DB_STENCIL_CLEAR, 0);
2371
2372	r600_store_context_reg_seq(cb, R_0286DC_SPI_FOG_CNTL, 3);
2373	r600_store_value(cb, 0); /* R_0286DC_SPI_FOG_CNTL */
2374	r600_store_value(cb, 0); /* R_0286E0_SPI_FOG_FUNC_SCALE */
2375	r600_store_value(cb, 0); /* R_0286E4_SPI_FOG_FUNC_BIAS */
2376
2377	r600_store_context_reg_seq(cb, R_028D28_DB_SRESULTS_COMPARE_STATE0, 3);
2378	r600_store_value(cb, 0); /* R_028D28_DB_SRESULTS_COMPARE_STATE0 */
2379	r600_store_value(cb, 0); /* R_028D2C_DB_SRESULTS_COMPARE_STATE1 */
2380	r600_store_value(cb, 0); /* R_028D30_DB_PRELOAD_CONTROL */
2381
2382	r600_store_context_reg(cb, R_028820_PA_CL_NANINF_CNTL, 0);
2383	r600_store_context_reg(cb, R_028A48_PA_SC_MPASS_PS_CNTL, 0);
2384
2385	r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0);
2386	r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
2387
2388	if (rctx->b.chip_class >= R700) {
2389		r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
2390	}
2391
2392	r600_store_context_reg_seq(cb, R_028C30_CB_CLRCMP_CONTROL, 4);
2393	r600_store_value(cb, 0x1000000);  /* R_028C30_CB_CLRCMP_CONTROL */
2394	r600_store_value(cb, 0);          /* R_028C34_CB_CLRCMP_SRC */
2395	r600_store_value(cb, 0xFF);       /* R_028C38_CB_CLRCMP_DST */
2396	r600_store_value(cb, 0xFFFFFFFF); /* R_028C3C_CB_CLRCMP_MSK */
2397
2398	r600_store_context_reg_seq(cb, R_028030_PA_SC_SCREEN_SCISSOR_TL, 2);
2399	r600_store_value(cb, 0); /* R_028030_PA_SC_SCREEN_SCISSOR_TL */
2400	r600_store_value(cb, S_028034_BR_X(8192) | S_028034_BR_Y(8192)); /* R_028034_PA_SC_SCREEN_SCISSOR_BR */
2401
2402	r600_store_context_reg_seq(cb, R_028240_PA_SC_GENERIC_SCISSOR_TL, 2);
2403	r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */
2404	r600_store_value(cb, S_028244_BR_X(8192) | S_028244_BR_Y(8192)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */
2405
2406	r600_store_context_reg_seq(cb, R_0288CC_SQ_PGM_CF_OFFSET_PS, 5);
2407	r600_store_value(cb, 0); /* R_0288CC_SQ_PGM_CF_OFFSET_PS */
2408	r600_store_value(cb, 0); /* R_0288D0_SQ_PGM_CF_OFFSET_VS */
2409	r600_store_value(cb, 0); /* R_0288D4_SQ_PGM_CF_OFFSET_GS */
2410	r600_store_value(cb, 0); /* R_0288D8_SQ_PGM_CF_OFFSET_ES */
2411	r600_store_value(cb, 0); /* R_0288DC_SQ_PGM_CF_OFFSET_FS */
2412
2413	r600_store_context_reg(cb, R_0288E0_SQ_VTX_SEMANTIC_CLEAR, ~0);
2414
2415	r600_store_context_reg_seq(cb, R_028400_VGT_MAX_VTX_INDX, 2);
2416	r600_store_value(cb, ~0); /* R_028400_VGT_MAX_VTX_INDX */
2417	r600_store_value(cb, 0); /* R_028404_VGT_MIN_VTX_INDX */
2418
2419	r600_store_context_reg(cb, R_0288A4_SQ_PGM_RESOURCES_FS, 0);
2420
2421	if (rctx->b.chip_class == R700)
2422		r600_store_context_reg(cb, R_028350_SX_MISC, 0);
2423	if (rctx->b.chip_class == R700 && rctx->screen->b.has_streamout)
2424		r600_store_context_reg(cb, R_028354_SX_SURFACE_SYNC, S_028354_SURFACE_SYNC_MASK(0xf));
2425
2426	r600_store_context_reg(cb, R_028800_DB_DEPTH_CONTROL, 0);
2427	if (rctx->screen->b.has_streamout) {
2428		r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
2429	}
2430
2431	r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0, 0x1000FFF);
2432	r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (32 * 4), 0x1000FFF);
2433	r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (64 * 4), 0x1000FFF);
2434}
2435
2436void r600_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
2437{
2438	struct r600_context *rctx = (struct r600_context *)ctx;
2439	struct r600_command_buffer *cb = &shader->command_buffer;
2440	struct r600_shader *rshader = &shader->shader;
2441	unsigned i, exports_ps, num_cout, spi_ps_in_control_0, spi_input_z, spi_ps_in_control_1, db_shader_control;
2442	int pos_index = -1, face_index = -1, fixed_pt_position_index = -1;
2443	unsigned tmp, sid, ufi = 0;
2444	int need_linear = 0;
2445	unsigned z_export = 0, stencil_export = 0, mask_export = 0;
2446	unsigned sprite_coord_enable = rctx->rasterizer ? rctx->rasterizer->sprite_coord_enable : 0;
2447
2448	if (!cb->buf) {
2449		r600_init_command_buffer(cb, 64);
2450	} else {
2451		cb->num_dw = 0;
2452	}
2453
2454	r600_store_context_reg_seq(cb, R_028644_SPI_PS_INPUT_CNTL_0, rshader->ninput);
2455	for (i = 0; i < rshader->ninput; i++) {
2456		if (rshader->input[i].name == TGSI_SEMANTIC_POSITION)
2457			pos_index = i;
2458		if (rshader->input[i].name == TGSI_SEMANTIC_FACE && face_index == -1)
2459			face_index = i;
2460		if (rshader->input[i].name == TGSI_SEMANTIC_SAMPLEID)
2461			fixed_pt_position_index = i;
2462
2463		sid = rshader->input[i].spi_sid;
2464
2465		tmp = S_028644_SEMANTIC(sid);
2466
2467		/* D3D 9 behaviour. GL is undefined */
2468		if (rshader->input[i].name == TGSI_SEMANTIC_COLOR && rshader->input[i].sid == 0)
2469			tmp |= S_028644_DEFAULT_VAL(3);
2470
2471		if (rshader->input[i].name == TGSI_SEMANTIC_POSITION ||
2472			rshader->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
2473			(rshader->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
2474				rctx->rasterizer && rctx->rasterizer->flatshade))
2475			tmp |= S_028644_FLAT_SHADE(1);
2476
2477		if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC &&
2478		    sprite_coord_enable & (1 << rshader->input[i].sid)) {
2479			tmp |= S_028644_PT_SPRITE_TEX(1);
2480		}
2481
2482		if (rshader->input[i].interpolate_location == TGSI_INTERPOLATE_LOC_CENTROID)
2483			tmp |= S_028644_SEL_CENTROID(1);
2484
2485		if (rshader->input[i].interpolate_location == TGSI_INTERPOLATE_LOC_SAMPLE)
2486			tmp |= S_028644_SEL_SAMPLE(1);
2487
2488		if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR) {
2489			need_linear = 1;
2490			tmp |= S_028644_SEL_LINEAR(1);
2491		}
2492
2493		r600_store_value(cb, tmp);
2494	}
2495
2496	db_shader_control = 0;
2497	for (i = 0; i < rshader->noutput; i++) {
2498		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION)
2499			z_export = 1;
2500		if (rshader->output[i].name == TGSI_SEMANTIC_STENCIL)
2501			stencil_export = 1;
2502		if (rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK &&
2503			rctx->framebuffer.nr_samples > 1 && rctx->ps_iter_samples > 0)
2504			mask_export = 1;
2505	}
2506	db_shader_control |= S_02880C_Z_EXPORT_ENABLE(z_export);
2507	db_shader_control |= S_02880C_STENCIL_REF_EXPORT_ENABLE(stencil_export);
2508	db_shader_control |= S_02880C_MASK_EXPORT_ENABLE(mask_export);
2509	if (rshader->uses_kill)
2510		db_shader_control |= S_02880C_KILL_ENABLE(1);
2511
2512	exports_ps = 0;
2513	for (i = 0; i < rshader->noutput; i++) {
2514		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION ||
2515		    rshader->output[i].name == TGSI_SEMANTIC_STENCIL ||
2516		    rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK) {
2517			exports_ps |= 1;
2518		}
2519	}
2520	num_cout = rshader->nr_ps_color_exports;
2521	exports_ps |= S_028854_EXPORT_COLORS(num_cout);
2522	if (!exports_ps) {
2523		/* always at least export 1 component per pixel */
2524		exports_ps = 2;
2525	}
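	/* Presumably bit 0 of exports_ps marks the depth/stencil/samplemask
	 * export and S_028854_EXPORT_COLORS() holds the color export count,
	 * so the fallback value 2 corresponds to a single color export. */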
2526
2527	shader->nr_ps_color_outputs = num_cout;
2528	shader->ps_color_export_mask = rshader->ps_color_export_mask;
2529
2530	spi_ps_in_control_0 = S_0286CC_NUM_INTERP(rshader->ninput) |
2531				S_0286CC_PERSP_GRADIENT_ENA(1)|
2532				S_0286CC_LINEAR_GRADIENT_ENA(need_linear);
2533	spi_input_z = 0;
2534	if (pos_index != -1) {
2535		spi_ps_in_control_0 |= (S_0286CC_POSITION_ENA(1) |
2536					S_0286CC_POSITION_CENTROID(rshader->input[pos_index].interpolate_location == TGSI_INTERPOLATE_LOC_CENTROID) |
2537					S_0286CC_POSITION_ADDR(rshader->input[pos_index].gpr) |
2538					S_0286CC_BARYC_SAMPLE_CNTL(1)) |
2539					S_0286CC_POSITION_SAMPLE(rshader->input[pos_index].interpolate_location == TGSI_INTERPOLATE_LOC_SAMPLE);
2540		spi_input_z |= S_0286D8_PROVIDE_Z_TO_SPI(1);
2541	}
2542
2543	spi_ps_in_control_1 = 0;
2544	if (face_index != -1) {
2545		spi_ps_in_control_1 |= S_0286D0_FRONT_FACE_ENA(1) |
2546			S_0286D0_FRONT_FACE_ADDR(rshader->input[face_index].gpr);
2547	}
2548	if (fixed_pt_position_index != -1) {
2549		spi_ps_in_control_1 |= S_0286D0_FIXED_PT_POSITION_ENA(1) |
2550			S_0286D0_FIXED_PT_POSITION_ADDR(rshader->input[fixed_pt_position_index].gpr);
2551	}
2552
2553	/* HW bug in original R600 */
2554	if (rctx->b.family == CHIP_R600)
2555		ufi = 1;
2556
2557	r600_store_context_reg_seq(cb, R_0286CC_SPI_PS_IN_CONTROL_0, 2);
2558	r600_store_value(cb, spi_ps_in_control_0); /* R_0286CC_SPI_PS_IN_CONTROL_0 */
2559	r600_store_value(cb, spi_ps_in_control_1); /* R_0286D0_SPI_PS_IN_CONTROL_1 */
2560
2561	r600_store_context_reg(cb, R_0286D8_SPI_INPUT_Z, spi_input_z);
2562
2563	r600_store_context_reg_seq(cb, R_028850_SQ_PGM_RESOURCES_PS, 2);
2564	r600_store_value(cb, /* R_028850_SQ_PGM_RESOURCES_PS*/
2565			 S_028850_NUM_GPRS(rshader->bc.ngpr) |
2566	/*
2567	 * The docs are misleading about the dx10_clamp bit. It only affects
2568	 * instructions using the CLAMP dst modifier: with the bit set they
2569	 * return 0 for a NaN result (otherwise they return NaN).
2570	 */
2571			 S_028850_DX10_CLAMP(1) |
2572			 S_028850_STACK_SIZE(rshader->bc.nstack) |
2573			 S_028850_UNCACHED_FIRST_INST(ufi));
2574	r600_store_value(cb, exports_ps); /* R_028854_SQ_PGM_EXPORTS_PS */
2575
2576	r600_store_context_reg(cb, R_028840_SQ_PGM_START_PS, 0);
2577	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
2578
2579	/* only set some bits here, the other bits are set in the dsa state */
2580	shader->db_shader_control = db_shader_control;
2581	shader->ps_depth_export = z_export | stencil_export | mask_export;
2582
2583	shader->sprite_coord_enable = sprite_coord_enable;
2584	if (rctx->rasterizer)
2585		shader->flatshade = rctx->rasterizer->flatshade;
2586}
2587
2588void r600_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
2589{
2590	struct r600_command_buffer *cb = &shader->command_buffer;
2591	struct r600_shader *rshader = &shader->shader;
2592	unsigned spi_vs_out_id[10] = {};
2593	unsigned i, tmp, nparams = 0;
2594
2595	for (i = 0; i < rshader->noutput; i++) {
2596		if (rshader->output[i].spi_sid) {
2597			tmp = rshader->output[i].spi_sid << ((nparams & 3) * 8);
2598			spi_vs_out_id[nparams / 4] |= tmp;
2599			nparams++;
2600		}
2601	}
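	/* Illustrative example: spi_sid values are packed four per register,
	 * one byte each, so with 5 exported params the first four land in
	 * SPI_VS_OUT_ID_0 (bytes 0..3) and the fifth in byte 0 of
	 * SPI_VS_OUT_ID_1. */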
2602
2603	r600_init_command_buffer(cb, 32);
2604
2605	r600_store_context_reg_seq(cb, R_028614_SPI_VS_OUT_ID_0, 10);
2606	for (i = 0; i < 10; i++) {
2607		r600_store_value(cb, spi_vs_out_id[i]);
2608	}
2609
2610	/* Certain attributes (position, psize, etc.) don't count as params.
2611	 * VS is required to export at least one param and r600_shader_from_tgsi()
2612	 * takes care of adding a dummy export.
2613	 */
2614	if (nparams < 1)
2615		nparams = 1;
2616
2617	r600_store_context_reg(cb, R_0286C4_SPI_VS_OUT_CONFIG,
2618			       S_0286C4_VS_EXPORT_COUNT(nparams - 1));
2619	r600_store_context_reg(cb, R_028868_SQ_PGM_RESOURCES_VS,
2620			       S_028868_NUM_GPRS(rshader->bc.ngpr) |
2621			       S_028868_DX10_CLAMP(1) |
2622			       S_028868_STACK_SIZE(rshader->bc.nstack));
2623	if (rshader->vs_position_window_space) {
2624		r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL,
2625			S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
2626	} else {
2627		r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL,
2628			S_028818_VTX_W0_FMT(1) |
2629			S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
2630			S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
2631			S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
2632
2633	}
2634	r600_store_context_reg(cb, R_028858_SQ_PGM_START_VS, 0);
2635	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
2636
2637	shader->pa_cl_vs_out_cntl =
2638		S_02881C_VS_OUT_CCDIST0_VEC_ENA((rshader->clip_dist_write & 0x0F) != 0) |
2639		S_02881C_VS_OUT_CCDIST1_VEC_ENA((rshader->clip_dist_write & 0xF0) != 0) |
2640		S_02881C_VS_OUT_MISC_VEC_ENA(rshader->vs_out_misc_write) |
2641		S_02881C_USE_VTX_POINT_SIZE(rshader->vs_out_point_size) |
2642		S_02881C_USE_VTX_EDGE_FLAG(rshader->vs_out_edgeflag) |
2643		S_02881C_USE_VTX_RENDER_TARGET_INDX(rshader->vs_out_layer) |
2644		S_02881C_USE_VTX_VIEWPORT_INDX(rshader->vs_out_viewport);
2645}
2646
2647#define RV610_GSVS_ALIGN 32
2648#define R600_GSVS_ALIGN 16
2649
2650void r600_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
2651{
2652	struct r600_context *rctx = (struct r600_context *)ctx;
2653	struct r600_command_buffer *cb = &shader->command_buffer;
2654	struct r600_shader *rshader = &shader->shader;
2655	struct r600_shader *cp_shader = &shader->gs_copy_shader->shader;
2656	unsigned gsvs_itemsize =
2657			(cp_shader->ring_item_sizes[0] * shader->selector->gs_max_out_vertices) >> 2;
2658
2659	/* some r600s need the gsvs itemsize aligned to the cacheline size;
2660	   this was fixed in rs780 and above. */
2661	switch (rctx->b.family) {
2662	case CHIP_RV610:
2663		gsvs_itemsize = align(gsvs_itemsize, RV610_GSVS_ALIGN);
2664		break;
2665	case CHIP_R600:
2666	case CHIP_RV630:
2667	case CHIP_RV670:
2668	case CHIP_RV620:
2669	case CHIP_RV635:
2670		gsvs_itemsize = align(gsvs_itemsize, R600_GSVS_ALIGN);
2671		break;
2672	default:
2673		break;
2674	}
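	/* Worked example (hypothetical sizes, for illustration only): with a
	 * 16-byte GSVS ring item and gs_max_out_vertices = 9, gsvs_itemsize
	 * is (16 * 9) >> 2 = 36 dwords; RV610 rounds that up to 64, the other
	 * early r600s listed above to 48, and rs780+ leave it at 36. */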
2675
2676	r600_init_command_buffer(cb, 64);
2677
2678	/* VGT_GS_MODE is written by r600_emit_shader_stages */
2679	r600_store_context_reg(cb, R_028AB8_VGT_VTX_CNT_EN, 1);
2680
2681	if (rctx->b.chip_class >= R700) {
2682		r600_store_context_reg(cb, R_028B38_VGT_GS_MAX_VERT_OUT,
2683				       S_028B38_MAX_VERT_OUT(shader->selector->gs_max_out_vertices));
2684	}
2685	r600_store_context_reg(cb, R_028A6C_VGT_GS_OUT_PRIM_TYPE,
2686			       r600_conv_prim_to_gs_out(shader->selector->gs_output_prim));
2687
2688	r600_store_context_reg(cb, R_0288C8_SQ_GS_VERT_ITEMSIZE,
2689	                       cp_shader->ring_item_sizes[0] >> 2);
2690
2691	r600_store_context_reg(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE,
2692			       (rshader->ring_item_sizes[0]) >> 2);
2693
2694	r600_store_context_reg(cb, R_0288AC_SQ_GSVS_RING_ITEMSIZE,
2695			       gsvs_itemsize);
2696
2697	/* FIXME calculate these values somehow ??? */
2698	r600_store_config_reg_seq(cb, R_0088C8_VGT_GS_PER_ES, 2);
2699	r600_store_value(cb, 0x80); /* GS_PER_ES */
2700	r600_store_value(cb, 0x100); /* ES_PER_GS */
2701	r600_store_config_reg_seq(cb, R_0088E8_VGT_GS_PER_VS, 1);
2702	r600_store_value(cb, 0x2); /* GS_PER_VS */
2703
2704	r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_GS,
2705			       S_02887C_NUM_GPRS(rshader->bc.ngpr) |
2706			       S_02887C_DX10_CLAMP(1) |
2707			       S_02887C_STACK_SIZE(rshader->bc.nstack));
2708	r600_store_context_reg(cb, R_02886C_SQ_PGM_START_GS, 0);
2709	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
2710}
2711
2712void r600_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
2713{
2714	struct r600_command_buffer *cb = &shader->command_buffer;
2715	struct r600_shader *rshader = &shader->shader;
2716
2717	r600_init_command_buffer(cb, 32);
2718
2719	r600_store_context_reg(cb, R_028890_SQ_PGM_RESOURCES_ES,
2720			       S_028890_NUM_GPRS(rshader->bc.ngpr) |
2721			       S_028890_DX10_CLAMP(1) |
2722			       S_028890_STACK_SIZE(rshader->bc.nstack));
2723	r600_store_context_reg(cb, R_028880_SQ_PGM_START_ES, 0);
2724	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
2725}
2726
2727
2728void *r600_create_resolve_blend(struct r600_context *rctx)
2729{
2730	struct pipe_blend_state blend;
2731	unsigned i;
2732
2733	memset(&blend, 0, sizeof(blend));
2734	blend.independent_blend_enable = true;
2735	for (i = 0; i < 2; i++) {
2736		blend.rt[i].colormask = 0xf;
2737		blend.rt[i].blend_enable = 1;
2738		blend.rt[i].rgb_func = PIPE_BLEND_ADD;
2739		blend.rt[i].alpha_func = PIPE_BLEND_ADD;
2740		blend.rt[i].rgb_src_factor = PIPE_BLENDFACTOR_ZERO;
2741		blend.rt[i].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
2742		blend.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ZERO;
2743		blend.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
2744	}
2745	return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX);
2746}
2747
2748void *r700_create_resolve_blend(struct r600_context *rctx)
2749{
2750	struct pipe_blend_state blend;
2751
2752	memset(&blend, 0, sizeof(blend));
2753	blend.independent_blend_enable = true;
2754	blend.rt[0].colormask = 0xf;
2755	return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX);
2756}
2757
2758void *r600_create_decompress_blend(struct r600_context *rctx)
2759{
2760	struct pipe_blend_state blend;
2761
2762	memset(&blend, 0, sizeof(blend));
2763	blend.independent_blend_enable = true;
2764	blend.rt[0].colormask = 0xf;
2765	return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_EXPAND_SAMPLES);
2766}
2767
2768void *r600_create_db_flush_dsa(struct r600_context *rctx)
2769{
2770	struct pipe_depth_stencil_alpha_state dsa;
2771	boolean quirk = false;
2772
2773	if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 ||
2774		rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635)
2775		quirk = true;
2776
2777	memset(&dsa, 0, sizeof(dsa));
2778
2779	if (quirk) {
2780		dsa.depth.enabled = 1;
2781		dsa.depth.func = PIPE_FUNC_LEQUAL;
2782		dsa.stencil[0].enabled = 1;
2783		dsa.stencil[0].func = PIPE_FUNC_ALWAYS;
2784		dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_KEEP;
2785		dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_INCR;
2786		dsa.stencil[0].writemask = 0xff;
2787	}
2788
2789	return rctx->b.b.create_depth_stencil_alpha_state(&rctx->b.b, &dsa);
2790}
2791
2792void r600_update_db_shader_control(struct r600_context * rctx)
2793{
2794	bool dual_export;
2795	unsigned db_shader_control;
2796	uint8_t ps_conservative_z;
2797
2798	if (!rctx->ps_shader) {
2799		return;
2800	}
2801
2802	dual_export = rctx->framebuffer.export_16bpc &&
2803		      !rctx->ps_shader->current->ps_depth_export;
2804
2805	db_shader_control = rctx->ps_shader->current->db_shader_control |
2806			    S_02880C_DUAL_EXPORT_ENABLE(dual_export);
2807
2808	ps_conservative_z = rctx->ps_shader->current->shader.ps_conservative_z;
2809
2810	/* When alpha test is enabled we can't trust the hw to make the proper
2811	 * decision on the order in which the Z test should run relative to
2812	 * fragment shader execution.
2813	 *
2814	 * If alpha test is enabled, perform the Z test after the fragment shader.
2815	 * RE_Z (early Z test but no write to the zbuffer) seems to cause
2816	 * lockups on r6xx/r7xx. */
2817	if (rctx->alphatest_state.sx_alpha_test_control) {
2818		db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z);
2819	} else {
2820		db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
2821	}
2822
2823	if (db_shader_control != rctx->db_misc_state.db_shader_control ||
2824		ps_conservative_z != rctx->db_misc_state.ps_conservative_z) {
2825		rctx->db_misc_state.db_shader_control = db_shader_control;
2826		rctx->db_misc_state.ps_conservative_z = ps_conservative_z;
2827		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
2828	}
2829}
2830
2831static inline unsigned r600_array_mode(unsigned mode)
2832{
2833	switch (mode) {
2834	default:
2835	case RADEON_SURF_MODE_LINEAR_ALIGNED:	return V_0280A0_ARRAY_LINEAR_ALIGNED;
2836		break;
2837	case RADEON_SURF_MODE_1D:		return V_0280A0_ARRAY_1D_TILED_THIN1;
2838		break;
2839	case RADEON_SURF_MODE_2D:		return V_0280A0_ARRAY_2D_TILED_THIN1;
2840	}
2841}
2842
2843static boolean r600_dma_copy_tile(struct r600_context *rctx,
2844				struct pipe_resource *dst,
2845				unsigned dst_level,
2846				unsigned dst_x,
2847				unsigned dst_y,
2848				unsigned dst_z,
2849				struct pipe_resource *src,
2850				unsigned src_level,
2851				unsigned src_x,
2852				unsigned src_y,
2853				unsigned src_z,
2854				unsigned copy_height,
2855				unsigned pitch,
2856				unsigned bpp)
2857{
2858	struct radeon_cmdbuf *cs = rctx->b.dma.cs;
2859	struct r600_texture *rsrc = (struct r600_texture*)src;
2860	struct r600_texture *rdst = (struct r600_texture*)dst;
2861	unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
2862	unsigned ncopy, height, cheight, detile, i, x, y, z, src_mode, dst_mode;
2863	uint64_t base, addr;
2864
2865	dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
2866	src_mode = rsrc->surface.u.legacy.level[src_level].mode;
2867	assert(dst_mode != src_mode);
2868
2869	y = 0;
2870	lbpp = util_logbase2(bpp);
2871	pitch_tile_max = ((pitch / bpp) / 8) - 1;
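	/* Illustrative example: for a 1024-byte pitch with bpp = 4, the
	 * surface is 256 pixels wide, i.e. 32 8-pixel tiles, so
	 * pitch_tile_max = 32 - 1 = 31. */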
2872
2873	if (dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED) {
2874		/* T2L */
2875		array_mode = r600_array_mode(src_mode);
2876		slice_tile_max = (rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.u.legacy.level[src_level].nblk_y) / (8*8);
2877		slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
2878		/* The linear height must be the same as the slice tile max height; it's ok
2879		 * even if the linear destination/source has a smaller height, since the
2880		 * DMA packet size is computed from copy_height, which is always less
2881		 * than or equal to the linear height.
2882		 */
2883		height = u_minify(rsrc->resource.b.b.height0, src_level);
2884		detile = 1;
2885		x = src_x;
2886		y = src_y;
2887		z = src_z;
2888		base = rsrc->surface.u.legacy.level[src_level].offset;
2889		addr = rdst->surface.u.legacy.level[dst_level].offset;
2890		addr += (uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z;
2891		addr += dst_y * pitch + dst_x * bpp;
2892	} else {
2893		/* L2T */
2894		array_mode = r600_array_mode(dst_mode);
2895		slice_tile_max = (rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.u.legacy.level[dst_level].nblk_y) / (8*8);
2896		slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
2897		/* The linear height must be the same as the slice tile max height; it's ok
2898		 * even if the linear destination/source has a smaller height, since the
2899		 * DMA packet size is computed from copy_height, which is always less
2900		 * than or equal to the linear height.
2901		 */
2902		height = u_minify(rdst->resource.b.b.height0, dst_level);
2903		detile = 0;
2904		x = dst_x;
2905		y = dst_y;
2906		z = dst_z;
2907		base = rdst->surface.u.legacy.level[dst_level].offset;
2908		addr = rsrc->surface.u.legacy.level[src_level].offset;
2909		addr += (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_z;
2910		addr += src_y * pitch + src_x * bpp;
2911	}
2912	/* check that we meet the dword/base alignment constraints */
2913	if (addr % 4 || base % 256) {
2914		return FALSE;
2915	}
2916
2917	/* It's an r6xx/r7xx limitation: the number of lines in the blit must be a
2918	 * multiple of 8. Compute the largest multiple of 8 lines that fits in the
2919	 * size limit. */
2920	cheight = ((R600_DMA_COPY_MAX_SIZE_DW * 4) / pitch) & 0xfffffff8;
2921	ncopy = (copy_height / cheight) + !!(copy_height % cheight);
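	/* Illustrative example (assuming, purely for the sake of the numbers,
	 * that R600_DMA_COPY_MAX_SIZE_DW * 4 is 256 KiB): a 1024-byte pitch
	 * allows 256 lines per packet, already a multiple of 8, so a
	 * copy_height of 600 needs ncopy = 3 packets. */
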
2922	r600_need_dma_space(&rctx->b, ncopy * 7, &rdst->resource, &rsrc->resource);
2923
2924	for (i = 0; i < ncopy; i++) {
2925		cheight = cheight > copy_height ? copy_height : cheight;
2926		size = (cheight * pitch) / 4;
2927		/* emit the relocs before writing to the CS so that the CS is always in a consistent state */
2928		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rsrc->resource, RADEON_USAGE_READ, 0);
2929		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rdst->resource, RADEON_USAGE_WRITE, 0);
2930		radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, 1, 0, size));
2931		radeon_emit(cs, base >> 8);
2932		radeon_emit(cs, (detile << 31) | (array_mode << 27) |
2933				(lbpp << 24) | ((height - 1) << 10) |
2934				pitch_tile_max);
2935		radeon_emit(cs, (slice_tile_max << 12) | (z << 0));
2936		radeon_emit(cs, (x << 3) | (y << 17));
2937		radeon_emit(cs, addr & 0xfffffffc);
2938		radeon_emit(cs, (addr >> 32UL) & 0xff);
2939		copy_height -= cheight;
2940		addr += cheight * pitch;
2941		y += cheight;
2942	}
2943	return TRUE;
2944}
2945
2946static void r600_dma_copy(struct pipe_context *ctx,
2947			  struct pipe_resource *dst,
2948			  unsigned dst_level,
2949			  unsigned dstx, unsigned dsty, unsigned dstz,
2950			  struct pipe_resource *src,
2951			  unsigned src_level,
2952			  const struct pipe_box *src_box)
2953{
2954	struct r600_context *rctx = (struct r600_context *)ctx;
2955	struct r600_texture *rsrc = (struct r600_texture*)src;
2956	struct r600_texture *rdst = (struct r600_texture*)dst;
2957	unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode, copy_height;
2958	unsigned src_w, dst_w;
2959	unsigned src_x, src_y;
2960	unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;
2961
2962	if (rctx->b.dma.cs == NULL) {
2963		goto fallback;
2964	}
2965
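	/* buffer to buffer copies only need dword-aligned offsets and width,
	 * anything else falls back to the generic copy path */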
2966	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
2967		if (dst_x % 4 || src_box->x % 4 || src_box->width % 4)
2968			goto fallback;
2969
2970		r600_dma_copy_buffer(rctx, dst, src, dst_x, src_box->x, src_box->width);
2971		return;
2972	}
2973
2974	if (src_box->depth > 1 ||
2975	    !r600_prepare_for_dma_blit(&rctx->b, rdst, dst_level, dstx, dsty,
2976					dstz, rsrc, src_level, src_box))
2977		goto fallback;
2978
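	/* convert pixel coordinates into block coordinates; for block-compressed
	 * formats one block covers several pixels */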
2979	src_x = util_format_get_nblocksx(src->format, src_box->x);
2980	dst_x = util_format_get_nblocksx(src->format, dst_x);
2981	src_y = util_format_get_nblocksy(src->format, src_box->y);
2982	dst_y = util_format_get_nblocksy(src->format, dst_y);
2983
2984	bpp = rdst->surface.bpe;
2985	dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.bpe;
2986	src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.bpe;
2987	src_w = u_minify(rsrc->resource.b.b.width0, src_level);
2988	dst_w = u_minify(rdst->resource.b.b.width0, dst_level);
2989	copy_height = src_box->height / rsrc->surface.blk_h;
2990
2991	dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
2992	src_mode = rsrc->surface.u.legacy.level[src_level].mode;
2993
2994	if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w) {
2995		/* strict requirement on r6xx/r7xx */
2996		goto fallback;
2997	}
2998	/* there are a lot of alignment constraints; these checks should capture them all */
2999	if (src_pitch % 8 || src_box->y % 8 || dst_y % 8) {
3000		goto fallback;
3001	}
3002
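	/* with identical tiling modes on both sides the copy degenerates into a
	 * plain linear buffer copy, otherwise go through the (de)tiling path */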
3003	if (src_mode == dst_mode) {
3004		uint64_t dst_offset, src_offset, size;
3005
3006		/* a simple dma blit will do. NOTE: the code here assumes:
3007		 *   src_box.x/y == 0
3008		 *   dst_x/y == 0
3009		 *   dst_pitch == src_pitch
3010		 */
3011		src_offset = rsrc->surface.u.legacy.level[src_level].offset;
3012		src_offset += (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_box->z;
3013		src_offset += src_y * src_pitch + src_x * bpp;
3014		dst_offset = rdst->surface.u.legacy.level[dst_level].offset;
3015		dst_offset += (uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z;
3016		dst_offset += dst_y * dst_pitch + dst_x * bpp;
3017		size = src_box->height * src_pitch;
3018		/* must be dw aligned */
3019		if (dst_offset % 4 || src_offset % 4 || size % 4) {
3020			goto fallback;
3021		}
3022		r600_dma_copy_buffer(rctx, dst, src, dst_offset, src_offset, size);
3023	} else {
3024		if (!r600_dma_copy_tile(rctx, dst, dst_level, dst_x, dst_y, dst_z,
3025					src, src_level, src_x, src_y, src_box->z,
3026					copy_height, dst_pitch, bpp)) {
3027			goto fallback;
3028		}
3029	}
3030	return;
3031
3032fallback:
3033	r600_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
3034				  src, src_level, src_box);
3035}
3036
3037void r600_init_state_functions(struct r600_context *rctx)
3038{
3039	unsigned id = 1;
3040	unsigned i;
3041	/* !!!
3042	 *  To avoid GPU lockups, registers must be emitted in a specific order
3043	 * (no kidding ...). The order below is important and has been
3044	 * partially inferred from analyzing the fglrx command stream.
3045	 *
3046	 * Don't reorder atoms without carefully checking the effect (GPU lockup
3047	 * or piglit regressions).
3048	 * !!!
3049	 */
3050
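	/* Atoms are registered with strictly increasing ids starting at 1, so the
	 * registration order below also determines the emission order. The last
	 * argument to r600_init_atom is the number of dwords reserved for the atom
	 * in the command stream (atoms registered with 0 set their size elsewhere).
	 */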
3051	r600_init_atom(rctx, &rctx->framebuffer.atom, id++, r600_emit_framebuffer_state, 0);
3052
3053	/* shader const */
3054	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX].atom, id++, r600_emit_vs_constant_buffers, 0);
3055	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY].atom, id++, r600_emit_gs_constant_buffers, 0);
3056	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT].atom, id++, r600_emit_ps_constant_buffers, 0);
3057
3058	/* samplers must be emitted before TA_CNTL_AUX, otherwise DISABLE_CUBE_WRAP changes
3059	 * do not take effect (TA_CNTL_AUX is emitted by r600_emit_seamless_cube_map)
3060	 */
3061	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].states.atom, id++, r600_emit_vs_sampler_states, 0);
3062	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].states.atom, id++, r600_emit_gs_sampler_states, 0);
3063	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].states.atom, id++, r600_emit_ps_sampler_states, 0);
3064	/* resource */
3065	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views.atom, id++, r600_emit_vs_sampler_views, 0);
3066	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views.atom, id++, r600_emit_gs_sampler_views, 0);
3067	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views.atom, id++, r600_emit_ps_sampler_views, 0);
3068	r600_init_atom(rctx, &rctx->vertex_buffer_state.atom, id++, r600_emit_vertex_buffers, 0);
3069
3070	r600_init_atom(rctx, &rctx->vgt_state.atom, id++, r600_emit_vgt_state, 10);
3071
3072	r600_init_atom(rctx, &rctx->seamless_cube_map.atom, id++, r600_emit_seamless_cube_map, 3);
3073	r600_init_atom(rctx, &rctx->sample_mask.atom, id++, r600_emit_sample_mask, 3);
3074	rctx->sample_mask.sample_mask = ~0;
3075
3076	r600_init_atom(rctx, &rctx->alphatest_state.atom, id++, r600_emit_alphatest_state, 6);
3077	r600_init_atom(rctx, &rctx->blend_color.atom, id++, r600_emit_blend_color, 6);
3078	r600_init_atom(rctx, &rctx->blend_state.atom, id++, r600_emit_cso_state, 0);
3079	r600_init_atom(rctx, &rctx->cb_misc_state.atom, id++, r600_emit_cb_misc_state, 7);
3080	r600_init_atom(rctx, &rctx->clip_misc_state.atom, id++, r600_emit_clip_misc_state, 6);
3081	r600_init_atom(rctx, &rctx->clip_state.atom, id++, r600_emit_clip_state, 26);
3082	r600_init_atom(rctx, &rctx->db_misc_state.atom, id++, r600_emit_db_misc_state, 7);
3083	r600_init_atom(rctx, &rctx->db_state.atom, id++, r600_emit_db_state, 11);
3084	r600_init_atom(rctx, &rctx->dsa_state.atom, id++, r600_emit_cso_state, 0);
3085	r600_init_atom(rctx, &rctx->poly_offset_state.atom, id++, r600_emit_polygon_offset, 9);
3086	r600_init_atom(rctx, &rctx->rasterizer_state.atom, id++, r600_emit_cso_state, 0);
3087	r600_add_atom(rctx, &rctx->b.scissors.atom, id++);
3088	r600_add_atom(rctx, &rctx->b.viewports.atom, id++);
3089	r600_init_atom(rctx, &rctx->config_state.atom, id++, r600_emit_config_state, 3);
3090	r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4);
3091	r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, r600_emit_vertex_fetch_shader, 5);
3092	r600_add_atom(rctx, &rctx->b.render_cond_atom, id++);
3093	r600_add_atom(rctx, &rctx->b.streamout.begin_atom, id++);
3094	r600_add_atom(rctx, &rctx->b.streamout.enable_atom, id++);
3095	for (i = 0; i < R600_NUM_HW_STAGES; i++)
3096		r600_init_atom(rctx, &rctx->hw_shader_stages[i].atom, id++, r600_emit_shader, 0);
3097	r600_init_atom(rctx, &rctx->shader_stages.atom, id++, r600_emit_shader_stages, 0);
3098	r600_init_atom(rctx, &rctx->gs_rings.atom, id++, r600_emit_gs_rings, 0);
3099
3100	rctx->b.b.create_blend_state = r600_create_blend_state;
3101	rctx->b.b.create_depth_stencil_alpha_state = r600_create_dsa_state;
3102	rctx->b.b.create_rasterizer_state = r600_create_rs_state;
3103	rctx->b.b.create_sampler_state = r600_create_sampler_state;
3104	rctx->b.b.create_sampler_view = r600_create_sampler_view;
3105	rctx->b.b.set_framebuffer_state = r600_set_framebuffer_state;
3106	rctx->b.b.set_polygon_stipple = r600_set_polygon_stipple;
3107	rctx->b.b.set_min_samples = r600_set_min_samples;
3108	rctx->b.b.get_sample_position = r600_get_sample_position;
3109	rctx->b.dma_copy = r600_dma_copy;
3110}
3111/* this function must be last */
3112