1/*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23#include "r600_formats.h"
24#include "r600_shader.h"
25#include "r600_query.h"
26#include "evergreend.h"
27
28#include "pipe/p_shader_tokens.h"
29#include "util/u_pack_color.h"
30#include "util/u_memory.h"
31#include "util/u_framebuffer.h"
32#include "util/u_dual_blend.h"
33#include "evergreen_compute.h"
34#include "util/u_math.h"
35
36static inline unsigned evergreen_array_mode(unsigned mode)
37{
38	switch (mode) {
39	default:
40	case RADEON_SURF_MODE_LINEAR_ALIGNED:	return V_028C70_ARRAY_LINEAR_ALIGNED;
41		break;
42	case RADEON_SURF_MODE_1D:		return V_028C70_ARRAY_1D_TILED_THIN1;
43		break;
44	case RADEON_SURF_MODE_2D:		return V_028C70_ARRAY_2D_TILED_THIN1;
45	}
46}
47
48static uint32_t eg_num_banks(uint32_t nbanks)
49{
50	switch (nbanks) {
51	case 2:
52		return 0;
53	case 4:
54		return 1;
55	case 8:
56	default:
57		return 2;
58	case 16:
59		return 3;
60	}
61}
62
63
64static unsigned eg_tile_split(unsigned tile_split)
65{
66	switch (tile_split) {
67	case 64:	tile_split = 0;	break;
68	case 128:	tile_split = 1;	break;
69	case 256:	tile_split = 2;	break;
70	case 512:	tile_split = 3;	break;
71	default:
72	case 1024:	tile_split = 4;	break;
73	case 2048:	tile_split = 5;	break;
74	case 4096:	tile_split = 6;	break;
75	}
76	return tile_split;
77}
78
79static unsigned eg_macro_tile_aspect(unsigned macro_tile_aspect)
80{
81	switch (macro_tile_aspect) {
82	default:
83	case 1:	macro_tile_aspect = 0;	break;
84	case 2:	macro_tile_aspect = 1;	break;
85	case 4:	macro_tile_aspect = 2;	break;
86	case 8:	macro_tile_aspect = 3;	break;
87	}
88	return macro_tile_aspect;
89}
90
91static unsigned eg_bank_wh(unsigned bankwh)
92{
93	switch (bankwh) {
94	default:
95	case 1:	bankwh = 0;	break;
96	case 2:	bankwh = 1;	break;
97	case 4:	bankwh = 2;	break;
98	case 8:	bankwh = 3;	break;
99	}
100	return bankwh;
101}
102
103static uint32_t r600_translate_blend_function(int blend_func)
104{
105	switch (blend_func) {
106	case PIPE_BLEND_ADD:
107		return V_028780_COMB_DST_PLUS_SRC;
108	case PIPE_BLEND_SUBTRACT:
109		return V_028780_COMB_SRC_MINUS_DST;
110	case PIPE_BLEND_REVERSE_SUBTRACT:
111		return V_028780_COMB_DST_MINUS_SRC;
112	case PIPE_BLEND_MIN:
113		return V_028780_COMB_MIN_DST_SRC;
114	case PIPE_BLEND_MAX:
115		return V_028780_COMB_MAX_DST_SRC;
116	default:
117		R600_ERR("Unknown blend function %d\n", blend_func);
118		assert(0);
119		break;
120	}
121	return 0;
122}
123
124static uint32_t r600_translate_blend_factor(int blend_fact)
125{
126	switch (blend_fact) {
127	case PIPE_BLENDFACTOR_ONE:
128		return V_028780_BLEND_ONE;
129	case PIPE_BLENDFACTOR_SRC_COLOR:
130		return V_028780_BLEND_SRC_COLOR;
131	case PIPE_BLENDFACTOR_SRC_ALPHA:
132		return V_028780_BLEND_SRC_ALPHA;
133	case PIPE_BLENDFACTOR_DST_ALPHA:
134		return V_028780_BLEND_DST_ALPHA;
135	case PIPE_BLENDFACTOR_DST_COLOR:
136		return V_028780_BLEND_DST_COLOR;
137	case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
138		return V_028780_BLEND_SRC_ALPHA_SATURATE;
139	case PIPE_BLENDFACTOR_CONST_COLOR:
140		return V_028780_BLEND_CONST_COLOR;
141	case PIPE_BLENDFACTOR_CONST_ALPHA:
142		return V_028780_BLEND_CONST_ALPHA;
143	case PIPE_BLENDFACTOR_ZERO:
144		return V_028780_BLEND_ZERO;
145	case PIPE_BLENDFACTOR_INV_SRC_COLOR:
146		return V_028780_BLEND_ONE_MINUS_SRC_COLOR;
147	case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
148		return V_028780_BLEND_ONE_MINUS_SRC_ALPHA;
149	case PIPE_BLENDFACTOR_INV_DST_ALPHA:
150		return V_028780_BLEND_ONE_MINUS_DST_ALPHA;
151	case PIPE_BLENDFACTOR_INV_DST_COLOR:
152		return V_028780_BLEND_ONE_MINUS_DST_COLOR;
153	case PIPE_BLENDFACTOR_INV_CONST_COLOR:
154		return V_028780_BLEND_ONE_MINUS_CONST_COLOR;
155	case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
156		return V_028780_BLEND_ONE_MINUS_CONST_ALPHA;
157	case PIPE_BLENDFACTOR_SRC1_COLOR:
158		return V_028780_BLEND_SRC1_COLOR;
159	case PIPE_BLENDFACTOR_SRC1_ALPHA:
160		return V_028780_BLEND_SRC1_ALPHA;
161	case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
162		return V_028780_BLEND_INV_SRC1_COLOR;
163	case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
164		return V_028780_BLEND_INV_SRC1_ALPHA;
165	default:
166		R600_ERR("Bad blend factor %d not supported!\n", blend_fact);
167		assert(0);
168		break;
169	}
170	return 0;
171}
172
173static unsigned r600_tex_dim(struct r600_texture *rtex,
174			     unsigned view_target, unsigned nr_samples)
175{
176	unsigned res_target = rtex->resource.b.b.target;
177
178	if (view_target == PIPE_TEXTURE_CUBE ||
179	    view_target == PIPE_TEXTURE_CUBE_ARRAY)
180		res_target = view_target;
181		/* If interpreting cubemaps as something else, set 2D_ARRAY. */
182	else if (res_target == PIPE_TEXTURE_CUBE ||
183		 res_target == PIPE_TEXTURE_CUBE_ARRAY)
184		res_target = PIPE_TEXTURE_2D_ARRAY;
185
186	switch (res_target) {
187	default:
188	case PIPE_TEXTURE_1D:
189		return V_030000_SQ_TEX_DIM_1D;
190	case PIPE_TEXTURE_1D_ARRAY:
191		return V_030000_SQ_TEX_DIM_1D_ARRAY;
192	case PIPE_TEXTURE_2D:
193	case PIPE_TEXTURE_RECT:
194		return nr_samples > 1 ? V_030000_SQ_TEX_DIM_2D_MSAA :
195					V_030000_SQ_TEX_DIM_2D;
196	case PIPE_TEXTURE_2D_ARRAY:
197		return nr_samples > 1 ? V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA :
198					V_030000_SQ_TEX_DIM_2D_ARRAY;
199	case PIPE_TEXTURE_3D:
200		return V_030000_SQ_TEX_DIM_3D;
201	case PIPE_TEXTURE_CUBE:
202	case PIPE_TEXTURE_CUBE_ARRAY:
203		return V_030000_SQ_TEX_DIM_CUBEMAP;
204	}
205}
206
207static uint32_t r600_translate_dbformat(enum pipe_format format)
208{
209	switch (format) {
210	case PIPE_FORMAT_Z16_UNORM:
211		return V_028040_Z_16;
212	case PIPE_FORMAT_Z24X8_UNORM:
213	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
214	case PIPE_FORMAT_X8Z24_UNORM:
215	case PIPE_FORMAT_S8_UINT_Z24_UNORM:
216		return V_028040_Z_24;
217	case PIPE_FORMAT_Z32_FLOAT:
218	case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
219		return V_028040_Z_32_FLOAT;
220	default:
221		return ~0U;
222	}
223}
224
225static bool r600_is_sampler_format_supported(struct pipe_screen *screen, enum pipe_format format)
226{
227	return r600_translate_texformat(screen, format, NULL, NULL, NULL,
228                                   FALSE) != ~0U;
229}
230
231static bool r600_is_colorbuffer_format_supported(enum chip_class chip, enum pipe_format format)
232{
233	return r600_translate_colorformat(chip, format, FALSE) != ~0U &&
234		r600_translate_colorswap(format, FALSE) != ~0U;
235}
236
237static bool r600_is_zs_format_supported(enum pipe_format format)
238{
239	return r600_translate_dbformat(format) != ~0U;
240}
241
242boolean evergreen_is_format_supported(struct pipe_screen *screen,
243				      enum pipe_format format,
244				      enum pipe_texture_target target,
245				      unsigned sample_count,
246				      unsigned storage_sample_count,
247				      unsigned usage)
248{
249	struct r600_screen *rscreen = (struct r600_screen*)screen;
250	unsigned retval = 0;
251
252	if (target >= PIPE_MAX_TEXTURE_TYPES) {
253		R600_ERR("r600: unsupported texture type %d\n", target);
254		return FALSE;
255	}
256
257	if (MAX2(1, sample_count) != MAX2(1, storage_sample_count))
258		return false;
259
260	if (sample_count > 1) {
261		if (!rscreen->has_msaa)
262			return FALSE;
263
264		switch (sample_count) {
265		case 2:
266		case 4:
267		case 8:
268			break;
269		default:
270			return FALSE;
271		}
272	}
273
274	if (usage & PIPE_BIND_SAMPLER_VIEW) {
275		if (target == PIPE_BUFFER) {
276			if (r600_is_vertex_format_supported(format))
277				retval |= PIPE_BIND_SAMPLER_VIEW;
278		} else {
279			if (r600_is_sampler_format_supported(screen, format))
280				retval |= PIPE_BIND_SAMPLER_VIEW;
281		}
282	}
283
284	if ((usage & (PIPE_BIND_RENDER_TARGET |
285		      PIPE_BIND_DISPLAY_TARGET |
286		      PIPE_BIND_SCANOUT |
287		      PIPE_BIND_SHARED |
288		      PIPE_BIND_BLENDABLE)) &&
289	    r600_is_colorbuffer_format_supported(rscreen->b.chip_class, format)) {
290		retval |= usage &
291			  (PIPE_BIND_RENDER_TARGET |
292			   PIPE_BIND_DISPLAY_TARGET |
293			   PIPE_BIND_SCANOUT |
294			   PIPE_BIND_SHARED);
295		if (!util_format_is_pure_integer(format) &&
296		    !util_format_is_depth_or_stencil(format))
297			retval |= usage & PIPE_BIND_BLENDABLE;
298	}
299
300	if ((usage & PIPE_BIND_DEPTH_STENCIL) &&
301	    r600_is_zs_format_supported(format)) {
302		retval |= PIPE_BIND_DEPTH_STENCIL;
303	}
304
305	if ((usage & PIPE_BIND_VERTEX_BUFFER) &&
306	    r600_is_vertex_format_supported(format)) {
307		retval |= PIPE_BIND_VERTEX_BUFFER;
308	}
309
310	if ((usage & PIPE_BIND_LINEAR) &&
311	    !util_format_is_compressed(format) &&
312	    !(usage & PIPE_BIND_DEPTH_STENCIL))
313		retval |= PIPE_BIND_LINEAR;
314
315	return retval == usage;
316}
317
318static void *evergreen_create_blend_state_mode(struct pipe_context *ctx,
319					       const struct pipe_blend_state *state, int mode)
320{
321	uint32_t color_control = 0, target_mask = 0;
322	struct r600_blend_state *blend = CALLOC_STRUCT(r600_blend_state);
323
324	if (!blend) {
325		return NULL;
326	}
327
328	r600_init_command_buffer(&blend->buffer, 20);
329	r600_init_command_buffer(&blend->buffer_no_blend, 20);
330
331	if (state->logicop_enable) {
332		color_control |= (state->logicop_func << 16) | (state->logicop_func << 20);
333	} else {
334		color_control |= (0xcc << 16);
335	}
336	/* we pretend 8 buffer are used, CB_SHADER_MASK will disable unused one */
337	if (state->independent_blend_enable) {
338		for (int i = 0; i < 8; i++) {
339			target_mask |= (state->rt[i].colormask << (4 * i));
340		}
341	} else {
342		for (int i = 0; i < 8; i++) {
343			target_mask |= (state->rt[0].colormask << (4 * i));
344		}
345	}
346
347	/* only have dual source on MRT0 */
348	blend->dual_src_blend = util_blend_state_is_dual(state, 0);
349	blend->cb_target_mask = target_mask;
350	blend->alpha_to_one = state->alpha_to_one;
351
352	if (target_mask)
353		color_control |= S_028808_MODE(mode);
354	else
355		color_control |= S_028808_MODE(V_028808_CB_DISABLE);
356
357
358	r600_store_context_reg(&blend->buffer, R_028808_CB_COLOR_CONTROL, color_control);
359	r600_store_context_reg(&blend->buffer, R_028B70_DB_ALPHA_TO_MASK,
360			       S_028B70_ALPHA_TO_MASK_ENABLE(state->alpha_to_coverage) |
361			       S_028B70_ALPHA_TO_MASK_OFFSET0(2) |
362			       S_028B70_ALPHA_TO_MASK_OFFSET1(2) |
363			       S_028B70_ALPHA_TO_MASK_OFFSET2(2) |
364			       S_028B70_ALPHA_TO_MASK_OFFSET3(2));
365	r600_store_context_reg_seq(&blend->buffer, R_028780_CB_BLEND0_CONTROL, 8);
366
367	/* Copy over the dwords set so far into buffer_no_blend.
368	 * Only the CB_BLENDi_CONTROL registers must be set after this. */
369	memcpy(blend->buffer_no_blend.buf, blend->buffer.buf, blend->buffer.num_dw * 4);
370	blend->buffer_no_blend.num_dw = blend->buffer.num_dw;
371
372	for (int i = 0; i < 8; i++) {
373		/* state->rt entries > 0 only written if independent blending */
374		const int j = state->independent_blend_enable ? i : 0;
375
376		unsigned eqRGB = state->rt[j].rgb_func;
377		unsigned srcRGB = state->rt[j].rgb_src_factor;
378		unsigned dstRGB = state->rt[j].rgb_dst_factor;
379		unsigned eqA = state->rt[j].alpha_func;
380		unsigned srcA = state->rt[j].alpha_src_factor;
381		unsigned dstA = state->rt[j].alpha_dst_factor;
382		uint32_t bc = 0;
383
384		r600_store_value(&blend->buffer_no_blend, 0);
385
386		if (!state->rt[j].blend_enable) {
387			r600_store_value(&blend->buffer, 0);
388			continue;
389		}
390
391		bc |= S_028780_BLEND_CONTROL_ENABLE(1);
392		bc |= S_028780_COLOR_COMB_FCN(r600_translate_blend_function(eqRGB));
393		bc |= S_028780_COLOR_SRCBLEND(r600_translate_blend_factor(srcRGB));
394		bc |= S_028780_COLOR_DESTBLEND(r600_translate_blend_factor(dstRGB));
395
396		if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
397			bc |= S_028780_SEPARATE_ALPHA_BLEND(1);
398			bc |= S_028780_ALPHA_COMB_FCN(r600_translate_blend_function(eqA));
399			bc |= S_028780_ALPHA_SRCBLEND(r600_translate_blend_factor(srcA));
400			bc |= S_028780_ALPHA_DESTBLEND(r600_translate_blend_factor(dstA));
401		}
402		r600_store_value(&blend->buffer, bc);
403	}
404	return blend;
405}
406
407static void *evergreen_create_blend_state(struct pipe_context *ctx,
408					const struct pipe_blend_state *state)
409{
410
411	return evergreen_create_blend_state_mode(ctx, state, V_028808_CB_NORMAL);
412}
413
414static void *evergreen_create_dsa_state(struct pipe_context *ctx,
415				   const struct pipe_depth_stencil_alpha_state *state)
416{
417	unsigned db_depth_control, alpha_test_control, alpha_ref;
418	struct r600_dsa_state *dsa = CALLOC_STRUCT(r600_dsa_state);
419
420	if (!dsa) {
421		return NULL;
422	}
423
424	r600_init_command_buffer(&dsa->buffer, 3);
425
426	dsa->valuemask[0] = state->stencil[0].valuemask;
427	dsa->valuemask[1] = state->stencil[1].valuemask;
428	dsa->writemask[0] = state->stencil[0].writemask;
429	dsa->writemask[1] = state->stencil[1].writemask;
430	dsa->zwritemask = state->depth.writemask;
431
432	db_depth_control = S_028800_Z_ENABLE(state->depth.enabled) |
433		S_028800_Z_WRITE_ENABLE(state->depth.writemask) |
434		S_028800_ZFUNC(state->depth.func);
435
436	/* stencil */
437	if (state->stencil[0].enabled) {
438		db_depth_control |= S_028800_STENCIL_ENABLE(1);
439		db_depth_control |= S_028800_STENCILFUNC(state->stencil[0].func); /* translates straight */
440		db_depth_control |= S_028800_STENCILFAIL(r600_translate_stencil_op(state->stencil[0].fail_op));
441		db_depth_control |= S_028800_STENCILZPASS(r600_translate_stencil_op(state->stencil[0].zpass_op));
442		db_depth_control |= S_028800_STENCILZFAIL(r600_translate_stencil_op(state->stencil[0].zfail_op));
443
444		if (state->stencil[1].enabled) {
445			db_depth_control |= S_028800_BACKFACE_ENABLE(1);
446			db_depth_control |= S_028800_STENCILFUNC_BF(state->stencil[1].func); /* translates straight */
447			db_depth_control |= S_028800_STENCILFAIL_BF(r600_translate_stencil_op(state->stencil[1].fail_op));
448			db_depth_control |= S_028800_STENCILZPASS_BF(r600_translate_stencil_op(state->stencil[1].zpass_op));
449			db_depth_control |= S_028800_STENCILZFAIL_BF(r600_translate_stencil_op(state->stencil[1].zfail_op));
450		}
451	}
452
453	/* alpha */
454	alpha_test_control = 0;
455	alpha_ref = 0;
456	if (state->alpha.enabled) {
457		alpha_test_control = S_028410_ALPHA_FUNC(state->alpha.func);
458		alpha_test_control |= S_028410_ALPHA_TEST_ENABLE(1);
459		alpha_ref = fui(state->alpha.ref_value);
460	}
461	dsa->sx_alpha_test_control = alpha_test_control & 0xff;
462	dsa->alpha_ref = alpha_ref;
463
464	/* misc */
465	r600_store_context_reg(&dsa->buffer, R_028800_DB_DEPTH_CONTROL, db_depth_control);
466	return dsa;
467}
468
469static void *evergreen_create_rs_state(struct pipe_context *ctx,
470					const struct pipe_rasterizer_state *state)
471{
472	struct r600_context *rctx = (struct r600_context *)ctx;
473	unsigned tmp, spi_interp;
474	float psize_min, psize_max;
475	struct r600_rasterizer_state *rs = CALLOC_STRUCT(r600_rasterizer_state);
476
477	if (!rs) {
478		return NULL;
479	}
480
481	r600_init_command_buffer(&rs->buffer, 30);
482
483	rs->scissor_enable = state->scissor;
484	rs->clip_halfz = state->clip_halfz;
485	rs->flatshade = state->flatshade;
486	rs->sprite_coord_enable = state->sprite_coord_enable;
487	rs->rasterizer_discard = state->rasterizer_discard;
488	rs->two_side = state->light_twoside;
489	rs->clip_plane_enable = state->clip_plane_enable;
490	rs->pa_sc_line_stipple = state->line_stipple_enable ?
491				S_028A0C_LINE_PATTERN(state->line_stipple_pattern) |
492				S_028A0C_REPEAT_COUNT(state->line_stipple_factor) : 0;
493	rs->pa_cl_clip_cntl =
494		S_028810_DX_CLIP_SPACE_DEF(state->clip_halfz) |
495		S_028810_ZCLIP_NEAR_DISABLE(!state->depth_clip_near) |
496		S_028810_ZCLIP_FAR_DISABLE(!state->depth_clip_far) |
497		S_028810_DX_LINEAR_ATTR_CLIP_ENA(1) |
498		S_028810_DX_RASTERIZATION_KILL(state->rasterizer_discard);
499	rs->multisample_enable = state->multisample;
500
501	/* offset */
502	rs->offset_units = state->offset_units;
503	rs->offset_scale = state->offset_scale * 16.0f;
504	rs->offset_enable = state->offset_point || state->offset_line || state->offset_tri;
505	rs->offset_units_unscaled = state->offset_units_unscaled;
506
507	if (state->point_size_per_vertex) {
508		psize_min = util_get_min_point_size(state);
509		psize_max = 8192;
510	} else {
511		/* Force the point size to be as if the vertex output was disabled. */
512		psize_min = state->point_size;
513		psize_max = state->point_size;
514	}
515
516	spi_interp = S_0286D4_FLAT_SHADE_ENA(1);
517	if (state->sprite_coord_enable) {
518		spi_interp |= S_0286D4_PNT_SPRITE_ENA(1) |
519			      S_0286D4_PNT_SPRITE_OVRD_X(2) |
520			      S_0286D4_PNT_SPRITE_OVRD_Y(3) |
521			      S_0286D4_PNT_SPRITE_OVRD_Z(0) |
522			      S_0286D4_PNT_SPRITE_OVRD_W(1);
523		if (state->sprite_coord_mode != PIPE_SPRITE_COORD_UPPER_LEFT) {
524			spi_interp |= S_0286D4_PNT_SPRITE_TOP_1(1);
525		}
526	}
527
528	r600_store_context_reg_seq(&rs->buffer, R_028A00_PA_SU_POINT_SIZE, 3);
529	/* point size 12.4 fixed point (divide by two, because 0.5 = 1 pixel) */
530	tmp = r600_pack_float_12p4(state->point_size/2);
531	r600_store_value(&rs->buffer, /* R_028A00_PA_SU_POINT_SIZE */
532			 S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
533	r600_store_value(&rs->buffer, /* R_028A04_PA_SU_POINT_MINMAX */
534			 S_028A04_MIN_SIZE(r600_pack_float_12p4(psize_min/2)) |
535			 S_028A04_MAX_SIZE(r600_pack_float_12p4(psize_max/2)));
536	r600_store_value(&rs->buffer, /* R_028A08_PA_SU_LINE_CNTL */
537			 S_028A08_WIDTH((unsigned)(state->line_width * 8)));
538
539	r600_store_context_reg(&rs->buffer, R_0286D4_SPI_INTERP_CONTROL_0, spi_interp);
540	r600_store_context_reg(&rs->buffer, R_028A48_PA_SC_MODE_CNTL_0,
541			       S_028A48_MSAA_ENABLE(state->multisample) |
542			       S_028A48_VPORT_SCISSOR_ENABLE(1) |
543			       S_028A48_LINE_STIPPLE_ENABLE(state->line_stipple_enable));
544
545	if (rctx->b.chip_class == CAYMAN) {
546		r600_store_context_reg(&rs->buffer, CM_R_028BE4_PA_SU_VTX_CNTL,
547				       S_028C08_PIX_CENTER_HALF(state->half_pixel_center) |
548				       S_028C08_QUANT_MODE(V_028C08_X_1_256TH));
549	} else {
550		r600_store_context_reg(&rs->buffer, R_028C08_PA_SU_VTX_CNTL,
551				       S_028C08_PIX_CENTER_HALF(state->half_pixel_center) |
552				       S_028C08_QUANT_MODE(V_028C08_X_1_256TH));
553	}
554
555	r600_store_context_reg(&rs->buffer, R_028B7C_PA_SU_POLY_OFFSET_CLAMP, fui(state->offset_clamp));
556	r600_store_context_reg(&rs->buffer, R_028814_PA_SU_SC_MODE_CNTL,
557			       S_028814_PROVOKING_VTX_LAST(!state->flatshade_first) |
558			       S_028814_CULL_FRONT((state->cull_face & PIPE_FACE_FRONT) ? 1 : 0) |
559			       S_028814_CULL_BACK((state->cull_face & PIPE_FACE_BACK) ? 1 : 0) |
560			       S_028814_FACE(!state->front_ccw) |
561			       S_028814_POLY_OFFSET_FRONT_ENABLE(util_get_offset(state, state->fill_front)) |
562			       S_028814_POLY_OFFSET_BACK_ENABLE(util_get_offset(state, state->fill_back)) |
563			       S_028814_POLY_OFFSET_PARA_ENABLE(state->offset_point || state->offset_line) |
564			       S_028814_POLY_MODE(state->fill_front != PIPE_POLYGON_MODE_FILL ||
565						  state->fill_back != PIPE_POLYGON_MODE_FILL) |
566			       S_028814_POLYMODE_FRONT_PTYPE(r600_translate_fill(state->fill_front)) |
567			       S_028814_POLYMODE_BACK_PTYPE(r600_translate_fill(state->fill_back)));
568	return rs;
569}
570
571static void *evergreen_create_sampler_state(struct pipe_context *ctx,
572					const struct pipe_sampler_state *state)
573{
574	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
575	struct r600_pipe_sampler_state *ss = CALLOC_STRUCT(r600_pipe_sampler_state);
576	unsigned max_aniso = rscreen->force_aniso >= 0 ? rscreen->force_aniso
577						       : state->max_anisotropy;
578	unsigned max_aniso_ratio = r600_tex_aniso_filter(max_aniso);
579	float max_lod = state->max_lod;
580
581	if (!ss) {
582		return NULL;
583	}
584
585	/* If the min_mip_filter is NONE, then the texture has no mipmapping and
586	 * MIP_FILTER will also be set to NONE. However, if more then one LOD is
587	 * configured, then the texture lookup seems to fail for some specific texture
588	 * formats. Forcing the number of LODs to one in this case fixes it. */
589	if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
590		max_lod = state->min_lod;
591
592	ss->border_color_use = sampler_state_needs_border_color(state);
593
594	/* R_03C000_SQ_TEX_SAMPLER_WORD0_0 */
595	ss->tex_sampler_words[0] =
596		S_03C000_CLAMP_X(r600_tex_wrap(state->wrap_s)) |
597		S_03C000_CLAMP_Y(r600_tex_wrap(state->wrap_t)) |
598		S_03C000_CLAMP_Z(r600_tex_wrap(state->wrap_r)) |
599		S_03C000_XY_MAG_FILTER(eg_tex_filter(state->mag_img_filter, max_aniso)) |
600		S_03C000_XY_MIN_FILTER(eg_tex_filter(state->min_img_filter, max_aniso)) |
601		S_03C000_MIP_FILTER(r600_tex_mipfilter(state->min_mip_filter)) |
602		S_03C000_MAX_ANISO_RATIO(max_aniso_ratio) |
603		S_03C000_DEPTH_COMPARE_FUNCTION(r600_tex_compare(state->compare_func)) |
604		S_03C000_BORDER_COLOR_TYPE(ss->border_color_use ? V_03C000_SQ_TEX_BORDER_COLOR_REGISTER : 0);
605	/* R_03C004_SQ_TEX_SAMPLER_WORD1_0 */
606	ss->tex_sampler_words[1] =
607		S_03C004_MIN_LOD(S_FIXED(CLAMP(state->min_lod, 0, 15), 8)) |
608		S_03C004_MAX_LOD(S_FIXED(CLAMP(max_lod, 0, 15), 8));
609	/* R_03C008_SQ_TEX_SAMPLER_WORD2_0 */
610	ss->tex_sampler_words[2] =
611		S_03C008_LOD_BIAS(S_FIXED(CLAMP(state->lod_bias, -16, 16), 8)) |
612		(state->seamless_cube_map ? 0 : S_03C008_DISABLE_CUBE_WRAP(1)) |
613		S_03C008_TYPE(1);
614
615	if (ss->border_color_use) {
616		memcpy(&ss->border_color, &state->border_color, sizeof(state->border_color));
617	}
618	return ss;
619}
620
621struct eg_buf_res_params {
622	enum pipe_format pipe_format;
623	unsigned offset;
624	unsigned size;
625	unsigned char swizzle[4];
626	bool uncached;
627	bool force_swizzle;
628	bool size_in_bytes;
629};
630
631static void evergreen_fill_buffer_resource_words(struct r600_context *rctx,
632						 struct pipe_resource *buffer,
633						 struct eg_buf_res_params *params,
634						 bool *skip_mip_address_reloc,
635						 unsigned tex_resource_words[8])
636{
637	struct r600_texture *tmp = (struct r600_texture*)buffer;
638	uint64_t va;
639	int stride = util_format_get_blocksize(params->pipe_format);
640	unsigned format, num_format, format_comp, endian;
641	unsigned swizzle_res;
642	const struct util_format_description *desc;
643
644	r600_vertex_data_type(params->pipe_format,
645			      &format, &num_format, &format_comp,
646			      &endian);
647
648	desc = util_format_description(params->pipe_format);
649
650	if (params->force_swizzle)
651		swizzle_res = r600_get_swizzle_combined(params->swizzle, NULL, TRUE);
652	else
653		swizzle_res = r600_get_swizzle_combined(desc->swizzle, params->swizzle, TRUE);
654
655	va = tmp->resource.gpu_address + params->offset;
656	*skip_mip_address_reloc = true;
657	tex_resource_words[0] = va;
658	tex_resource_words[1] = params->size - 1;
659	tex_resource_words[2] = S_030008_BASE_ADDRESS_HI(va >> 32UL) |
660		S_030008_STRIDE(stride) |
661		S_030008_DATA_FORMAT(format) |
662		S_030008_NUM_FORMAT_ALL(num_format) |
663		S_030008_FORMAT_COMP_ALL(format_comp) |
664		S_030008_ENDIAN_SWAP(endian);
665	tex_resource_words[3] = swizzle_res | S_03000C_UNCACHED(params->uncached);
666	/*
667	 * dword 4 is for number of elements, for use with resinfo,
668	 * albeit the amd gpu shader analyser
669	 * uses a const buffer to store the element sizes for buffer txq
670	 */
671	tex_resource_words[4] = params->size_in_bytes ? params->size : (params->size / stride);
672
673	tex_resource_words[5] = tex_resource_words[6] = 0;
674	tex_resource_words[7] = S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_BUFFER);
675}
676
677static struct pipe_sampler_view *
678texture_buffer_sampler_view(struct r600_context *rctx,
679			    struct r600_pipe_sampler_view *view,
680			    unsigned width0, unsigned height0)
681{
682	struct r600_texture *tmp = (struct r600_texture*)view->base.texture;
683	struct eg_buf_res_params params;
684
685	memset(&params, 0, sizeof(params));
686
687	params.pipe_format = view->base.format;
688	params.offset = view->base.u.buf.offset;
689	params.size = view->base.u.buf.size;
690	params.swizzle[0] = view->base.swizzle_r;
691	params.swizzle[1] = view->base.swizzle_g;
692	params.swizzle[2] = view->base.swizzle_b;
693	params.swizzle[3] = view->base.swizzle_a;
694
695	evergreen_fill_buffer_resource_words(rctx, view->base.texture,
696					     &params, &view->skip_mip_address_reloc,
697					     view->tex_resource_words);
698	view->tex_resource = &tmp->resource;
699
700	if (tmp->resource.gpu_address)
701		LIST_ADDTAIL(&view->list, &rctx->texture_buffers);
702	return &view->base;
703}
704
705struct eg_tex_res_params {
706	enum pipe_format pipe_format;
707	int force_level;
708	unsigned width0;
709	unsigned height0;
710	unsigned first_level;
711	unsigned last_level;
712	unsigned first_layer;
713	unsigned last_layer;
714	unsigned target;
715	unsigned char swizzle[4];
716};
717
718static int evergreen_fill_tex_resource_words(struct r600_context *rctx,
719					     struct pipe_resource *texture,
720					     struct eg_tex_res_params *params,
721					     bool *skip_mip_address_reloc,
722					     unsigned tex_resource_words[8])
723{
724	struct r600_screen *rscreen = (struct r600_screen*)rctx->b.b.screen;
725	struct r600_texture *tmp = (struct r600_texture*)texture;
726	unsigned format, endian;
727	uint32_t word4 = 0, yuv_format = 0, pitch = 0;
728	unsigned char array_mode = 0, non_disp_tiling = 0;
729	unsigned height, depth, width;
730	unsigned macro_aspect, tile_split, bankh, bankw, nbanks, fmask_bankh;
731	struct legacy_surf_level *surflevel;
732	unsigned base_level, first_level, last_level;
733	unsigned dim, last_layer;
734	uint64_t va;
735	bool do_endian_swap = FALSE;
736
737	tile_split = tmp->surface.u.legacy.tile_split;
738	surflevel = tmp->surface.u.legacy.level;
739
740	/* Texturing with separate depth and stencil. */
741	if (tmp->db_compatible) {
742		switch (params->pipe_format) {
743		case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
744			params->pipe_format = PIPE_FORMAT_Z32_FLOAT;
745			break;
746		case PIPE_FORMAT_X8Z24_UNORM:
747		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
748			/* Z24 is always stored like this for DB
749			 * compatibility.
750			 */
751			params->pipe_format = PIPE_FORMAT_Z24X8_UNORM;
752			break;
753		case PIPE_FORMAT_X24S8_UINT:
754		case PIPE_FORMAT_S8X24_UINT:
755		case PIPE_FORMAT_X32_S8X24_UINT:
756			params->pipe_format = PIPE_FORMAT_S8_UINT;
757			tile_split = tmp->surface.u.legacy.stencil_tile_split;
758			surflevel = tmp->surface.u.legacy.stencil_level;
759			break;
760		default:;
761		}
762	}
763
764	if (R600_BIG_ENDIAN)
765		do_endian_swap = !tmp->db_compatible;
766
767	format = r600_translate_texformat(rctx->b.b.screen, params->pipe_format,
768					  params->swizzle,
769					  &word4, &yuv_format, do_endian_swap);
770	assert(format != ~0);
771	if (format == ~0) {
772		return -1;
773	}
774
775	endian = r600_colorformat_endian_swap(format, do_endian_swap);
776
777	base_level = 0;
778	first_level = params->first_level;
779	last_level = params->last_level;
780	width = params->width0;
781	height = params->height0;
782	depth = texture->depth0;
783
784	if (params->force_level) {
785		base_level = params->force_level;
786		first_level = 0;
787		last_level = 0;
788		width = u_minify(width, params->force_level);
789		height = u_minify(height, params->force_level);
790		depth = u_minify(depth, params->force_level);
791	}
792
793	pitch = surflevel[base_level].nblk_x * util_format_get_blockwidth(params->pipe_format);
794	non_disp_tiling = tmp->non_disp_tiling;
795
796	switch (surflevel[base_level].mode) {
797	default:
798	case RADEON_SURF_MODE_LINEAR_ALIGNED:
799		array_mode = V_028C70_ARRAY_LINEAR_ALIGNED;
800		break;
801	case RADEON_SURF_MODE_2D:
802		array_mode = V_028C70_ARRAY_2D_TILED_THIN1;
803		break;
804	case RADEON_SURF_MODE_1D:
805		array_mode = V_028C70_ARRAY_1D_TILED_THIN1;
806		break;
807	}
808	macro_aspect = tmp->surface.u.legacy.mtilea;
809	bankw = tmp->surface.u.legacy.bankw;
810	bankh = tmp->surface.u.legacy.bankh;
811	tile_split = eg_tile_split(tile_split);
812	macro_aspect = eg_macro_tile_aspect(macro_aspect);
813	bankw = eg_bank_wh(bankw);
814	bankh = eg_bank_wh(bankh);
815	fmask_bankh = eg_bank_wh(tmp->fmask.bank_height);
816
817	/* 128 bit formats require tile type = 1 */
818	if (rscreen->b.chip_class == CAYMAN) {
819		if (util_format_get_blocksize(params->pipe_format) >= 16)
820			non_disp_tiling = 1;
821	}
822	nbanks = eg_num_banks(rscreen->b.info.r600_num_banks);
823
824
825	va = tmp->resource.gpu_address;
826
827	/* array type views and views into array types need to use layer offset */
828	dim = r600_tex_dim(tmp, params->target, texture->nr_samples);
829
830	if (dim == V_030000_SQ_TEX_DIM_1D_ARRAY) {
831	        height = 1;
832		depth = texture->array_size;
833	} else if (dim == V_030000_SQ_TEX_DIM_2D_ARRAY ||
834		   dim == V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA) {
835		depth = texture->array_size;
836	} else if (dim == V_030000_SQ_TEX_DIM_CUBEMAP)
837		depth = texture->array_size / 6;
838
839	tex_resource_words[0] = (S_030000_DIM(dim) |
840				 S_030000_PITCH((pitch / 8) - 1) |
841				 S_030000_TEX_WIDTH(width - 1));
842	if (rscreen->b.chip_class == CAYMAN)
843		tex_resource_words[0] |= CM_S_030000_NON_DISP_TILING_ORDER(non_disp_tiling);
844	else
845		tex_resource_words[0] |= S_030000_NON_DISP_TILING_ORDER(non_disp_tiling);
846	tex_resource_words[1] = (S_030004_TEX_HEIGHT(height - 1) |
847				       S_030004_TEX_DEPTH(depth - 1) |
848				       S_030004_ARRAY_MODE(array_mode));
849	tex_resource_words[2] = (surflevel[base_level].offset + va) >> 8;
850
851	*skip_mip_address_reloc = false;
852	/* TEX_RESOURCE_WORD3.MIP_ADDRESS */
853	if (texture->nr_samples > 1 && rscreen->has_compressed_msaa_texturing) {
854		if (tmp->is_depth) {
855			/* disable FMASK (0 = disabled) */
856			tex_resource_words[3] = 0;
857			*skip_mip_address_reloc = true;
858		} else {
859			/* FMASK should be in MIP_ADDRESS for multisample textures */
860			tex_resource_words[3] = (tmp->fmask.offset + va) >> 8;
861		}
862	} else if (last_level && texture->nr_samples <= 1) {
863		tex_resource_words[3] = (surflevel[1].offset + va) >> 8;
864	} else {
865		tex_resource_words[3] = (surflevel[base_level].offset + va) >> 8;
866	}
867
868	last_layer = params->last_layer;
869	if (params->target != texture->target && depth == 1) {
870		last_layer = params->first_layer;
871	}
872	tex_resource_words[4] = (word4 |
873				 S_030010_ENDIAN_SWAP(endian));
874	tex_resource_words[5] = S_030014_BASE_ARRAY(params->first_layer) |
875		                S_030014_LAST_ARRAY(last_layer);
876	tex_resource_words[6] = S_030018_TILE_SPLIT(tile_split);
877
878	if (texture->nr_samples > 1) {
879		unsigned log_samples = util_logbase2(texture->nr_samples);
880		if (rscreen->b.chip_class == CAYMAN) {
881			tex_resource_words[4] |= S_030010_LOG2_NUM_FRAGMENTS(log_samples);
882		}
883		/* LAST_LEVEL holds log2(nr_samples) for multisample textures */
884		tex_resource_words[5] |= S_030014_LAST_LEVEL(log_samples);
885		tex_resource_words[6] |= S_030018_FMASK_BANK_HEIGHT(fmask_bankh);
886	} else {
887		bool no_mip = first_level == last_level;
888
889		tex_resource_words[4] |= S_030010_BASE_LEVEL(first_level);
890		tex_resource_words[5] |= S_030014_LAST_LEVEL(last_level);
891		/* aniso max 16 samples */
892		tex_resource_words[6] |= S_030018_MAX_ANISO_RATIO(no_mip ? 0 : 4);
893	}
894
895	tex_resource_words[7] = S_03001C_DATA_FORMAT(format) |
896				      S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_TEXTURE) |
897				      S_03001C_BANK_WIDTH(bankw) |
898				      S_03001C_BANK_HEIGHT(bankh) |
899				      S_03001C_MACRO_TILE_ASPECT(macro_aspect) |
900				      S_03001C_NUM_BANKS(nbanks) |
901				      S_03001C_DEPTH_SAMPLE_ORDER(tmp->db_compatible);
902	return 0;
903}
904
905struct pipe_sampler_view *
906evergreen_create_sampler_view_custom(struct pipe_context *ctx,
907				     struct pipe_resource *texture,
908				     const struct pipe_sampler_view *state,
909				     unsigned width0, unsigned height0,
910				     unsigned force_level)
911{
912	struct r600_context *rctx = (struct r600_context*)ctx;
913	struct r600_pipe_sampler_view *view = CALLOC_STRUCT(r600_pipe_sampler_view);
914	struct r600_texture *tmp = (struct r600_texture*)texture;
915	struct eg_tex_res_params params;
916	int ret;
917
918	if (!view)
919		return NULL;
920
921	/* initialize base object */
922	view->base = *state;
923	view->base.texture = NULL;
924	pipe_reference(NULL, &texture->reference);
925	view->base.texture = texture;
926	view->base.reference.count = 1;
927	view->base.context = ctx;
928
929	if (state->target == PIPE_BUFFER)
930		return texture_buffer_sampler_view(rctx, view, width0, height0);
931
932	memset(&params, 0, sizeof(params));
933	params.pipe_format = state->format;
934	params.force_level = force_level;
935	params.width0 = width0;
936	params.height0 = height0;
937	params.first_level = state->u.tex.first_level;
938	params.last_level = state->u.tex.last_level;
939	params.first_layer = state->u.tex.first_layer;
940	params.last_layer = state->u.tex.last_layer;
941	params.target = state->target;
942	params.swizzle[0] = state->swizzle_r;
943	params.swizzle[1] = state->swizzle_g;
944	params.swizzle[2] = state->swizzle_b;
945	params.swizzle[3] = state->swizzle_a;
946
947	ret = evergreen_fill_tex_resource_words(rctx, texture, &params,
948						&view->skip_mip_address_reloc,
949						view->tex_resource_words);
950	if (ret != 0) {
951		FREE(view);
952		return NULL;
953	}
954
955	if (state->format == PIPE_FORMAT_X24S8_UINT ||
956	    state->format == PIPE_FORMAT_S8X24_UINT ||
957	    state->format == PIPE_FORMAT_X32_S8X24_UINT ||
958	    state->format == PIPE_FORMAT_S8_UINT)
959		view->is_stencil_sampler = true;
960
961	view->tex_resource = &tmp->resource;
962
963	return &view->base;
964}
965
966static struct pipe_sampler_view *
967evergreen_create_sampler_view(struct pipe_context *ctx,
968			      struct pipe_resource *tex,
969			      const struct pipe_sampler_view *state)
970{
971	return evergreen_create_sampler_view_custom(ctx, tex, state,
972						    tex->width0, tex->height0, 0);
973}
974
975static void evergreen_emit_config_state(struct r600_context *rctx, struct r600_atom *atom)
976{
977	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
978	struct r600_config_state *a = (struct r600_config_state*)atom;
979
980	radeon_set_config_reg_seq(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, 3);
981	if (a->dyn_gpr_enabled) {
982		radeon_emit(cs, S_008C04_NUM_CLAUSE_TEMP_GPRS(rctx->r6xx_num_clause_temp_gprs));
983		radeon_emit(cs, 0);
984		radeon_emit(cs, 0);
985	} else {
986		radeon_emit(cs, a->sq_gpr_resource_mgmt_1);
987		radeon_emit(cs, a->sq_gpr_resource_mgmt_2);
988		radeon_emit(cs, a->sq_gpr_resource_mgmt_3);
989	}
990	radeon_set_config_reg(cs, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, (a->dyn_gpr_enabled << 8));
991	if (a->dyn_gpr_enabled) {
992		radeon_set_context_reg(cs, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
993				       S_028838_PS_GPRS(0x1e) |
994				       S_028838_VS_GPRS(0x1e) |
995				       S_028838_GS_GPRS(0x1e) |
996				       S_028838_ES_GPRS(0x1e) |
997				       S_028838_HS_GPRS(0x1e) |
998				       S_028838_LS_GPRS(0x1e)); /* workaround for hw issues with dyn gpr - must set all limits to 240 instead of 0, 0x1e == 240 / 8*/
999	}
1000}
1001
1002static void evergreen_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom)
1003{
1004	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1005	struct pipe_clip_state *state = &rctx->clip_state.state;
1006
1007	radeon_set_context_reg_seq(cs, R_0285BC_PA_CL_UCP0_X, 6*4);
1008	radeon_emit_array(cs, (unsigned*)state, 6*4);
1009}
1010
1011static void evergreen_set_polygon_stipple(struct pipe_context *ctx,
1012					 const struct pipe_poly_stipple *state)
1013{
1014}
1015
1016static void evergreen_get_scissor_rect(struct r600_context *rctx,
1017				       unsigned tl_x, unsigned tl_y, unsigned br_x, unsigned br_y,
1018				       uint32_t *tl, uint32_t *br)
1019{
1020	struct pipe_scissor_state scissor = {tl_x, tl_y, br_x, br_y};
1021
1022	evergreen_apply_scissor_bug_workaround(&rctx->b, &scissor);
1023
1024	*tl = S_028240_TL_X(scissor.minx) | S_028240_TL_Y(scissor.miny);
1025	*br = S_028244_BR_X(scissor.maxx) | S_028244_BR_Y(scissor.maxy);
1026}
1027
1028struct r600_tex_color_info {
1029	unsigned info;
1030	unsigned view;
1031	unsigned dim;
1032	unsigned pitch;
1033	unsigned slice;
1034	unsigned attrib;
1035	unsigned ntype;
1036	unsigned fmask;
1037	unsigned fmask_slice;
1038	uint64_t offset;
1039	boolean export_16bpc;
1040};
1041
1042static void evergreen_set_color_surface_buffer(struct r600_context *rctx,
1043					       struct r600_resource *res,
1044					       enum pipe_format pformat,
1045					       unsigned first_element,
1046					       unsigned last_element,
1047					       struct r600_tex_color_info *color)
1048{
1049	unsigned format, swap, ntype, endian;
1050	const struct util_format_description *desc;
1051	unsigned block_size = util_format_get_blocksize(res->b.b.format);
1052	unsigned pitch_alignment =
1053		MAX2(64, rctx->screen->b.info.pipe_interleave_bytes / block_size);
1054	unsigned pitch = align(res->b.b.width0, pitch_alignment);
1055	int i;
1056	unsigned width_elements;
1057
1058	width_elements = last_element - first_element + 1;
1059
1060	format = r600_translate_colorformat(rctx->b.chip_class, pformat, FALSE);
1061	swap = r600_translate_colorswap(pformat, FALSE);
1062
1063	endian = r600_colorformat_endian_swap(format, FALSE);
1064
1065	desc = util_format_description(pformat);
1066	for (i = 0; i < 4; i++) {
1067		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
1068			break;
1069		}
1070	}
1071	ntype = V_028C70_NUMBER_UNORM;
1072	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
1073		ntype = V_028C70_NUMBER_SRGB;
1074	else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
1075		if (desc->channel[i].normalized)
1076			ntype = V_028C70_NUMBER_SNORM;
1077		else if (desc->channel[i].pure_integer)
1078			ntype = V_028C70_NUMBER_SINT;
1079	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
1080		if (desc->channel[i].normalized)
1081			ntype = V_028C70_NUMBER_UNORM;
1082		else if (desc->channel[i].pure_integer)
1083			ntype = V_028C70_NUMBER_UINT;
1084	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT) {
1085		ntype = V_028C70_NUMBER_FLOAT;
1086	}
1087
1088	pitch = (pitch / 8) - 1;
1089	color->pitch = S_028C64_PITCH_TILE_MAX(pitch);
1090
1091	color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_LINEAR_ALIGNED);
1092	color->info |= S_028C70_FORMAT(format) |
1093		       S_028C70_COMP_SWAP(swap) |
1094		       S_028C70_BLEND_CLAMP(0) |
1095		       S_028C70_BLEND_BYPASS(1) |
1096		       S_028C70_NUMBER_TYPE(ntype) |
1097		       S_028C70_ENDIAN(endian);
1098	color->attrib = S_028C74_NON_DISP_TILING_ORDER(1);
1099	color->ntype = ntype;
1100	color->export_16bpc = false;
1101	color->dim = width_elements - 1;
1102	color->slice = 0; /* (width_elements / 64) - 1;*/
1103	color->view = 0;
1104	color->offset = (res->gpu_address + first_element) >> 8;
1105
1106	color->fmask = color->offset;
1107	color->fmask_slice = 0;
1108}
1109
1110static void evergreen_set_color_surface_common(struct r600_context *rctx,
1111					       struct r600_texture *rtex,
1112					       unsigned level,
1113					       unsigned first_layer,
1114					       unsigned last_layer,
1115					       enum pipe_format pformat,
1116					       struct r600_tex_color_info *color)
1117{
1118	struct r600_screen *rscreen = rctx->screen;
1119	unsigned pitch, slice;
1120	unsigned non_disp_tiling, macro_aspect, tile_split, bankh, bankw, fmask_bankh, nbanks;
1121	unsigned format, swap, ntype, endian;
1122	const struct util_format_description *desc;
1123	bool blend_clamp = 0, blend_bypass = 0, do_endian_swap = FALSE;
1124	int i;
1125
1126	color->offset = rtex->surface.u.legacy.level[level].offset;
1127	color->view = S_028C6C_SLICE_START(first_layer) |
1128			S_028C6C_SLICE_MAX(last_layer);
1129
1130	color->offset += rtex->resource.gpu_address;
1131	color->offset >>= 8;
1132
1133	color->dim = 0;
1134	pitch = (rtex->surface.u.legacy.level[level].nblk_x) / 8 - 1;
1135	slice = (rtex->surface.u.legacy.level[level].nblk_x * rtex->surface.u.legacy.level[level].nblk_y) / 64;
1136	if (slice) {
1137		slice = slice - 1;
1138	}
1139
1140	color->info = 0;
1141	switch (rtex->surface.u.legacy.level[level].mode) {
1142	default:
1143	case RADEON_SURF_MODE_LINEAR_ALIGNED:
1144		color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_LINEAR_ALIGNED);
1145		non_disp_tiling = 1;
1146		break;
1147	case RADEON_SURF_MODE_1D:
1148		color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_1D_TILED_THIN1);
1149		non_disp_tiling = rtex->non_disp_tiling;
1150		break;
1151	case RADEON_SURF_MODE_2D:
1152		color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_2D_TILED_THIN1);
1153		non_disp_tiling = rtex->non_disp_tiling;
1154		break;
1155	}
1156	tile_split = rtex->surface.u.legacy.tile_split;
1157	macro_aspect = rtex->surface.u.legacy.mtilea;
1158	bankw = rtex->surface.u.legacy.bankw;
1159	bankh = rtex->surface.u.legacy.bankh;
1160	if (rtex->fmask.size)
1161		fmask_bankh = rtex->fmask.bank_height;
1162	else
1163		fmask_bankh = rtex->surface.u.legacy.bankh;
1164	tile_split = eg_tile_split(tile_split);
1165	macro_aspect = eg_macro_tile_aspect(macro_aspect);
1166	bankw = eg_bank_wh(bankw);
1167	bankh = eg_bank_wh(bankh);
1168	fmask_bankh = eg_bank_wh(fmask_bankh);
1169
1170	if (rscreen->b.chip_class == CAYMAN) {
1171		if (util_format_get_blocksize(pformat) >= 16)
1172			non_disp_tiling = 1;
1173	}
1174	nbanks = eg_num_banks(rscreen->b.info.r600_num_banks);
1175	desc = util_format_description(pformat);
1176	for (i = 0; i < 4; i++) {
1177		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
1178			break;
1179		}
1180	}
1181	color->attrib = S_028C74_TILE_SPLIT(tile_split)|
1182		S_028C74_NUM_BANKS(nbanks) |
1183		S_028C74_BANK_WIDTH(bankw) |
1184		S_028C74_BANK_HEIGHT(bankh) |
1185		S_028C74_MACRO_TILE_ASPECT(macro_aspect) |
1186		S_028C74_NON_DISP_TILING_ORDER(non_disp_tiling) |
1187		S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);
1188
1189	if (rctx->b.chip_class == CAYMAN) {
1190		color->attrib |= S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] ==
1191							   PIPE_SWIZZLE_1);
1192
1193		if (rtex->resource.b.b.nr_samples > 1) {
1194			unsigned log_samples = util_logbase2(rtex->resource.b.b.nr_samples);
1195			color->attrib |= S_028C74_NUM_SAMPLES(log_samples) |
1196					S_028C74_NUM_FRAGMENTS(log_samples);
1197		}
1198	}
1199
1200	ntype = V_028C70_NUMBER_UNORM;
1201	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
1202		ntype = V_028C70_NUMBER_SRGB;
1203	else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
1204		if (desc->channel[i].normalized)
1205			ntype = V_028C70_NUMBER_SNORM;
1206		else if (desc->channel[i].pure_integer)
1207			ntype = V_028C70_NUMBER_SINT;
1208	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
1209		if (desc->channel[i].normalized)
1210			ntype = V_028C70_NUMBER_UNORM;
1211		else if (desc->channel[i].pure_integer)
1212			ntype = V_028C70_NUMBER_UINT;
1213	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT) {
1214		ntype = V_028C70_NUMBER_FLOAT;
1215	}
1216
1217	if (R600_BIG_ENDIAN)
1218		do_endian_swap = !rtex->db_compatible;
1219
1220	format = r600_translate_colorformat(rctx->b.chip_class, pformat, do_endian_swap);
1221	assert(format != ~0);
1222	swap = r600_translate_colorswap(pformat, do_endian_swap);
1223	assert(swap != ~0);
1224
1225	endian = r600_colorformat_endian_swap(format, do_endian_swap);
1226
1227	/* blend clamp should be set for all NORM/SRGB types */
1228	if (ntype == V_028C70_NUMBER_UNORM || ntype == V_028C70_NUMBER_SNORM ||
1229	    ntype == V_028C70_NUMBER_SRGB)
1230		blend_clamp = 1;
1231
1232	/* set blend bypass according to docs if SINT/UINT or
1233	   8/24 COLOR variants */
1234	if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
1235	    format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
1236	    format == V_028C70_COLOR_X24_8_32_FLOAT) {
1237		blend_clamp = 0;
1238		blend_bypass = 1;
1239	}
1240
1241	color->ntype = ntype;
1242	color->info |= S_028C70_FORMAT(format) |
1243		S_028C70_COMP_SWAP(swap) |
1244		S_028C70_BLEND_CLAMP(blend_clamp) |
1245		S_028C70_BLEND_BYPASS(blend_bypass) |
1246		S_028C70_SIMPLE_FLOAT(1) |
1247		S_028C70_NUMBER_TYPE(ntype) |
1248		S_028C70_ENDIAN(endian);
1249
1250	if (rtex->fmask.size) {
1251		color->info |= S_028C70_COMPRESSION(1);
1252	}
1253
1254	/* EXPORT_NORM is an optimzation that can be enabled for better
1255	 * performance in certain cases.
1256	 * EXPORT_NORM can be enabled if:
1257	 * - 11-bit or smaller UNORM/SNORM/SRGB
1258	 * - 16-bit or smaller FLOAT
1259	 */
1260	color->export_16bpc = false;
1261	if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS &&
1262	    ((desc->channel[i].size < 12 &&
1263	      desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT &&
1264	      ntype != V_028C70_NUMBER_UINT && ntype != V_028C70_NUMBER_SINT) ||
1265	     (desc->channel[i].size < 17 &&
1266	      desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT))) {
1267		color->info |= S_028C70_SOURCE_FORMAT(V_028C70_EXPORT_4C_16BPC);
1268		color->export_16bpc = true;
1269	}
1270
1271	color->pitch = S_028C64_PITCH_TILE_MAX(pitch);
1272	color->slice = S_028C68_SLICE_TILE_MAX(slice);
1273
1274	if (rtex->fmask.size) {
1275		color->fmask = (rtex->resource.gpu_address + rtex->fmask.offset) >> 8;
1276		color->fmask_slice = S_028C88_TILE_MAX(rtex->fmask.slice_tile_max);
1277	} else {
1278		color->fmask = color->offset;
1279		color->fmask_slice = S_028C88_TILE_MAX(slice);
1280	}
1281}
1282
1283/**
1284 * This function intializes the CB* register values for RATs.  It is meant
1285 * to be used for 1D aligned buffers that do not have an associated
1286 * radeon_surf.
1287 */
1288void evergreen_init_color_surface_rat(struct r600_context *rctx,
1289					struct r600_surface *surf)
1290{
1291	struct pipe_resource *pipe_buffer = surf->base.texture;
1292	struct r600_tex_color_info color;
1293
1294	evergreen_set_color_surface_buffer(rctx, (struct r600_resource *)surf->base.texture,
1295					   surf->base.format, 0, pipe_buffer->width0,
1296					   &color);
1297
1298	surf->cb_color_base = color.offset;
1299	surf->cb_color_dim = color.dim;
1300	surf->cb_color_info = color.info | S_028C70_RAT(1);
1301	surf->cb_color_pitch = color.pitch;
1302	surf->cb_color_slice = color.slice;
1303	surf->cb_color_view = color.view;
1304	surf->cb_color_attrib = color.attrib;
1305	surf->cb_color_fmask = color.fmask;
1306	surf->cb_color_fmask_slice = color.fmask_slice;
1307
1308	surf->cb_color_view = 0;
1309
1310	/* Set the buffer range the GPU will have access to: */
1311	util_range_add(&r600_resource(pipe_buffer)->valid_buffer_range,
1312		       0, pipe_buffer->width0);
1313}
1314
1315
1316void evergreen_init_color_surface(struct r600_context *rctx,
1317				  struct r600_surface *surf)
1318{
1319	struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
1320	unsigned level = surf->base.u.tex.level;
1321	struct r600_tex_color_info color;
1322
1323	evergreen_set_color_surface_common(rctx, rtex, level,
1324					   surf->base.u.tex.first_layer,
1325					   surf->base.u.tex.last_layer,
1326					   surf->base.format,
1327					   &color);
1328
1329	surf->alphatest_bypass = color.ntype == V_028C70_NUMBER_UINT ||
1330		color.ntype == V_028C70_NUMBER_SINT;
1331	surf->export_16bpc = color.export_16bpc;
1332
1333	/* XXX handle enabling of CB beyond BASE8 which has different offset */
1334	surf->cb_color_base = color.offset;
1335	surf->cb_color_dim = color.dim;
1336	surf->cb_color_info = color.info;
1337	surf->cb_color_pitch = color.pitch;
1338	surf->cb_color_slice = color.slice;
1339	surf->cb_color_view = color.view;
1340	surf->cb_color_attrib = color.attrib;
1341	surf->cb_color_fmask = color.fmask;
1342	surf->cb_color_fmask_slice = color.fmask_slice;
1343
1344	surf->color_initialized = true;
1345}
1346
1347static void evergreen_init_depth_surface(struct r600_context *rctx,
1348					 struct r600_surface *surf)
1349{
1350	struct r600_screen *rscreen = rctx->screen;
1351	struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
1352	unsigned level = surf->base.u.tex.level;
1353	struct legacy_surf_level *levelinfo = &rtex->surface.u.legacy.level[level];
1354	uint64_t offset;
1355	unsigned format, array_mode;
1356	unsigned macro_aspect, tile_split, bankh, bankw, nbanks;
1357
1358
1359	format = r600_translate_dbformat(surf->base.format);
1360	assert(format != ~0);
1361
1362	offset = rtex->resource.gpu_address;
1363	offset += rtex->surface.u.legacy.level[level].offset;
1364
1365	switch (rtex->surface.u.legacy.level[level].mode) {
1366	case RADEON_SURF_MODE_2D:
1367		array_mode = V_028C70_ARRAY_2D_TILED_THIN1;
1368		break;
1369	case RADEON_SURF_MODE_1D:
1370	case RADEON_SURF_MODE_LINEAR_ALIGNED:
1371	default:
1372		array_mode = V_028C70_ARRAY_1D_TILED_THIN1;
1373		break;
1374	}
1375	tile_split = rtex->surface.u.legacy.tile_split;
1376	macro_aspect = rtex->surface.u.legacy.mtilea;
1377	bankw = rtex->surface.u.legacy.bankw;
1378	bankh = rtex->surface.u.legacy.bankh;
1379	tile_split = eg_tile_split(tile_split);
1380	macro_aspect = eg_macro_tile_aspect(macro_aspect);
1381	bankw = eg_bank_wh(bankw);
1382	bankh = eg_bank_wh(bankh);
1383	nbanks = eg_num_banks(rscreen->b.info.r600_num_banks);
1384	offset >>= 8;
1385
1386	surf->db_z_info = S_028040_ARRAY_MODE(array_mode) |
1387			  S_028040_FORMAT(format) |
1388			  S_028040_TILE_SPLIT(tile_split)|
1389			  S_028040_NUM_BANKS(nbanks) |
1390			  S_028040_BANK_WIDTH(bankw) |
1391			  S_028040_BANK_HEIGHT(bankh) |
1392			  S_028040_MACRO_TILE_ASPECT(macro_aspect);
1393	if (rscreen->b.chip_class == CAYMAN && rtex->resource.b.b.nr_samples > 1) {
1394		surf->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(rtex->resource.b.b.nr_samples));
1395	}
1396
1397	assert(levelinfo->nblk_x % 8 == 0 && levelinfo->nblk_y % 8 == 0);
1398
1399	surf->db_depth_base = offset;
1400	surf->db_depth_view = S_028008_SLICE_START(surf->base.u.tex.first_layer) |
1401			      S_028008_SLICE_MAX(surf->base.u.tex.last_layer);
1402	surf->db_depth_size = S_028058_PITCH_TILE_MAX(levelinfo->nblk_x / 8 - 1) |
1403			      S_028058_HEIGHT_TILE_MAX(levelinfo->nblk_y / 8 - 1);
1404	surf->db_depth_slice = S_02805C_SLICE_TILE_MAX(levelinfo->nblk_x *
1405						       levelinfo->nblk_y / 64 - 1);
1406
1407	if (rtex->surface.has_stencil) {
1408		uint64_t stencil_offset;
1409		unsigned stile_split = rtex->surface.u.legacy.stencil_tile_split;
1410
1411		stile_split = eg_tile_split(stile_split);
1412
1413		stencil_offset = rtex->surface.u.legacy.stencil_level[level].offset;
1414		stencil_offset += rtex->resource.gpu_address;
1415
1416		surf->db_stencil_base = stencil_offset >> 8;
1417		surf->db_stencil_info = S_028044_FORMAT(V_028044_STENCIL_8) |
1418					S_028044_TILE_SPLIT(stile_split);
1419	} else {
1420		surf->db_stencil_base = offset;
1421		/* DRM 2.6.18 allows the INVALID format to disable stencil.
1422		 * Older kernels are out of luck. */
1423		surf->db_stencil_info = rctx->screen->b.info.drm_minor >= 18 ?
1424					S_028044_FORMAT(V_028044_STENCIL_INVALID) :
1425					S_028044_FORMAT(V_028044_STENCIL_8);
1426	}
1427
1428	if (r600_htile_enabled(rtex, level)) {
1429		uint64_t va = rtex->resource.gpu_address + rtex->htile_offset;
1430		surf->db_htile_data_base = va >> 8;
1431		surf->db_htile_surface = S_028ABC_HTILE_WIDTH(1) |
1432					 S_028ABC_HTILE_HEIGHT(1) |
1433					 S_028ABC_FULL_CACHE(1);
1434		surf->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
1435		surf->db_preload_control = 0;
1436	}
1437
1438	surf->depth_initialized = true;
1439}
1440
1441static void evergreen_set_framebuffer_state(struct pipe_context *ctx,
1442					    const struct pipe_framebuffer_state *state)
1443{
1444	struct r600_context *rctx = (struct r600_context *)ctx;
1445	struct r600_surface *surf;
1446	struct r600_texture *rtex;
1447	uint32_t i, log_samples;
1448	uint32_t target_mask = 0;
1449	/* Flush TC when changing the framebuffer state, because the only
1450	 * client not using TC that can change textures is the framebuffer.
1451	 * Other places don't typically have to flush TC.
1452	 */
1453	rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE |
1454			 R600_CONTEXT_FLUSH_AND_INV |
1455			 R600_CONTEXT_FLUSH_AND_INV_CB |
1456			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
1457			 R600_CONTEXT_FLUSH_AND_INV_DB |
1458			 R600_CONTEXT_FLUSH_AND_INV_DB_META |
1459			 R600_CONTEXT_INV_TEX_CACHE;
1460
1461	util_copy_framebuffer_state(&rctx->framebuffer.state, state);
1462
1463	/* Colorbuffers. */
1464	rctx->framebuffer.export_16bpc = state->nr_cbufs != 0;
1465	rctx->framebuffer.cb0_is_integer = state->nr_cbufs && state->cbufs[0] &&
1466					   util_format_is_pure_integer(state->cbufs[0]->format);
1467	rctx->framebuffer.compressed_cb_mask = 0;
1468	rctx->framebuffer.nr_samples = util_framebuffer_get_num_samples(state);
1469
1470	for (i = 0; i < state->nr_cbufs; i++) {
1471		surf = (struct r600_surface*)state->cbufs[i];
1472		if (!surf)
1473			continue;
1474
1475		target_mask |= (0xf << (i * 4));
1476
1477		rtex = (struct r600_texture*)surf->base.texture;
1478
1479		r600_context_add_resource_size(ctx, state->cbufs[i]->texture);
1480
1481		if (!surf->color_initialized) {
1482			evergreen_init_color_surface(rctx, surf);
1483		}
1484
1485		if (!surf->export_16bpc) {
1486			rctx->framebuffer.export_16bpc = false;
1487		}
1488
1489		if (rtex->fmask.size) {
1490			rctx->framebuffer.compressed_cb_mask |= 1 << i;
1491		}
1492	}
1493
1494	/* Update alpha-test state dependencies.
1495	 * Alpha-test is done on the first colorbuffer only. */
1496	if (state->nr_cbufs) {
1497		bool alphatest_bypass = false;
1498		bool export_16bpc = true;
1499
1500		surf = (struct r600_surface*)state->cbufs[0];
1501		if (surf) {
1502			alphatest_bypass = surf->alphatest_bypass;
1503			export_16bpc = surf->export_16bpc;
1504		}
1505
1506		if (rctx->alphatest_state.bypass != alphatest_bypass) {
1507			rctx->alphatest_state.bypass = alphatest_bypass;
1508			r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
1509		}
1510		if (rctx->alphatest_state.cb0_export_16bpc != export_16bpc) {
1511			rctx->alphatest_state.cb0_export_16bpc = export_16bpc;
1512			r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
1513		}
1514	}
1515
1516	/* ZS buffer. */
1517	if (state->zsbuf) {
1518		surf = (struct r600_surface*)state->zsbuf;
1519
1520		r600_context_add_resource_size(ctx, state->zsbuf->texture);
1521
1522		if (!surf->depth_initialized) {
1523			evergreen_init_depth_surface(rctx, surf);
1524		}
1525
1526		if (state->zsbuf->format != rctx->poly_offset_state.zs_format) {
1527			rctx->poly_offset_state.zs_format = state->zsbuf->format;
1528			r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
1529		}
1530
1531		if (rctx->db_state.rsurf != surf) {
1532			rctx->db_state.rsurf = surf;
1533			r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
1534			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
1535		}
1536	} else if (rctx->db_state.rsurf) {
1537		rctx->db_state.rsurf = NULL;
1538		r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
1539		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
1540	}
1541
1542	if (rctx->cb_misc_state.nr_cbufs != state->nr_cbufs ||
1543	    rctx->cb_misc_state.bound_cbufs_target_mask != target_mask) {
1544		rctx->cb_misc_state.bound_cbufs_target_mask = target_mask;
1545		rctx->cb_misc_state.nr_cbufs = state->nr_cbufs;
1546		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
1547	}
1548
1549	if (state->nr_cbufs == 0 && rctx->alphatest_state.bypass) {
1550		rctx->alphatest_state.bypass = false;
1551		r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
1552	}
1553
1554	log_samples = util_logbase2(rctx->framebuffer.nr_samples);
1555	/* This is for Cayman to program SAMPLE_RATE, and for RV770 to fix a hw bug. */
1556	if ((rctx->b.chip_class == CAYMAN ||
1557	     rctx->b.family == CHIP_RV770) &&
1558	    rctx->db_misc_state.log_samples != log_samples) {
1559		rctx->db_misc_state.log_samples = log_samples;
1560		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
1561	}
1562
1563
1564	/* Calculate the CS size. */
1565	rctx->framebuffer.atom.num_dw = 4; /* SCISSOR */
1566
1567	/* MSAA. */
1568	if (rctx->b.chip_class == EVERGREEN)
1569		rctx->framebuffer.atom.num_dw += 17; /* Evergreen */
1570	else
1571		rctx->framebuffer.atom.num_dw += 28; /* Cayman */
1572
1573	/* Colorbuffers. */
1574	rctx->framebuffer.atom.num_dw += state->nr_cbufs * 23;
1575	rctx->framebuffer.atom.num_dw += state->nr_cbufs * 2;
1576	rctx->framebuffer.atom.num_dw += (12 - state->nr_cbufs) * 3;
1577
1578	/* ZS buffer. */
1579	if (state->zsbuf) {
1580		rctx->framebuffer.atom.num_dw += 24;
1581		rctx->framebuffer.atom.num_dw += 2;
1582	} else if (rctx->screen->b.info.drm_minor >= 18) {
1583		rctx->framebuffer.atom.num_dw += 4;
1584	}
1585
1586	r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
1587
1588	r600_set_sample_locations_constant_buffer(rctx);
1589	rctx->framebuffer.do_update_surf_dirtiness = true;
1590}
1591
1592static void evergreen_set_min_samples(struct pipe_context *ctx, unsigned min_samples)
1593{
1594	struct r600_context *rctx = (struct r600_context *)ctx;
1595
1596	if (rctx->ps_iter_samples == min_samples)
1597		return;
1598
1599	rctx->ps_iter_samples = min_samples;
1600	if (rctx->framebuffer.nr_samples > 1) {
1601		r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
1602	}
1603}
1604
1605/* 8xMSAA */
1606static const uint32_t sample_locs_8x[] = {
1607	FILL_SREG(-1,  1,  1,  5,  3, -5,  5,  3),
1608	FILL_SREG(-7, -1, -3, -7,  7, -3, -5,  7),
1609	FILL_SREG(-1,  1,  1,  5,  3, -5,  5,  3),
1610	FILL_SREG(-7, -1, -3, -7,  7, -3, -5,  7),
1611	FILL_SREG(-1,  1,  1,  5,  3, -5,  5,  3),
1612	FILL_SREG(-7, -1, -3, -7,  7, -3, -5,  7),
1613	FILL_SREG(-1,  1,  1,  5,  3, -5,  5,  3),
1614	FILL_SREG(-7, -1, -3, -7,  7, -3, -5,  7),
1615};
1616static unsigned max_dist_8x = 7;
1617
1618static void evergreen_get_sample_position(struct pipe_context *ctx,
1619				     unsigned sample_count,
1620				     unsigned sample_index,
1621				     float *out_value)
1622{
1623	int offset, index;
1624	struct {
1625		int idx:4;
1626	} val;
1627	switch (sample_count) {
1628	case 1:
1629	default:
1630		out_value[0] = out_value[1] = 0.5;
1631		break;
1632	case 2:
1633		offset = 4 * (sample_index * 2);
1634		val.idx = (eg_sample_locs_2x[0] >> offset) & 0xf;
1635		out_value[0] = (float)(val.idx + 8) / 16.0f;
1636		val.idx = (eg_sample_locs_2x[0] >> (offset + 4)) & 0xf;
1637		out_value[1] = (float)(val.idx + 8) / 16.0f;
1638		break;
1639	case 4:
1640		offset = 4 * (sample_index * 2);
1641		val.idx = (eg_sample_locs_4x[0] >> offset) & 0xf;
1642		out_value[0] = (float)(val.idx + 8) / 16.0f;
1643		val.idx = (eg_sample_locs_4x[0] >> (offset + 4)) & 0xf;
1644		out_value[1] = (float)(val.idx + 8) / 16.0f;
1645		break;
1646	case 8:
1647		offset = 4 * (sample_index % 4 * 2);
1648		index = (sample_index / 4);
1649		val.idx = (sample_locs_8x[index] >> offset) & 0xf;
1650		out_value[0] = (float)(val.idx + 8) / 16.0f;
1651		val.idx = (sample_locs_8x[index] >> (offset + 4)) & 0xf;
1652		out_value[1] = (float)(val.idx + 8) / 16.0f;
1653		break;
1654	}
1655}
1656
1657static void evergreen_emit_msaa_state(struct r600_context *rctx, int nr_samples, int ps_iter_samples)
1658{
1659
1660	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1661	unsigned max_dist = 0;
1662
1663	switch (nr_samples) {
1664	default:
1665		nr_samples = 0;
1666		break;
1667	case 2:
1668		radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(eg_sample_locs_2x));
1669		radeon_emit_array(cs, eg_sample_locs_2x, ARRAY_SIZE(eg_sample_locs_2x));
1670		max_dist = eg_max_dist_2x;
1671		break;
1672	case 4:
1673		radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(eg_sample_locs_4x));
1674		radeon_emit_array(cs, eg_sample_locs_4x, ARRAY_SIZE(eg_sample_locs_4x));
1675		max_dist = eg_max_dist_4x;
1676		break;
1677	case 8:
1678		radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(sample_locs_8x));
1679		radeon_emit_array(cs, sample_locs_8x, ARRAY_SIZE(sample_locs_8x));
1680		max_dist = max_dist_8x;
1681		break;
1682	}
1683
1684	if (nr_samples > 1) {
1685		radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
1686		radeon_emit(cs, S_028C00_LAST_PIXEL(1) |
1687				     S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */
1688		radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) |
1689				     S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */
1690		radeon_set_context_reg(cs, R_028A4C_PA_SC_MODE_CNTL_1,
1691				       EG_S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1) |
1692				       EG_S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
1693				       EG_S_028A4C_FORCE_EOV_REZ_ENABLE(1));
1694	} else {
1695		radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
1696		radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */
1697		radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */
1698		radeon_set_context_reg(cs, R_028A4C_PA_SC_MODE_CNTL_1,
1699				       EG_S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
1700				       EG_S_028A4C_FORCE_EOV_REZ_ENABLE(1));
1701	}
1702}
1703
1704static void evergreen_emit_image_state(struct r600_context *rctx, struct r600_atom *atom,
1705				       int immed_id_base, int res_id_base, int offset, uint32_t pkt_flags)
1706{
1707	struct r600_image_state *state = (struct r600_image_state *)atom;
1708	struct pipe_framebuffer_state *fb_state = &rctx->framebuffer.state;
1709	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1710	struct r600_texture *rtex;
1711	struct r600_resource *resource;
1712	int i;
1713
1714	for (i = 0; i < R600_MAX_IMAGES; i++) {
1715		struct r600_image_view *image = &state->views[i];
1716		unsigned reloc, immed_reloc;
1717		int idx = i + offset;
1718
1719		if (!pkt_flags)
1720			idx += fb_state->nr_cbufs + (rctx->dual_src_blend ? 1 : 0);
1721		if (!image->base.resource)
1722			continue;
1723
1724		resource = (struct r600_resource *)image->base.resource;
1725		if (resource->b.b.target != PIPE_BUFFER)
1726			rtex = (struct r600_texture *)image->base.resource;
1727		else
1728			rtex = NULL;
1729
1730		reloc = radeon_add_to_buffer_list(&rctx->b,
1731						  &rctx->b.gfx,
1732						  resource,
1733						  RADEON_USAGE_READWRITE,
1734						  RADEON_PRIO_SHADER_RW_BUFFER);
1735
1736		immed_reloc = radeon_add_to_buffer_list(&rctx->b,
1737							&rctx->b.gfx,
1738							resource->immed_buffer,
1739							RADEON_USAGE_READWRITE,
1740							RADEON_PRIO_SHADER_RW_BUFFER);
1741
1742		if (pkt_flags)
1743			radeon_compute_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + idx * 0x3C, 13);
1744		else
1745			radeon_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + idx * 0x3C, 13);
1746
1747		radeon_emit(cs, image->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
1748		radeon_emit(cs, image->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
1749		radeon_emit(cs, image->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
1750		radeon_emit(cs, image->cb_color_view);	/* R_028C6C_CB_COLOR0_VIEW */
1751		radeon_emit(cs, image->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
1752		radeon_emit(cs, image->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
1753		radeon_emit(cs, image->cb_color_dim);		/* R_028C78_CB_COLOR0_DIM */
1754		radeon_emit(cs, rtex ? rtex->cmask.base_address_reg : image->cb_color_base);	/* R_028C7C_CB_COLOR0_CMASK */
1755		radeon_emit(cs, rtex ? rtex->cmask.slice_tile_max : 0);	/* R_028C80_CB_COLOR0_CMASK_SLICE */
1756		radeon_emit(cs, image->cb_color_fmask);	/* R_028C84_CB_COLOR0_FMASK */
1757		radeon_emit(cs, image->cb_color_fmask_slice); /* R_028C88_CB_COLOR0_FMASK_SLICE */
1758		radeon_emit(cs, rtex ? rtex->color_clear_value[0] : 0); /* R_028C8C_CB_COLOR0_CLEAR_WORD0 */
1759		radeon_emit(cs, rtex ? rtex->color_clear_value[1] : 0); /* R_028C90_CB_COLOR0_CLEAR_WORD1 */
1760
1761		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
1762		radeon_emit(cs, reloc);
1763
1764		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
1765		radeon_emit(cs, reloc);
1766
1767		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C7C_CB_COLOR0_CMASK */
1768		radeon_emit(cs, reloc);
1769
1770		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C84_CB_COLOR0_FMASK */
1771		radeon_emit(cs, reloc);
1772
1773		if (pkt_flags)
1774			radeon_compute_set_context_reg(cs, R_028B9C_CB_IMMED0_BASE + (idx * 4), resource->immed_buffer->gpu_address >> 8);
1775		else
1776			radeon_set_context_reg(cs, R_028B9C_CB_IMMED0_BASE + (idx * 4), resource->immed_buffer->gpu_address >> 8);
1777
1778		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /**/
1779		radeon_emit(cs, immed_reloc);
1780
1781		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
1782		radeon_emit(cs, (immed_id_base + i + offset) * 8);
1783		radeon_emit_array(cs, image->immed_resource_words, 8);
1784
1785		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
1786		radeon_emit(cs, immed_reloc);
1787
1788		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
1789		radeon_emit(cs, (res_id_base + i + offset) * 8);
1790		radeon_emit_array(cs, image->resource_words, 8);
1791
1792		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
1793		radeon_emit(cs, reloc);
1794
1795		if (!image->skip_mip_address_reloc) {
1796			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
1797			radeon_emit(cs, reloc);
1798		}
1799	}
1800}
1801
1802static void evergreen_emit_fragment_image_state(struct r600_context *rctx, struct r600_atom *atom)
1803{
1804	evergreen_emit_image_state(rctx, atom,
1805				   R600_IMAGE_IMMED_RESOURCE_OFFSET,
1806				   R600_IMAGE_REAL_RESOURCE_OFFSET, 0, 0);
1807}
1808
1809static void evergreen_emit_compute_image_state(struct r600_context *rctx, struct r600_atom *atom)
1810{
1811	evergreen_emit_image_state(rctx, atom,
1812				   EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_IMMED_RESOURCE_OFFSET,
1813				   EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_REAL_RESOURCE_OFFSET,
1814				   0, RADEON_CP_PACKET3_COMPUTE_MODE);
1815}
1816
1817static void evergreen_emit_fragment_buffer_state(struct r600_context *rctx, struct r600_atom *atom)
1818{
1819	int offset = util_bitcount(rctx->fragment_images.enabled_mask);
1820	evergreen_emit_image_state(rctx, atom,
1821				   R600_IMAGE_IMMED_RESOURCE_OFFSET,
1822				   R600_IMAGE_REAL_RESOURCE_OFFSET, offset, 0);
1823}
1824
1825static void evergreen_emit_compute_buffer_state(struct r600_context *rctx, struct r600_atom *atom)
1826{
1827	int offset = util_bitcount(rctx->compute_images.enabled_mask);
1828	evergreen_emit_image_state(rctx, atom,
1829				   EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_IMMED_RESOURCE_OFFSET,
1830				   EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_REAL_RESOURCE_OFFSET,
1831				   offset, RADEON_CP_PACKET3_COMPUTE_MODE);
1832}
1833
1834static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom)
1835{
1836	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1837	struct pipe_framebuffer_state *state = &rctx->framebuffer.state;
1838	unsigned nr_cbufs = state->nr_cbufs;
1839	unsigned i, tl, br;
1840	struct r600_texture *tex = NULL;
1841	struct r600_surface *cb = NULL;
1842
1843	/* XXX support more colorbuffers once we need them */
1844	assert(nr_cbufs <= 8);
1845	if (nr_cbufs > 8)
1846		nr_cbufs = 8;
1847
1848	/* Colorbuffers. */
1849	for (i = 0; i < nr_cbufs; i++) {
1850		unsigned reloc, cmask_reloc;
1851
1852		cb = (struct r600_surface*)state->cbufs[i];
1853		if (!cb) {
1854			radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
1855					       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
1856			continue;
1857		}
1858
1859		tex = (struct r600_texture *)cb->base.texture;
1860		reloc = radeon_add_to_buffer_list(&rctx->b,
1861					      &rctx->b.gfx,
1862					      (struct r600_resource*)cb->base.texture,
1863					      RADEON_USAGE_READWRITE,
1864					      tex->resource.b.b.nr_samples > 1 ?
1865						      RADEON_PRIO_COLOR_BUFFER_MSAA :
1866						      RADEON_PRIO_COLOR_BUFFER);
1867
1868		if (tex->cmask_buffer && tex->cmask_buffer != &tex->resource) {
1869			cmask_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
1870				tex->cmask_buffer, RADEON_USAGE_READWRITE,
1871				RADEON_PRIO_SEPARATE_META);
1872		} else {
1873			cmask_reloc = reloc;
1874		}
1875
1876		radeon_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 13);
1877		radeon_emit(cs, cb->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
1878		radeon_emit(cs, cb->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
1879		radeon_emit(cs, cb->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
1880		radeon_emit(cs, cb->cb_color_view);	/* R_028C6C_CB_COLOR0_VIEW */
1881		radeon_emit(cs, cb->cb_color_info | tex->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
1882		radeon_emit(cs, cb->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
1883		radeon_emit(cs, cb->cb_color_dim);		/* R_028C78_CB_COLOR0_DIM */
1884		radeon_emit(cs, tex->cmask.base_address_reg);	/* R_028C7C_CB_COLOR0_CMASK */
1885		radeon_emit(cs, tex->cmask.slice_tile_max);	/* R_028C80_CB_COLOR0_CMASK_SLICE */
1886		radeon_emit(cs, cb->cb_color_fmask);	/* R_028C84_CB_COLOR0_FMASK */
1887		radeon_emit(cs, cb->cb_color_fmask_slice); /* R_028C88_CB_COLOR0_FMASK_SLICE */
1888		radeon_emit(cs, tex->color_clear_value[0]); /* R_028C8C_CB_COLOR0_CLEAR_WORD0 */
1889		radeon_emit(cs, tex->color_clear_value[1]); /* R_028C90_CB_COLOR0_CLEAR_WORD1 */
1890
1891		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
1892		radeon_emit(cs, reloc);
1893
1894		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
1895		radeon_emit(cs, reloc);
1896
1897		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C7C_CB_COLOR0_CMASK */
1898		radeon_emit(cs, cmask_reloc);
1899
1900		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C84_CB_COLOR0_FMASK */
1901		radeon_emit(cs, reloc);
1902	}
1903	/* set CB_COLOR1_INFO for possible dual-src blending */
1904	if (rctx->framebuffer.dual_src_blend && i == 1 && state->cbufs[0]) {
1905		radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + 1 * 0x3C,
1906				       cb->cb_color_info | tex->cb_color_info);
1907		i++;
1908	}
1909	i += util_bitcount(rctx->fragment_images.enabled_mask);
1910	i += util_bitcount(rctx->fragment_buffers.enabled_mask);
1911	for (; i < 8 ; i++)
1912		radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, 0);
1913	for (; i < 12; i++)
1914		radeon_set_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C, 0);
1915
1916	/* ZS buffer. */
1917	if (state->zsbuf) {
1918		struct r600_surface *zb = (struct r600_surface*)state->zsbuf;
1919		unsigned reloc = radeon_add_to_buffer_list(&rctx->b,
1920						       &rctx->b.gfx,
1921						       (struct r600_resource*)state->zsbuf->texture,
1922						       RADEON_USAGE_READWRITE,
1923						       zb->base.texture->nr_samples > 1 ?
1924							       RADEON_PRIO_DEPTH_BUFFER_MSAA :
1925							       RADEON_PRIO_DEPTH_BUFFER);
1926
1927		radeon_set_context_reg(cs, R_028008_DB_DEPTH_VIEW, zb->db_depth_view);
1928
1929		radeon_set_context_reg_seq(cs, R_028040_DB_Z_INFO, 8);
1930		radeon_emit(cs, zb->db_z_info);		/* R_028040_DB_Z_INFO */
1931		radeon_emit(cs, zb->db_stencil_info);	/* R_028044_DB_STENCIL_INFO */
1932		radeon_emit(cs, zb->db_depth_base);	/* R_028048_DB_Z_READ_BASE */
1933		radeon_emit(cs, zb->db_stencil_base);	/* R_02804C_DB_STENCIL_READ_BASE */
1934		radeon_emit(cs, zb->db_depth_base);	/* R_028050_DB_Z_WRITE_BASE */
1935		radeon_emit(cs, zb->db_stencil_base);	/* R_028054_DB_STENCIL_WRITE_BASE */
1936		radeon_emit(cs, zb->db_depth_size);	/* R_028058_DB_DEPTH_SIZE */
1937		radeon_emit(cs, zb->db_depth_slice);	/* R_02805C_DB_DEPTH_SLICE */
1938
1939		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028048_DB_Z_READ_BASE */
1940		radeon_emit(cs, reloc);
1941
1942		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_02804C_DB_STENCIL_READ_BASE */
1943		radeon_emit(cs, reloc);
1944
1945		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028050_DB_Z_WRITE_BASE */
1946		radeon_emit(cs, reloc);
1947
1948		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028054_DB_STENCIL_WRITE_BASE */
1949		radeon_emit(cs, reloc);
1950	} else if (rctx->screen->b.info.drm_minor >= 18) {
1951		/* DRM 2.6.18 allows the INVALID format to disable depth/stencil.
1952		 * Older kernels are out of luck. */
1953		radeon_set_context_reg_seq(cs, R_028040_DB_Z_INFO, 2);
1954		radeon_emit(cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* R_028040_DB_Z_INFO */
1955		radeon_emit(cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* R_028044_DB_STENCIL_INFO */
1956	}
1957
1958	/* Framebuffer dimensions. */
1959	evergreen_get_scissor_rect(rctx, 0, 0, state->width, state->height, &tl, &br);
1960
1961	radeon_set_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2);
1962	radeon_emit(cs, tl); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */
1963	radeon_emit(cs, br); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */
1964
1965	if (rctx->b.chip_class == EVERGREEN) {
1966		evergreen_emit_msaa_state(rctx, rctx->framebuffer.nr_samples, rctx->ps_iter_samples);
1967	} else {
1968		cayman_emit_msaa_state(cs, rctx->framebuffer.nr_samples,
1969				       rctx->ps_iter_samples, 0);
1970	}
1971}
1972
1973static void evergreen_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a)
1974{
1975	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
1976	struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a;
1977	float offset_units = state->offset_units;
1978	float offset_scale = state->offset_scale;
1979	uint32_t pa_su_poly_offset_db_fmt_cntl = 0;
1980
1981	if (!state->offset_units_unscaled) {
1982		switch (state->zs_format) {
1983		case PIPE_FORMAT_Z24X8_UNORM:
1984		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
1985		case PIPE_FORMAT_X8Z24_UNORM:
1986		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
1987			offset_units *= 2.0f;
1988			pa_su_poly_offset_db_fmt_cntl =
1989				S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS((char)-24);
1990			break;
1991		case PIPE_FORMAT_Z16_UNORM:
1992			offset_units *= 4.0f;
1993			pa_su_poly_offset_db_fmt_cntl =
1994				S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS((char)-16);
1995			break;
1996		default:
1997			pa_su_poly_offset_db_fmt_cntl =
1998				S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS((char)-23) |
1999				S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
2000		}
2001	}
2002
2003	radeon_set_context_reg_seq(cs, R_028B80_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
2004	radeon_emit(cs, fui(offset_scale));
2005	radeon_emit(cs, fui(offset_units));
2006	radeon_emit(cs, fui(offset_scale));
2007	radeon_emit(cs, fui(offset_units));
2008
2009	radeon_set_context_reg(cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
2010			       pa_su_poly_offset_db_fmt_cntl);
2011}
2012
2013uint32_t evergreen_construct_rat_mask(struct r600_context *rctx, struct r600_cb_misc_state *a,
2014				      unsigned nr_cbufs)
2015{
2016	unsigned base_mask = 0;
2017	unsigned dirty_mask = a->image_rat_enabled_mask;
2018	while (dirty_mask) {
2019		unsigned idx = u_bit_scan(&dirty_mask);
2020		base_mask |= (0xf << (idx * 4));
2021	}
2022	unsigned offset = util_last_bit(a->image_rat_enabled_mask);
2023	dirty_mask = a->buffer_rat_enabled_mask;
2024	while (dirty_mask) {
2025		unsigned idx = u_bit_scan(&dirty_mask);
2026		base_mask |= (0xf << (idx + offset) * 4);
2027	}
2028	return base_mask << (nr_cbufs * 4);
2029}
2030
2031static void evergreen_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom)
2032{
2033	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
2034	struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom;
2035	unsigned fb_colormask = a->bound_cbufs_target_mask;
2036	unsigned ps_colormask = a->ps_color_export_mask;
2037	unsigned rat_colormask = evergreen_construct_rat_mask(rctx, a, a->nr_cbufs);
2038	radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
2039	radeon_emit(cs, (a->blend_colormask & fb_colormask) | rat_colormask); /* R_028238_CB_TARGET_MASK */
2040	/* This must match the used export instructions exactly.
2041	 * Other values may lead to undefined behavior and hangs.
2042	 */
2043	radeon_emit(cs, ps_colormask); /* R_02823C_CB_SHADER_MASK */
2044}
2045
2046static void evergreen_emit_db_state(struct r600_context *rctx, struct r600_atom *atom)
2047{
2048	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
2049	struct r600_db_state *a = (struct r600_db_state*)atom;
2050
2051	if (a->rsurf && a->rsurf->db_htile_surface) {
2052		struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture;
2053		unsigned reloc_idx;
2054
2055		radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
2056		radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
2057		radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, a->rsurf->db_preload_control);
2058		radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
2059		reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, &rtex->resource,
2060						  RADEON_USAGE_READWRITE, RADEON_PRIO_SEPARATE_META);
2061		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2062		radeon_emit(cs, reloc_idx);
2063	} else {
2064		radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, 0);
2065		radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0);
2066	}
2067}
2068
2069static void evergreen_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom)
2070{
2071	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
2072	struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom;
2073	unsigned db_render_control = 0;
2074	unsigned db_count_control = 0;
2075	unsigned db_render_override =
2076		S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
2077		S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE);
2078
2079	if (rctx->b.num_occlusion_queries > 0 &&
2080	    !a->occlusion_queries_disabled) {
2081		db_count_control |= S_028004_PERFECT_ZPASS_COUNTS(1);
2082		if (rctx->b.chip_class == CAYMAN) {
2083			db_count_control |= S_028004_SAMPLE_RATE(a->log_samples);
2084		}
2085		db_render_override |= S_02800C_NOOP_CULL_DISABLE(1);
2086	} else {
2087		db_count_control |= S_028004_ZPASS_INCREMENT_DISABLE(1);
2088	}
2089
2090	/* This is to fix a lockup when hyperz and alpha test are enabled at
2091	 * the same time somehow GPU get confuse on which order to pick for
2092	 * z test
2093	 */
2094	if (rctx->alphatest_state.sx_alpha_test_control)
2095		db_render_override |= S_02800C_FORCE_SHADER_Z_ORDER(1);
2096
2097	if (a->flush_depthstencil_through_cb) {
2098		assert(a->copy_depth || a->copy_stencil);
2099
2100		db_render_control |= S_028000_DEPTH_COPY_ENABLE(a->copy_depth) |
2101				     S_028000_STENCIL_COPY_ENABLE(a->copy_stencil) |
2102				     S_028000_COPY_CENTROID(1) |
2103				     S_028000_COPY_SAMPLE(a->copy_sample);
2104	} else if (a->flush_depth_inplace || a->flush_stencil_inplace) {
2105		db_render_control |= S_028000_DEPTH_COMPRESS_DISABLE(a->flush_depth_inplace) |
2106				     S_028000_STENCIL_COMPRESS_DISABLE(a->flush_stencil_inplace);
2107		db_render_override |= S_02800C_DISABLE_PIXEL_RATE_TILES(1);
2108	}
2109	if (a->htile_clear) {
2110		/* FIXME we might want to disable cliprect here */
2111		db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(1);
2112	}
2113
2114	radeon_set_context_reg_seq(cs, R_028000_DB_RENDER_CONTROL, 2);
2115	radeon_emit(cs, db_render_control); /* R_028000_DB_RENDER_CONTROL */
2116	radeon_emit(cs, db_count_control); /* R_028004_DB_COUNT_CONTROL */
2117	radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
2118	radeon_set_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control);
2119}
2120
2121static void evergreen_emit_vertex_buffers(struct r600_context *rctx,
2122					  struct r600_vertexbuf_state *state,
2123					  unsigned resource_offset,
2124					  unsigned pkt_flags)
2125{
2126	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
2127	uint32_t dirty_mask = state->dirty_mask;
2128
2129	while (dirty_mask) {
2130		struct pipe_vertex_buffer *vb;
2131		struct r600_resource *rbuffer;
2132		uint64_t va;
2133		unsigned buffer_index = u_bit_scan(&dirty_mask);
2134
2135		vb = &state->vb[buffer_index];
2136		rbuffer = (struct r600_resource*)vb->buffer.resource;
2137		assert(rbuffer);
2138
2139		va = rbuffer->gpu_address + vb->buffer_offset;
2140
2141		/* fetch resources start at index 992 */
2142		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
2143		radeon_emit(cs, (resource_offset + buffer_index) * 8);
2144		radeon_emit(cs, va); /* RESOURCEi_WORD0 */
2145		radeon_emit(cs, rbuffer->b.b.width0 - vb->buffer_offset - 1); /* RESOURCEi_WORD1 */
2146		radeon_emit(cs, /* RESOURCEi_WORD2 */
2147				 S_030008_ENDIAN_SWAP(r600_endian_swap(32)) |
2148				 S_030008_STRIDE(vb->stride) |
2149				 S_030008_BASE_ADDRESS_HI(va >> 32UL));
2150		radeon_emit(cs, /* RESOURCEi_WORD3 */
2151				 S_03000C_DST_SEL_X(V_03000C_SQ_SEL_X) |
2152				 S_03000C_DST_SEL_Y(V_03000C_SQ_SEL_Y) |
2153				 S_03000C_DST_SEL_Z(V_03000C_SQ_SEL_Z) |
2154				 S_03000C_DST_SEL_W(V_03000C_SQ_SEL_W));
2155		radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
2156		radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
2157		radeon_emit(cs, 0); /* RESOURCEi_WORD6 */
2158		radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD7 */
2159
2160		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
2161		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
2162						      RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER));
2163	}
2164	state->dirty_mask = 0;
2165}
2166
2167static void evergreen_fs_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom * atom)
2168{
2169	evergreen_emit_vertex_buffers(rctx, &rctx->vertex_buffer_state, EG_FETCH_CONSTANTS_OFFSET_FS, 0);
2170}
2171
2172static void evergreen_cs_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom * atom)
2173{
2174	evergreen_emit_vertex_buffers(rctx, &rctx->cs_vertex_buffer_state, EG_FETCH_CONSTANTS_OFFSET_CS,
2175				      RADEON_CP_PACKET3_COMPUTE_MODE);
2176}
2177
2178static void evergreen_emit_constant_buffers(struct r600_context *rctx,
2179					    struct r600_constbuf_state *state,
2180					    unsigned buffer_id_base,
2181					    unsigned reg_alu_constbuf_size,
2182					    unsigned reg_alu_const_cache,
2183					    unsigned pkt_flags)
2184{
2185	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
2186	uint32_t dirty_mask = state->dirty_mask;
2187
2188	while (dirty_mask) {
2189		struct pipe_constant_buffer *cb;
2190		struct r600_resource *rbuffer;
2191		uint64_t va;
2192		unsigned buffer_index = ffs(dirty_mask) - 1;
2193		unsigned gs_ring_buffer = (buffer_index == R600_GS_RING_CONST_BUFFER);
2194
2195		cb = &state->cb[buffer_index];
2196		rbuffer = (struct r600_resource*)cb->buffer;
2197		assert(rbuffer);
2198
2199		va = rbuffer->gpu_address + cb->buffer_offset;
2200
2201		if (buffer_index < R600_MAX_HW_CONST_BUFFERS) {
2202			radeon_set_context_reg_flag(cs, reg_alu_constbuf_size + buffer_index * 4,
2203						    DIV_ROUND_UP(cb->buffer_size, 256), pkt_flags);
2204			radeon_set_context_reg_flag(cs, reg_alu_const_cache + buffer_index * 4, va >> 8,
2205						    pkt_flags);
2206			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
2207			radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
2208								  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
2209		}
2210
2211		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
2212		radeon_emit(cs, (buffer_id_base + buffer_index) * 8);
2213		radeon_emit(cs, va); /* RESOURCEi_WORD0 */
2214		radeon_emit(cs, cb->buffer_size -1); /* RESOURCEi_WORD1 */
2215		radeon_emit(cs, /* RESOURCEi_WORD2 */
2216			    S_030008_ENDIAN_SWAP(gs_ring_buffer ? ENDIAN_NONE : r600_endian_swap(32)) |
2217			    S_030008_STRIDE(gs_ring_buffer ? 4 : 16) |
2218			    S_030008_BASE_ADDRESS_HI(va >> 32UL) |
2219			    S_030008_DATA_FORMAT(FMT_32_32_32_32_FLOAT));
2220		radeon_emit(cs, /* RESOURCEi_WORD3 */
2221			         S_03000C_UNCACHED(gs_ring_buffer ? 1 : 0) |
2222				 S_03000C_DST_SEL_X(V_03000C_SQ_SEL_X) |
2223				 S_03000C_DST_SEL_Y(V_03000C_SQ_SEL_Y) |
2224				 S_03000C_DST_SEL_Z(V_03000C_SQ_SEL_Z) |
2225				 S_03000C_DST_SEL_W(V_03000C_SQ_SEL_W));
2226		radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
2227		radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
2228		radeon_emit(cs, 0); /* RESOURCEi_WORD6 */
2229		radeon_emit(cs, /* RESOURCEi_WORD7 */
2230			    S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_BUFFER));
2231
2232		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
2233		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
2234						      RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
2235
2236		dirty_mask &= ~(1 << buffer_index);
2237	}
2238	state->dirty_mask = 0;
2239}
2240
2241/* VS constants can be in VS/ES (same space) or LS if tess is enabled */
2242static void evergreen_emit_vs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
2243{
2244	if (rctx->vs_shader->current->shader.vs_as_ls) {
2245		evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX],
2246						EG_FETCH_CONSTANTS_OFFSET_LS,
2247						R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0,
2248						R_028F40_ALU_CONST_CACHE_LS_0,
2249						0 /* PKT3 flags */);
2250	} else {
2251		evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX],
2252						EG_FETCH_CONSTANTS_OFFSET_VS,
2253						R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
2254						R_028980_ALU_CONST_CACHE_VS_0,
2255						0 /* PKT3 flags */);
2256	}
2257}
2258
2259static void evergreen_emit_gs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
2260{
2261	evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY],
2262					EG_FETCH_CONSTANTS_OFFSET_GS,
2263					R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0,
2264					R_0289C0_ALU_CONST_CACHE_GS_0,
2265					0 /* PKT3 flags */);
2266}
2267
2268static void evergreen_emit_ps_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
2269{
2270	evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT],
2271					EG_FETCH_CONSTANTS_OFFSET_PS,
2272					R_028140_ALU_CONST_BUFFER_SIZE_PS_0,
2273					R_028940_ALU_CONST_CACHE_PS_0,
2274					0 /* PKT3 flags */);
2275}
2276
2277static void evergreen_emit_cs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
2278{
2279	evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_COMPUTE],
2280					EG_FETCH_CONSTANTS_OFFSET_CS,
2281					R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0,
2282					R_028F40_ALU_CONST_CACHE_LS_0,
2283					RADEON_CP_PACKET3_COMPUTE_MODE);
2284}
2285
2286/* tes constants can be emitted to VS or ES - which are common */
2287static void evergreen_emit_tes_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
2288{
2289	if (!rctx->tes_shader)
2290		return;
2291	evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_TESS_EVAL],
2292					EG_FETCH_CONSTANTS_OFFSET_VS,
2293					R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
2294					R_028980_ALU_CONST_CACHE_VS_0,
2295					0);
2296}
2297
2298static void evergreen_emit_tcs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
2299{
2300	if (!rctx->tes_shader)
2301		return;
2302	evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_TESS_CTRL],
2303					EG_FETCH_CONSTANTS_OFFSET_HS,
2304					R_028F80_ALU_CONST_BUFFER_SIZE_HS_0,
2305					R_028F00_ALU_CONST_CACHE_HS_0,
2306					0);
2307}
2308
2309void evergreen_setup_scratch_buffers(struct r600_context *rctx) {
2310	static const struct {
2311		unsigned ring_base;
2312		unsigned item_size;
2313		unsigned ring_size;
2314	} regs[EG_NUM_HW_STAGES] = {
2315		[R600_HW_STAGE_PS] = { R_008C68_SQ_PSTMP_RING_BASE, R_028914_SQ_PSTMP_RING_ITEMSIZE, R_008C6C_SQ_PSTMP_RING_SIZE },
2316		[R600_HW_STAGE_VS] = { R_008C60_SQ_VSTMP_RING_BASE, R_028910_SQ_VSTMP_RING_ITEMSIZE, R_008C64_SQ_VSTMP_RING_SIZE },
2317		[R600_HW_STAGE_GS] = { R_008C58_SQ_GSTMP_RING_BASE, R_02890C_SQ_GSTMP_RING_ITEMSIZE, R_008C5C_SQ_GSTMP_RING_SIZE },
2318		[R600_HW_STAGE_ES] = { R_008C50_SQ_ESTMP_RING_BASE, R_028908_SQ_ESTMP_RING_ITEMSIZE, R_008C54_SQ_ESTMP_RING_SIZE },
2319		[EG_HW_STAGE_LS] = { R_008E10_SQ_LSTMP_RING_BASE, R_028830_SQ_LSTMP_RING_ITEMSIZE, R_008E14_SQ_LSTMP_RING_SIZE },
2320		[EG_HW_STAGE_HS] = { R_008E18_SQ_HSTMP_RING_BASE, R_028834_SQ_HSTMP_RING_ITEMSIZE, R_008E1C_SQ_HSTMP_RING_SIZE }
2321	};
2322
2323	for (unsigned i = 0; i < EG_NUM_HW_STAGES; i++) {
2324		struct r600_pipe_shader *stage = rctx->hw_shader_stages[i].shader;
2325
2326		if (stage && unlikely(stage->scratch_space_needed)) {
2327			r600_setup_scratch_area_for_shader(rctx, stage,
2328				&rctx->scratch_buffers[i], regs[i].ring_base, regs[i].item_size, regs[i].ring_size);
2329		}
2330	}
2331}
2332
2333static void evergreen_emit_sampler_views(struct r600_context *rctx,
2334					 struct r600_samplerview_state *state,
2335					 unsigned resource_id_base, unsigned pkt_flags)
2336{
2337	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
2338	uint32_t dirty_mask = state->dirty_mask;
2339
2340	while (dirty_mask) {
2341		struct r600_pipe_sampler_view *rview;
2342		unsigned resource_index = u_bit_scan(&dirty_mask);
2343		unsigned reloc;
2344
2345		rview = state->views[resource_index];
2346		assert(rview);
2347
2348		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
2349		radeon_emit(cs, (resource_id_base + resource_index) * 8);
2350		radeon_emit_array(cs, rview->tex_resource_words, 8);
2351
2352		reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rview->tex_resource,
2353					      RADEON_USAGE_READ,
2354					      r600_get_sampler_view_priority(rview->tex_resource));
2355		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
2356		radeon_emit(cs, reloc);
2357
2358		if (!rview->skip_mip_address_reloc) {
2359			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
2360			radeon_emit(cs, reloc);
2361		}
2362	}
2363	state->dirty_mask = 0;
2364}
2365
2366static void evergreen_emit_vs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
2367{
2368	if (rctx->vs_shader->current->shader.vs_as_ls) {
2369		evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views,
2370					     EG_FETCH_CONSTANTS_OFFSET_LS + R600_MAX_CONST_BUFFERS, 0);
2371	} else {
2372		evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views,
2373					     EG_FETCH_CONSTANTS_OFFSET_VS + R600_MAX_CONST_BUFFERS, 0);
2374	}
2375}
2376
2377static void evergreen_emit_gs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
2378{
2379	evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views,
2380	                             EG_FETCH_CONSTANTS_OFFSET_GS + R600_MAX_CONST_BUFFERS, 0);
2381}
2382
2383static void evergreen_emit_tcs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
2384{
2385	evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_TESS_CTRL].views,
2386	                             EG_FETCH_CONSTANTS_OFFSET_HS + R600_MAX_CONST_BUFFERS, 0);
2387}
2388
2389static void evergreen_emit_tes_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
2390{
2391	if (!rctx->tes_shader)
2392		return;
2393	evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_TESS_EVAL].views,
2394	                             EG_FETCH_CONSTANTS_OFFSET_VS + R600_MAX_CONST_BUFFERS, 0);
2395}
2396
2397static void evergreen_emit_ps_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
2398{
2399	evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views,
2400	                             EG_FETCH_CONSTANTS_OFFSET_PS + R600_MAX_CONST_BUFFERS, 0);
2401}
2402
2403static void evergreen_emit_cs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
2404{
2405	evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].views,
2406	                             EG_FETCH_CONSTANTS_OFFSET_CS + R600_MAX_CONST_BUFFERS, RADEON_CP_PACKET3_COMPUTE_MODE);
2407}
2408
2409static void evergreen_convert_border_color(union pipe_color_union *in,
2410                                           union pipe_color_union *out,
2411                                           enum pipe_format format)
2412{
2413	if (util_format_is_pure_integer(format) &&
2414		 !util_format_is_depth_or_stencil(format)) {
2415		const struct util_format_description *d = util_format_description(format);
2416
2417		for (int i = 0; i < d->nr_channels; ++i) {
2418			int cs = d->channel[i].size;
2419			if (d->channel[i].type == UTIL_FORMAT_TYPE_SIGNED)
2420				out->f[i] = (double)(in->i[i]) / ((1ul << (cs - 1)) - 1 );
2421			else if (d->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)
2422				out->f[i] = (double)(in->ui[i]) / ((1ul << cs) - 1 );
2423			else
2424				out->f[i] = 0;
2425		}
2426
2427	} else {
2428		switch (format) {
2429		case PIPE_FORMAT_X24S8_UINT:
2430		case PIPE_FORMAT_X32_S8X24_UINT:
2431			out->f[0] = (double)(in->ui[0]) / 255.0;
2432			out->f[1] = out->f[2] = out->f[3] = 0.0f;
2433			break;
2434		default:
2435			memcpy(out->f, in->f, 4 * sizeof(float));
2436		}
2437	}
2438}
2439
2440static void evergreen_emit_sampler_states(struct r600_context *rctx,
2441				struct r600_textures_info *texinfo,
2442				unsigned resource_id_base,
2443				unsigned border_index_reg,
2444				unsigned pkt_flags)
2445{
2446	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
2447	uint32_t dirty_mask = texinfo->states.dirty_mask;
2448	union pipe_color_union border_color = {{0,0,0,1}};
2449	union pipe_color_union *border_color_ptr = &border_color;
2450
2451	while (dirty_mask) {
2452		struct r600_pipe_sampler_state *rstate;
2453		unsigned i = u_bit_scan(&dirty_mask);
2454
2455		rstate = texinfo->states.states[i];
2456		assert(rstate);
2457
2458		if (rstate->border_color_use) {
2459			struct r600_pipe_sampler_view	*rview = texinfo->views.views[i];
2460			if (rview) {
2461				evergreen_convert_border_color(&rstate->border_color,
2462				                               &border_color, rview->base.format);
2463			} else {
2464				border_color_ptr = &rstate->border_color;
2465			}
2466		}
2467
2468		radeon_emit(cs, PKT3(PKT3_SET_SAMPLER, 3, 0) | pkt_flags);
2469		radeon_emit(cs, (resource_id_base + i) * 3);
2470		radeon_emit_array(cs, rstate->tex_sampler_words, 3);
2471
2472		if (rstate->border_color_use) {
2473			radeon_set_config_reg_seq(cs, border_index_reg, 5);
2474			radeon_emit(cs, i);
2475			radeon_emit_array(cs, border_color_ptr->ui, 4);
2476		}
2477	}
2478	texinfo->states.dirty_mask = 0;
2479}
2480
2481static void evergreen_emit_vs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
2482{
2483	if (rctx->vs_shader->current->shader.vs_as_ls) {
2484		evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_VERTEX], 72,
2485					      R_00A450_TD_LS_SAMPLER0_BORDER_COLOR_INDEX, 0);
2486	} else {
2487		evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_VERTEX], 18,
2488					      R_00A414_TD_VS_SAMPLER0_BORDER_INDEX, 0);
2489	}
2490}
2491
2492static void evergreen_emit_gs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
2493{
2494	evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY], 36,
2495	                              R_00A428_TD_GS_SAMPLER0_BORDER_INDEX, 0);
2496}
2497
2498static void evergreen_emit_tcs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
2499{
2500	evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_TESS_CTRL], 54,
2501	                              R_00A43C_TD_HS_SAMPLER0_BORDER_COLOR_INDEX, 0);
2502}
2503
2504static void evergreen_emit_tes_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
2505{
2506	if (!rctx->tes_shader)
2507		return;
2508	evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_TESS_EVAL], 18,
2509				      R_00A414_TD_VS_SAMPLER0_BORDER_INDEX, 0);
2510}
2511
2512static void evergreen_emit_ps_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
2513{
2514	evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT], 0,
2515	                              R_00A400_TD_PS_SAMPLER0_BORDER_INDEX, 0);
2516}
2517
2518static void evergreen_emit_cs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
2519{
2520	evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE], 90,
2521	                              R_00A464_TD_CS_SAMPLER0_BORDER_INDEX,
2522	                              RADEON_CP_PACKET3_COMPUTE_MODE);
2523}
2524
2525static void evergreen_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
2526{
2527	struct r600_sample_mask *s = (struct r600_sample_mask*)a;
2528	uint8_t mask = s->sample_mask;
2529
2530	radeon_set_context_reg(rctx->b.gfx.cs, R_028C3C_PA_SC_AA_MASK,
2531			       mask | (mask << 8) | (mask << 16) | (mask << 24));
2532}
2533
2534static void cayman_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
2535{
2536	struct r600_sample_mask *s = (struct r600_sample_mask*)a;
2537	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
2538	uint16_t mask = s->sample_mask;
2539
2540	radeon_set_context_reg_seq(cs, CM_R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
2541	radeon_emit(cs, mask | (mask << 16)); /* X0Y0_X1Y0 */
2542	radeon_emit(cs, mask | (mask << 16)); /* X0Y1_X1Y1 */
2543}
2544
2545static void evergreen_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a)
2546{
2547	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
2548	struct r600_cso_state *state = (struct r600_cso_state*)a;
2549	struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso;
2550
2551	if (!shader)
2552		return;
2553
2554	radeon_set_context_reg(cs, R_0288A4_SQ_PGM_START_FS,
2555			       (shader->buffer->gpu_address + shader->offset) >> 8);
2556	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2557	radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->buffer,
2558                                                  RADEON_USAGE_READ,
2559                                                  RADEON_PRIO_SHADER_BINARY));
2560}
2561
2562static void evergreen_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a)
2563{
2564	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
2565	struct r600_shader_stages_state *state = (struct r600_shader_stages_state*)a;
2566
2567	uint32_t v = 0, v2 = 0, primid = 0, tf_param = 0;
2568
2569	if (rctx->vs_shader->current->shader.vs_as_gs_a) {
2570		v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
2571		primid = 1;
2572	}
2573
2574	if (state->geom_enable) {
2575		uint32_t cut_val;
2576
2577		if (rctx->gs_shader->gs_max_out_vertices <= 128)
2578			cut_val = V_028A40_GS_CUT_128;
2579		else if (rctx->gs_shader->gs_max_out_vertices <= 256)
2580			cut_val = V_028A40_GS_CUT_256;
2581		else if (rctx->gs_shader->gs_max_out_vertices <= 512)
2582			cut_val = V_028A40_GS_CUT_512;
2583		else
2584			cut_val = V_028A40_GS_CUT_1024;
2585
2586		v = S_028B54_GS_EN(1) |
2587		    S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
2588		if (!rctx->tes_shader)
2589			v |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL);
2590
2591		v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
2592			S_028A40_CUT_MODE(cut_val);
2593
2594		if (rctx->gs_shader->current->shader.gs_prim_id_input)
2595			primid = 1;
2596	}
2597
2598	if (rctx->tes_shader) {
2599		uint32_t type, partitioning, topology;
2600		struct tgsi_shader_info *info = &rctx->tes_shader->current->selector->info;
2601		unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
2602		unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
2603		bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
2604		bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
2605		switch (tes_prim_mode) {
2606		case PIPE_PRIM_LINES:
2607			type = V_028B6C_TESS_ISOLINE;
2608			break;
2609		case PIPE_PRIM_TRIANGLES:
2610			type = V_028B6C_TESS_TRIANGLE;
2611			break;
2612		case PIPE_PRIM_QUADS:
2613			type = V_028B6C_TESS_QUAD;
2614			break;
2615		default:
2616			assert(0);
2617			return;
2618		}
2619
2620		switch (tes_spacing) {
2621		case PIPE_TESS_SPACING_FRACTIONAL_ODD:
2622			partitioning = V_028B6C_PART_FRAC_ODD;
2623			break;
2624		case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
2625			partitioning = V_028B6C_PART_FRAC_EVEN;
2626			break;
2627		case PIPE_TESS_SPACING_EQUAL:
2628			partitioning = V_028B6C_PART_INTEGER;
2629			break;
2630		default:
2631			assert(0);
2632			return;
2633		}
2634
2635		if (tes_point_mode)
2636			topology = V_028B6C_OUTPUT_POINT;
2637		else if (tes_prim_mode == PIPE_PRIM_LINES)
2638			topology = V_028B6C_OUTPUT_LINE;
2639		else if (tes_vertex_order_cw)
2640			/* XXX follow radeonsi and invert */
2641			topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
2642		else
2643			topology = V_028B6C_OUTPUT_TRIANGLE_CW;
2644
2645		tf_param = S_028B6C_TYPE(type) |
2646			S_028B6C_PARTITIONING(partitioning) |
2647			S_028B6C_TOPOLOGY(topology);
2648	}
2649
2650	if (rctx->tes_shader) {
2651		v |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
2652		     S_028B54_HS_EN(1);
2653		if (!state->geom_enable)
2654			v |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
2655		else
2656			v |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS);
2657	}
2658
2659	radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, v ? 1 : 0 );
2660	radeon_set_context_reg(cs, R_028B54_VGT_SHADER_STAGES_EN, v);
2661	radeon_set_context_reg(cs, R_028A40_VGT_GS_MODE, v2);
2662	radeon_set_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid);
2663	radeon_set_context_reg(cs, R_028B6C_VGT_TF_PARAM, tf_param);
2664}
2665
2666static void evergreen_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
2667{
2668	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
2669	struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a;
2670	struct r600_resource *rbuffer;
2671
2672	radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
2673	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
2674	radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
2675
2676	if (state->enable) {
2677		rbuffer =(struct r600_resource*)state->esgs_ring.buffer;
2678		radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE,
2679				rbuffer->gpu_address >> 8);
2680		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2681		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
2682						      RADEON_USAGE_READWRITE,
2683						      RADEON_PRIO_SHADER_RINGS));
2684		radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
2685				state->esgs_ring.buffer_size >> 8);
2686
2687		rbuffer =(struct r600_resource*)state->gsvs_ring.buffer;
2688		radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE,
2689				rbuffer->gpu_address >> 8);
2690		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2691		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
2692						      RADEON_USAGE_READWRITE,
2693						      RADEON_PRIO_SHADER_RINGS));
2694		radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
2695				state->gsvs_ring.buffer_size >> 8);
2696	} else {
2697		radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0);
2698		radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0);
2699	}
2700
2701	radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
2702	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
2703	radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
2704}
2705
2706void cayman_init_common_regs(struct r600_command_buffer *cb,
2707			     enum chip_class ctx_chip_class,
2708			     enum radeon_family ctx_family,
2709			     int ctx_drm_minor)
2710{
2711	r600_store_config_reg_seq(cb, R_008C00_SQ_CONFIG, 2);
2712	r600_store_value(cb, S_008C00_EXPORT_SRC_C(1)); /* R_008C00_SQ_CONFIG */
2713	/* always set the temp clauses */
2714	r600_store_value(cb, S_008C04_NUM_CLAUSE_TEMP_GPRS(4)); /* R_008C04_SQ_GPR_RESOURCE_MGMT_1 */
2715
2716	r600_store_config_reg_seq(cb, R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1, 2);
2717	r600_store_value(cb, 0); /* R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1 */
2718	r600_store_value(cb, 0); /* R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2 */
2719
2720	r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, (1 << 8));
2721
2722	r600_store_context_reg_seq(cb, R_028350_SX_MISC, 2);
2723	r600_store_value(cb, 0);
2724	r600_store_value(cb, S_028354_SURFACE_SYNC_MASK(0xf));
2725
2726	r600_store_context_reg(cb, R_028800_DB_DEPTH_CONTROL, 0);
2727}
2728
2729static void cayman_init_atom_start_cs(struct r600_context *rctx)
2730{
2731	struct r600_command_buffer *cb = &rctx->start_cs_cmd;
2732	int i;
2733
2734	r600_init_command_buffer(cb, 338);
2735
2736	/* This must be first. */
2737	r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
2738	r600_store_value(cb, 0x80000000);
2739	r600_store_value(cb, 0x80000000);
2740
2741	/* We're setting config registers here. */
2742	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
2743	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
2744
2745	/* This enables pipeline stat & streamout queries.
2746	 * They are only disabled by blits.
2747	 */
2748	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
2749	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) | EVENT_INDEX(0));
2750
2751	cayman_init_common_regs(cb, rctx->b.chip_class,
2752				rctx->b.family, rctx->screen->b.info.drm_minor);
2753
2754	r600_store_config_reg(cb, R_009100_SPI_CONFIG_CNTL, 0);
2755	r600_store_config_reg(cb, R_00913C_SPI_CONFIG_CNTL_1, S_00913C_VTX_DONE_DELAY(4));
2756
2757	/* remove LS/HS from one SIMD for hw workaround */
2758	r600_store_config_reg_seq(cb, R_008E20_SQ_STATIC_THREAD_MGMT1, 3);
2759	r600_store_value(cb, 0xffffffff);
2760	r600_store_value(cb, 0xffffffff);
2761	r600_store_value(cb, 0xfffffffe);
2762
2763	r600_store_context_reg_seq(cb, R_028900_SQ_ESGS_RING_ITEMSIZE, 6);
2764	r600_store_value(cb, 0); /* R_028900_SQ_ESGS_RING_ITEMSIZE */
2765	r600_store_value(cb, 0); /* R_028904_SQ_GSVS_RING_ITEMSIZE */
2766	r600_store_value(cb, 0); /* R_028908_SQ_ESTMP_RING_ITEMSIZE */
2767	r600_store_value(cb, 0); /* R_02890C_SQ_GSTMP_RING_ITEMSIZE */
2768	r600_store_value(cb, 0); /* R_028910_SQ_VSTMP_RING_ITEMSIZE */
2769	r600_store_value(cb, 0); /* R_028914_SQ_PSTMP_RING_ITEMSIZE */
2770
2771	r600_store_context_reg_seq(cb, R_02891C_SQ_GS_VERT_ITEMSIZE, 4);
2772	r600_store_value(cb, 0); /* R_02891C_SQ_GS_VERT_ITEMSIZE */
2773	r600_store_value(cb, 0); /* R_028920_SQ_GS_VERT_ITEMSIZE_1 */
2774	r600_store_value(cb, 0); /* R_028924_SQ_GS_VERT_ITEMSIZE_2 */
2775	r600_store_value(cb, 0); /* R_028928_SQ_GS_VERT_ITEMSIZE_3 */
2776
2777	r600_store_context_reg_seq(cb, R_028A10_VGT_OUTPUT_PATH_CNTL, 13);
2778	r600_store_value(cb, 0); /* R_028A10_VGT_OUTPUT_PATH_CNTL */
2779	r600_store_value(cb, 0); /* R_028A14_VGT_HOS_CNTL */
2780	r600_store_value(cb, fui(64)); /* R_028A18_VGT_HOS_MAX_TESS_LEVEL */
2781	r600_store_value(cb, fui(0)); /* R_028A1C_VGT_HOS_MIN_TESS_LEVEL */
2782	r600_store_value(cb, 16); /* R_028A20_VGT_HOS_REUSE_DEPTH */
2783	r600_store_value(cb, 0); /* R_028A24_VGT_GROUP_PRIM_TYPE */
2784	r600_store_value(cb, 0); /* R_028A28_VGT_GROUP_FIRST_DECR */
2785	r600_store_value(cb, 0); /* R_028A2C_VGT_GROUP_DECR */
2786	r600_store_value(cb, 0); /* R_028A30_VGT_GROUP_VECT_0_CNTL */
2787	r600_store_value(cb, 0); /* R_028A34_VGT_GROUP_VECT_1_CNTL */
2788	r600_store_value(cb, 0); /* R_028A38_VGT_GROUP_VECT_0_FMT_CNTL */
2789	r600_store_value(cb, 0); /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */
2790	r600_store_value(cb, 0); /* R_028A40_VGT_GS_MODE */
2791
2792	r600_store_context_reg(cb, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0);
2793
2794	r600_store_config_reg(cb, R_008A14_PA_CL_ENHANCE, (3 << 1) | 1);
2795
2796	r600_store_context_reg_seq(cb, CM_R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
2797	r600_store_value(cb, 0x76543210); /* CM_R_028BD4_PA_SC_CENTROID_PRIORITY_0 */
2798	r600_store_value(cb, 0xfedcba98); /* CM_R_028BD8_PA_SC_CENTROID_PRIORITY_1 */
2799
2800	r600_store_context_reg(cb, R_028724_GDS_ADDR_SIZE, 0x3fff);
2801	r600_store_context_reg_seq(cb, R_0288E8_SQ_LDS_ALLOC, 2);
2802	r600_store_value(cb, 0); /* R_0288E8_SQ_LDS_ALLOC */
2803	r600_store_value(cb, 0); /* R_0288EC_SQ_LDS_ALLOC_PS */
2804
2805        r600_store_context_reg(cb, R_0288F0_SQ_VTX_SEMANTIC_CLEAR, ~0);
2806
2807        r600_store_context_reg_seq(cb, R_028400_VGT_MAX_VTX_INDX, 2);
2808	r600_store_value(cb, ~0); /* R_028400_VGT_MAX_VTX_INDX */
2809	r600_store_value(cb, 0); /* R_028404_VGT_MIN_VTX_INDX */
2810
2811	r600_store_ctl_const(cb, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);
2812
2813	r600_store_context_reg(cb, R_028028_DB_STENCIL_CLEAR, 0);
2814
2815	r600_store_context_reg(cb, R_0286DC_SPI_FOG_CNTL, 0);
2816
2817	r600_store_context_reg_seq(cb, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 3);
2818	r600_store_value(cb, 0); /* R_028AC0_DB_SRESULTS_COMPARE_STATE0 */
2819	r600_store_value(cb, 0); /* R_028AC4_DB_SRESULTS_COMPARE_STATE1 */
2820	r600_store_value(cb, 0); /* R_028AC8_DB_PRELOAD_CONTROL */
2821
2822	r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0);
2823	r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
2824
2825	r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
2826	r600_store_context_reg(cb, R_028820_PA_CL_NANINF_CNTL, 0);
2827
2828	r600_store_context_reg_seq(cb, R_028240_PA_SC_GENERIC_SCISSOR_TL, 2);
2829	r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */
2830	r600_store_value(cb, S_028244_BR_X(16384) | S_028244_BR_Y(16384)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */
2831
2832	r600_store_context_reg_seq(cb, R_028030_PA_SC_SCREEN_SCISSOR_TL, 2);
2833	r600_store_value(cb, 0); /* R_028030_PA_SC_SCREEN_SCISSOR_TL */
2834	r600_store_value(cb, S_028034_BR_X(16384) | S_028034_BR_Y(16384)); /* R_028034_PA_SC_SCREEN_SCISSOR_BR */
2835
2836	r600_store_context_reg(cb, R_028848_SQ_PGM_RESOURCES_2_PS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
2837	r600_store_context_reg(cb, R_028864_SQ_PGM_RESOURCES_2_VS, S_028864_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
2838	r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_2_GS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
2839	r600_store_context_reg(cb, R_028894_SQ_PGM_RESOURCES_2_ES, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
2840	r600_store_context_reg(cb, R_0288C0_SQ_PGM_RESOURCES_2_HS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
2841	r600_store_context_reg(cb, R_0288D8_SQ_PGM_RESOURCES_2_LS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
2842
2843	r600_store_context_reg(cb, R_0288A8_SQ_PGM_RESOURCES_FS, 0);
2844
2845	/* to avoid GPU doing any preloading of constant from random address */
2846	r600_store_context_reg_seq(cb, R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 16);
2847	for (i = 0; i < 16; i++)
2848		r600_store_value(cb, 0);
2849
2850	r600_store_context_reg_seq(cb, R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 16);
2851	for (i = 0; i < 16; i++)
2852		r600_store_value(cb, 0);
2853
2854	r600_store_context_reg_seq(cb, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 16);
2855	for (i = 0; i < 16; i++)
2856		r600_store_value(cb, 0);
2857
2858	r600_store_context_reg_seq(cb, R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0, 16);
2859	for (i = 0; i < 16; i++)
2860		r600_store_value(cb, 0);
2861
2862	r600_store_context_reg_seq(cb, R_028F80_ALU_CONST_BUFFER_SIZE_HS_0, 16);
2863	for (i = 0; i < 16; i++)
2864		r600_store_value(cb, 0);
2865
2866	if (rctx->screen->b.has_streamout) {
2867		r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
2868	}
2869
2870	r600_store_context_reg(cb, R_028010_DB_RENDER_OVERRIDE2, 0);
2871	r600_store_context_reg(cb, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
2872	r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 0);
2873	r600_store_context_reg_seq(cb, R_0286E4_SPI_PS_IN_CONTROL_2, 2);
2874	r600_store_value(cb, 0); /* R_0286E4_SPI_PS_IN_CONTROL_2 */
2875	r600_store_value(cb, 0); /* R_0286E8_SPI_COMPUTE_INPUT_CNTL */
2876
2877	r600_store_context_reg_seq(cb, R_028B54_VGT_SHADER_STAGES_EN, 2);
2878	r600_store_value(cb, 0); /* R028B54_VGT_SHADER_STAGES_EN */
2879	r600_store_value(cb, 0); /* R028B58_VGT_LS_HS_CONFIG */
2880	r600_store_context_reg(cb, R_028B6C_VGT_TF_PARAM, 0);
2881	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0, 0x01000FFF);
2882	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (32 * 4), 0x01000FFF);
2883	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (64 * 4), 0x01000FFF);
2884	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (96 * 4), 0x01000FFF);
2885	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (128 * 4), 0x01000FFF);
2886}
2887
2888void evergreen_init_common_regs(struct r600_context *rctx, struct r600_command_buffer *cb,
2889				enum chip_class ctx_chip_class,
2890				enum radeon_family ctx_family,
2891				int ctx_drm_minor)
2892{
2893	int ps_prio;
2894	int vs_prio;
2895	int gs_prio;
2896	int es_prio;
2897
2898	int hs_prio;
2899	int cs_prio;
2900	int ls_prio;
2901
2902	unsigned tmp;
2903
2904	ps_prio = 0;
2905	vs_prio = 1;
2906	gs_prio = 2;
2907	es_prio = 3;
2908	hs_prio = 3;
2909	ls_prio = 3;
2910	cs_prio = 0;
2911
2912	rctx->default_gprs[R600_HW_STAGE_PS] = 93;
2913	rctx->default_gprs[R600_HW_STAGE_VS] = 46;
2914	rctx->r6xx_num_clause_temp_gprs = 4;
2915	rctx->default_gprs[R600_HW_STAGE_GS] = 31;
2916	rctx->default_gprs[R600_HW_STAGE_ES] = 31;
2917	rctx->default_gprs[EG_HW_STAGE_HS] = 23;
2918	rctx->default_gprs[EG_HW_STAGE_LS] = 23;
2919
2920	tmp = 0;
2921	switch (ctx_family) {
2922	case CHIP_CEDAR:
2923	case CHIP_PALM:
2924	case CHIP_SUMO:
2925	case CHIP_SUMO2:
2926	case CHIP_CAICOS:
2927		break;
2928	default:
2929		tmp |= S_008C00_VC_ENABLE(1);
2930		break;
2931	}
2932	tmp |= S_008C00_EXPORT_SRC_C(1);
2933	tmp |= S_008C00_CS_PRIO(cs_prio);
2934	tmp |= S_008C00_LS_PRIO(ls_prio);
2935	tmp |= S_008C00_HS_PRIO(hs_prio);
2936	tmp |= S_008C00_PS_PRIO(ps_prio);
2937	tmp |= S_008C00_VS_PRIO(vs_prio);
2938	tmp |= S_008C00_GS_PRIO(gs_prio);
2939	tmp |= S_008C00_ES_PRIO(es_prio);
2940
2941	r600_store_config_reg_seq(cb, R_008C00_SQ_CONFIG, 1);
2942	r600_store_value(cb, tmp); /* R_008C00_SQ_CONFIG */
2943
2944	r600_store_config_reg_seq(cb, R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1, 2);
2945	r600_store_value(cb, 0); /* R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1 */
2946	r600_store_value(cb, 0); /* R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2 */
2947
2948	/* The cs checker requires this register to be set. */
2949	r600_store_context_reg(cb, R_028800_DB_DEPTH_CONTROL, 0);
2950
2951	r600_store_context_reg_seq(cb, R_028350_SX_MISC, 2);
2952	r600_store_value(cb, 0);
2953	r600_store_value(cb, S_028354_SURFACE_SYNC_MASK(0xf));
2954
2955	return;
2956}
2957
2958void evergreen_init_atom_start_cs(struct r600_context *rctx)
2959{
2960	struct r600_command_buffer *cb = &rctx->start_cs_cmd;
2961	int num_ps_threads;
2962	int num_vs_threads;
2963	int num_gs_threads;
2964	int num_es_threads;
2965	int num_hs_threads;
2966	int num_ls_threads;
2967
2968	int num_ps_stack_entries;
2969	int num_vs_stack_entries;
2970	int num_gs_stack_entries;
2971	int num_es_stack_entries;
2972	int num_hs_stack_entries;
2973	int num_ls_stack_entries;
2974	enum radeon_family family;
2975	unsigned tmp, i;
2976
2977	if (rctx->b.chip_class == CAYMAN) {
2978		cayman_init_atom_start_cs(rctx);
2979		return;
2980	}
2981
2982	r600_init_command_buffer(cb, 338);
2983
2984	/* This must be first. */
2985	r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
2986	r600_store_value(cb, 0x80000000);
2987	r600_store_value(cb, 0x80000000);
2988
2989	/* We're setting config registers here. */
2990	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
2991	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
2992
2993	/* This enables pipeline stat & streamout queries.
2994	 * They are only disabled by blits.
2995	 */
2996	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
2997	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) | EVENT_INDEX(0));
2998
2999	evergreen_init_common_regs(rctx, cb, rctx->b.chip_class,
3000				   rctx->b.family, rctx->screen->b.info.drm_minor);
3001
3002	family = rctx->b.family;
3003	switch (family) {
3004	case CHIP_CEDAR:
3005	default:
3006		num_ps_threads = 96;
3007		num_vs_threads = 16;
3008		num_gs_threads = 16;
3009		num_es_threads = 16;
3010		num_hs_threads = 16;
3011		num_ls_threads = 16;
3012		num_ps_stack_entries = 42;
3013		num_vs_stack_entries = 42;
3014		num_gs_stack_entries = 42;
3015		num_es_stack_entries = 42;
3016		num_hs_stack_entries = 42;
3017		num_ls_stack_entries = 42;
3018		break;
3019	case CHIP_REDWOOD:
3020		num_ps_threads = 128;
3021		num_vs_threads = 20;
3022		num_gs_threads = 20;
3023		num_es_threads = 20;
3024		num_hs_threads = 20;
3025		num_ls_threads = 20;
3026		num_ps_stack_entries = 42;
3027		num_vs_stack_entries = 42;
3028		num_gs_stack_entries = 42;
3029		num_es_stack_entries = 42;
3030		num_hs_stack_entries = 42;
3031		num_ls_stack_entries = 42;
3032		break;
3033	case CHIP_JUNIPER:
3034		num_ps_threads = 128;
3035		num_vs_threads = 20;
3036		num_gs_threads = 20;
3037		num_es_threads = 20;
3038		num_hs_threads = 20;
3039		num_ls_threads = 20;
3040		num_ps_stack_entries = 85;
3041		num_vs_stack_entries = 85;
3042		num_gs_stack_entries = 85;
3043		num_es_stack_entries = 85;
3044		num_hs_stack_entries = 85;
3045		num_ls_stack_entries = 85;
3046		break;
3047	case CHIP_CYPRESS:
3048	case CHIP_HEMLOCK:
3049		num_ps_threads = 128;
3050		num_vs_threads = 20;
3051		num_gs_threads = 20;
3052		num_es_threads = 20;
3053		num_hs_threads = 20;
3054		num_ls_threads = 20;
3055		num_ps_stack_entries = 85;
3056		num_vs_stack_entries = 85;
3057		num_gs_stack_entries = 85;
3058		num_es_stack_entries = 85;
3059		num_hs_stack_entries = 85;
3060		num_ls_stack_entries = 85;
3061		break;
3062	case CHIP_PALM:
3063		num_ps_threads = 96;
3064		num_vs_threads = 16;
3065		num_gs_threads = 16;
3066		num_es_threads = 16;
3067		num_hs_threads = 16;
3068		num_ls_threads = 16;
3069		num_ps_stack_entries = 42;
3070		num_vs_stack_entries = 42;
3071		num_gs_stack_entries = 42;
3072		num_es_stack_entries = 42;
3073		num_hs_stack_entries = 42;
3074		num_ls_stack_entries = 42;
3075		break;
3076	case CHIP_SUMO:
3077		num_ps_threads = 96;
3078		num_vs_threads = 25;
3079		num_gs_threads = 25;
3080		num_es_threads = 25;
3081		num_hs_threads = 16;
3082		num_ls_threads = 16;
3083		num_ps_stack_entries = 42;
3084		num_vs_stack_entries = 42;
3085		num_gs_stack_entries = 42;
3086		num_es_stack_entries = 42;
3087		num_hs_stack_entries = 42;
3088		num_ls_stack_entries = 42;
3089		break;
3090	case CHIP_SUMO2:
3091		num_ps_threads = 96;
3092		num_vs_threads = 25;
3093		num_gs_threads = 25;
3094		num_es_threads = 25;
3095		num_hs_threads = 16;
3096		num_ls_threads = 16;
3097		num_ps_stack_entries = 85;
3098		num_vs_stack_entries = 85;
3099		num_gs_stack_entries = 85;
3100		num_es_stack_entries = 85;
3101		num_hs_stack_entries = 85;
3102		num_ls_stack_entries = 85;
3103		break;
3104	case CHIP_BARTS:
3105		num_ps_threads = 128;
3106		num_vs_threads = 20;
3107		num_gs_threads = 20;
3108		num_es_threads = 20;
3109		num_hs_threads = 20;
3110		num_ls_threads = 20;
3111		num_ps_stack_entries = 85;
3112		num_vs_stack_entries = 85;
3113		num_gs_stack_entries = 85;
3114		num_es_stack_entries = 85;
3115		num_hs_stack_entries = 85;
3116		num_ls_stack_entries = 85;
3117		break;
3118	case CHIP_TURKS:
3119		num_ps_threads = 128;
3120		num_vs_threads = 20;
3121		num_gs_threads = 20;
3122		num_es_threads = 20;
3123		num_hs_threads = 20;
3124		num_ls_threads = 20;
3125		num_ps_stack_entries = 42;
3126		num_vs_stack_entries = 42;
3127		num_gs_stack_entries = 42;
3128		num_es_stack_entries = 42;
3129		num_hs_stack_entries = 42;
3130		num_ls_stack_entries = 42;
3131		break;
3132	case CHIP_CAICOS:
3133		num_ps_threads = 96;
3134		num_vs_threads = 10;
3135		num_gs_threads = 10;
3136		num_es_threads = 10;
3137		num_hs_threads = 10;
3138		num_ls_threads = 10;
3139		num_ps_stack_entries = 42;
3140		num_vs_stack_entries = 42;
3141		num_gs_stack_entries = 42;
3142		num_es_stack_entries = 42;
3143		num_hs_stack_entries = 42;
3144		num_ls_stack_entries = 42;
3145		break;
3146	}
3147
3148	tmp = S_008C18_NUM_PS_THREADS(num_ps_threads);
3149	tmp |= S_008C18_NUM_VS_THREADS(num_vs_threads);
3150	tmp |= S_008C18_NUM_GS_THREADS(num_gs_threads);
3151	tmp |= S_008C18_NUM_ES_THREADS(num_es_threads);
3152
3153	r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
3154	r600_store_value(cb, tmp); /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1 */
3155
3156	tmp = S_008C1C_NUM_HS_THREADS(num_hs_threads);
3157	tmp |= S_008C1C_NUM_LS_THREADS(num_ls_threads);
3158	r600_store_value(cb, tmp); /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2 */
3159
3160	tmp = S_008C20_NUM_PS_STACK_ENTRIES(num_ps_stack_entries);
3161	tmp |= S_008C20_NUM_VS_STACK_ENTRIES(num_vs_stack_entries);
3162	r600_store_value(cb, tmp); /* R_008C20_SQ_STACK_RESOURCE_MGMT_1 */
3163
3164	tmp = S_008C24_NUM_GS_STACK_ENTRIES(num_gs_stack_entries);
3165	tmp |= S_008C24_NUM_ES_STACK_ENTRIES(num_es_stack_entries);
3166	r600_store_value(cb, tmp); /* R_008C24_SQ_STACK_RESOURCE_MGMT_2 */
3167
3168	tmp = S_008C28_NUM_HS_STACK_ENTRIES(num_hs_stack_entries);
3169	tmp |= S_008C28_NUM_LS_STACK_ENTRIES(num_ls_stack_entries);
3170	r600_store_value(cb, tmp); /* R_008C28_SQ_STACK_RESOURCE_MGMT_3 */
3171
3172	r600_store_config_reg(cb, R_008E2C_SQ_LDS_RESOURCE_MGMT,
3173			      S_008E2C_NUM_PS_LDS(0x1000) | S_008E2C_NUM_LS_LDS(0x1000));
3174
3175	/* remove LS/HS from one SIMD for hw workaround */
3176	r600_store_config_reg_seq(cb, R_008E20_SQ_STATIC_THREAD_MGMT1, 3);
3177	r600_store_value(cb, 0xffffffff);
3178	r600_store_value(cb, 0xffffffff);
3179	r600_store_value(cb, 0xfffffffe);
3180
3181	r600_store_config_reg(cb, R_009100_SPI_CONFIG_CNTL, 0);
3182	r600_store_config_reg(cb, R_00913C_SPI_CONFIG_CNTL_1, S_00913C_VTX_DONE_DELAY(4));
3183
3184	r600_store_context_reg_seq(cb, R_028900_SQ_ESGS_RING_ITEMSIZE, 6);
3185	r600_store_value(cb, 0); /* R_028900_SQ_ESGS_RING_ITEMSIZE */
3186	r600_store_value(cb, 0); /* R_028904_SQ_GSVS_RING_ITEMSIZE */
3187	r600_store_value(cb, 0); /* R_028908_SQ_ESTMP_RING_ITEMSIZE */
3188	r600_store_value(cb, 0); /* R_02890C_SQ_GSTMP_RING_ITEMSIZE */
3189	r600_store_value(cb, 0); /* R_028910_SQ_VSTMP_RING_ITEMSIZE */
3190	r600_store_value(cb, 0); /* R_028914_SQ_PSTMP_RING_ITEMSIZE */
3191
3192	r600_store_context_reg_seq(cb, R_02891C_SQ_GS_VERT_ITEMSIZE, 4);
3193	r600_store_value(cb, 0); /* R_02891C_SQ_GS_VERT_ITEMSIZE */
3194	r600_store_value(cb, 0); /* R_028920_SQ_GS_VERT_ITEMSIZE_1 */
3195	r600_store_value(cb, 0); /* R_028924_SQ_GS_VERT_ITEMSIZE_2 */
3196	r600_store_value(cb, 0); /* R_028928_SQ_GS_VERT_ITEMSIZE_3 */
3197
3198	r600_store_context_reg_seq(cb, R_028A10_VGT_OUTPUT_PATH_CNTL, 13);
3199	r600_store_value(cb, 0); /* R_028A10_VGT_OUTPUT_PATH_CNTL */
3200	r600_store_value(cb, 0); /* R_028A14_VGT_HOS_CNTL */
3201	r600_store_value(cb, fui(64)); /* R_028A18_VGT_HOS_MAX_TESS_LEVEL */
3202	r600_store_value(cb, fui(1.0)); /* R_028A1C_VGT_HOS_MIN_TESS_LEVEL */
3203	r600_store_value(cb, 16); /* R_028A20_VGT_HOS_REUSE_DEPTH */
3204	r600_store_value(cb, 0); /* R_028A24_VGT_GROUP_PRIM_TYPE */
3205	r600_store_value(cb, 0); /* R_028A28_VGT_GROUP_FIRST_DECR */
3206	r600_store_value(cb, 0); /* R_028A2C_VGT_GROUP_DECR */
3207	r600_store_value(cb, 0); /* R_028A30_VGT_GROUP_VECT_0_CNTL */
3208	r600_store_value(cb, 0); /* R_028A34_VGT_GROUP_VECT_1_CNTL */
3209	r600_store_value(cb, 0); /* R_028A38_VGT_GROUP_VECT_0_FMT_CNTL */
3210	r600_store_value(cb, 0); /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */
3211	r600_store_value(cb, 0); /* R_028A40_VGT_GS_MODE */
3212
3213	r600_store_config_reg(cb, R_008A14_PA_CL_ENHANCE, (3 << 1) | 1);
3214
3215        r600_store_context_reg(cb, R_0288F0_SQ_VTX_SEMANTIC_CLEAR, ~0);
3216
3217        r600_store_context_reg_seq(cb, R_028400_VGT_MAX_VTX_INDX, 2);
3218	r600_store_value(cb, ~0); /* R_028400_VGT_MAX_VTX_INDX */
3219	r600_store_value(cb, 0); /* R_028404_VGT_MIN_VTX_INDX */
3220
3221	r600_store_ctl_const(cb, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);
3222
3223	r600_store_context_reg(cb, R_028028_DB_STENCIL_CLEAR, 0);
3224
3225	r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0);
3226	r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
3227	r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
3228
3229	r600_store_context_reg(cb, R_0286DC_SPI_FOG_CNTL, 0);
3230	r600_store_context_reg(cb, R_028820_PA_CL_NANINF_CNTL, 0);
3231
3232	r600_store_context_reg_seq(cb, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 3);
3233	r600_store_value(cb, 0); /* R_028AC0_DB_SRESULTS_COMPARE_STATE0 */
3234	r600_store_value(cb, 0); /* R_028AC4_DB_SRESULTS_COMPARE_STATE1 */
3235	r600_store_value(cb, 0); /* R_028AC8_DB_PRELOAD_CONTROL */
3236
3237	r600_store_context_reg_seq(cb, R_028240_PA_SC_GENERIC_SCISSOR_TL, 2);
3238	r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */
3239	r600_store_value(cb, S_028244_BR_X(16384) | S_028244_BR_Y(16384)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */
3240
3241	r600_store_context_reg_seq(cb, R_028030_PA_SC_SCREEN_SCISSOR_TL, 2);
3242	r600_store_value(cb, 0); /* R_028030_PA_SC_SCREEN_SCISSOR_TL */
3243	r600_store_value(cb, S_028034_BR_X(16384) | S_028034_BR_Y(16384)); /* R_028034_PA_SC_SCREEN_SCISSOR_BR */
3244
3245	r600_store_context_reg(cb, R_028848_SQ_PGM_RESOURCES_2_PS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
3246	r600_store_context_reg(cb, R_028864_SQ_PGM_RESOURCES_2_VS, S_028864_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
3247	r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_2_GS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
3248	r600_store_context_reg(cb, R_028894_SQ_PGM_RESOURCES_2_ES, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
3249	r600_store_context_reg(cb, R_0288A8_SQ_PGM_RESOURCES_FS, 0);
3250	r600_store_context_reg(cb, R_0288C0_SQ_PGM_RESOURCES_2_HS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
3251	r600_store_context_reg(cb, R_0288D8_SQ_PGM_RESOURCES_2_LS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
3252
3253	/* to avoid GPU doing any preloading of constant from random address */
3254	r600_store_context_reg_seq(cb, R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 16);
3255	for (i = 0; i < 16; i++)
3256		r600_store_value(cb, 0);
3257
3258	r600_store_context_reg_seq(cb, R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 16);
3259	for (i = 0; i < 16; i++)
3260		r600_store_value(cb, 0);
3261
3262	r600_store_context_reg_seq(cb, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 16);
3263	for (i = 0; i < 16; i++)
3264		r600_store_value(cb, 0);
3265
3266	r600_store_context_reg_seq(cb, R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0, 16);
3267	for (i = 0; i < 16; i++)
3268		r600_store_value(cb, 0);
3269
3270	r600_store_context_reg_seq(cb, R_028F80_ALU_CONST_BUFFER_SIZE_HS_0, 16);
3271	for (i = 0; i < 16; i++)
3272		r600_store_value(cb, 0);
3273
3274	r600_store_context_reg(cb, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0);
3275
3276	if (rctx->screen->b.has_streamout) {
3277		r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
3278	}
3279
3280	r600_store_context_reg(cb, R_028010_DB_RENDER_OVERRIDE2, 0);
3281	r600_store_context_reg(cb, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
3282	r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 0);
3283	r600_store_context_reg_seq(cb, R_0286E4_SPI_PS_IN_CONTROL_2, 2);
3284	r600_store_value(cb, 0); /* R_0286E4_SPI_PS_IN_CONTROL_2 */
3285	r600_store_value(cb, 0); /* R_0286E8_SPI_COMPUTE_INPUT_CNTL */
3286
3287	r600_store_context_reg_seq(cb, R_0288E8_SQ_LDS_ALLOC, 2);
3288	r600_store_value(cb, 0); /* R_0288E8_SQ_LDS_ALLOC */
3289	r600_store_value(cb, 0); /* R_0288EC_SQ_LDS_ALLOC_PS */
3290
3291	if (rctx->b.family == CHIP_CAICOS) {
3292		r600_store_context_reg_seq(cb, R_028B54_VGT_SHADER_STAGES_EN, 2);
3293		r600_store_value(cb, 0); /* R028B54_VGT_SHADER_STAGES_EN */
3294		r600_store_value(cb, 0); /* R028B58_VGT_LS_HS_CONFIG */
3295		r600_store_context_reg(cb, R_028B6C_VGT_TF_PARAM, 0);
3296	} else {
3297		r600_store_context_reg_seq(cb, R_028B54_VGT_SHADER_STAGES_EN, 7);
3298		r600_store_value(cb, 0); /* R028B54_VGT_SHADER_STAGES_EN */
3299		r600_store_value(cb, 0); /* R028B58_VGT_LS_HS_CONFIG */
3300		r600_store_value(cb, 0); /* R028B5C_VGT_LS_SIZE */
3301		r600_store_value(cb, 0); /* R028B60_VGT_HS_SIZE */
3302		r600_store_value(cb, 0); /* R028B64_VGT_LS_HS_ALLOC */
3303		r600_store_value(cb, 0); /* R028B68_VGT_HS_PATCH_CONST */
3304		r600_store_value(cb, 0); /* R028B68_VGT_TF_PARAM */
3305	}
3306
3307	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0, 0x01000FFF);
3308	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (32 * 4), 0x01000FFF);
3309	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (64 * 4), 0x01000FFF);
3310	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (96 * 4), 0x01000FFF);
3311	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (128 * 4), 0x01000FFF);
3312}
3313
3314void evergreen_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
3315{
3316	struct r600_context *rctx = (struct r600_context *)ctx;
3317	struct r600_command_buffer *cb = &shader->command_buffer;
3318	struct r600_shader *rshader = &shader->shader;
3319	unsigned i, exports_ps, num_cout, spi_ps_in_control_0, spi_input_z, spi_ps_in_control_1, db_shader_control = 0;
3320	int pos_index = -1, face_index = -1, fixed_pt_position_index = -1;
3321	int ninterp = 0;
3322	boolean have_perspective = FALSE, have_linear = FALSE;
3323	static const unsigned spi_baryc_enable_bit[6] = {
3324		S_0286E0_PERSP_SAMPLE_ENA(1),
3325		S_0286E0_PERSP_CENTER_ENA(1),
3326		S_0286E0_PERSP_CENTROID_ENA(1),
3327		S_0286E0_LINEAR_SAMPLE_ENA(1),
3328		S_0286E0_LINEAR_CENTER_ENA(1),
3329		S_0286E0_LINEAR_CENTROID_ENA(1)
3330	};
3331	unsigned spi_baryc_cntl = 0, sid, tmp, num = 0;
3332	unsigned z_export = 0, stencil_export = 0, mask_export = 0;
3333	unsigned sprite_coord_enable = rctx->rasterizer ? rctx->rasterizer->sprite_coord_enable : 0;
3334	uint32_t spi_ps_input_cntl[32];
3335
3336	if (!cb->buf) {
3337		r600_init_command_buffer(cb, 64);
3338	} else {
3339		cb->num_dw = 0;
3340	}
3341
3342	for (i = 0; i < rshader->ninput; i++) {
3343		/* evergreen NUM_INTERP only contains values interpolated into the LDS,
3344		   POSITION goes via GPRs from the SC so isn't counted */
3345		if (rshader->input[i].name == TGSI_SEMANTIC_POSITION)
3346			pos_index = i;
3347		else if (rshader->input[i].name == TGSI_SEMANTIC_FACE) {
3348			if (face_index == -1)
3349				face_index = i;
3350		}
3351		else if (rshader->input[i].name == TGSI_SEMANTIC_SAMPLEMASK) {
3352			if (face_index == -1)
3353				face_index = i; /* lives in same register, same enable bit */
3354		}
3355		else if (rshader->input[i].name == TGSI_SEMANTIC_SAMPLEID) {
3356			fixed_pt_position_index = i;
3357		}
3358		else {
3359			ninterp++;
3360			int k = eg_get_interpolator_index(
3361				rshader->input[i].interpolate,
3362				rshader->input[i].interpolate_location);
3363			if (k >= 0) {
3364				spi_baryc_cntl |= spi_baryc_enable_bit[k];
3365				have_perspective |= k < 3;
3366				have_linear |= !(k < 3);
3367			}
3368		}
3369
3370		sid = rshader->input[i].spi_sid;
3371
3372		if (sid) {
3373			tmp = S_028644_SEMANTIC(sid);
3374
3375			/* D3D 9 behaviour. GL is undefined */
3376			if (rshader->input[i].name == TGSI_SEMANTIC_COLOR && rshader->input[i].sid == 0)
3377				tmp |= S_028644_DEFAULT_VAL(3);
3378
3379			if (rshader->input[i].name == TGSI_SEMANTIC_POSITION ||
3380				rshader->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
3381				(rshader->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
3382					rctx->rasterizer && rctx->rasterizer->flatshade)) {
3383				tmp |= S_028644_FLAT_SHADE(1);
3384			}
3385
3386			if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC &&
3387			    (sprite_coord_enable & (1 << rshader->input[i].sid))) {
3388				tmp |= S_028644_PT_SPRITE_TEX(1);
3389			}
3390
3391			spi_ps_input_cntl[num++] = tmp;
3392		}
3393	}
3394
3395	r600_store_context_reg_seq(cb, R_028644_SPI_PS_INPUT_CNTL_0, num);
3396	r600_store_array(cb, num, spi_ps_input_cntl);
3397
3398	for (i = 0; i < rshader->noutput; i++) {
3399		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION)
3400			z_export = 1;
3401		if (rshader->output[i].name == TGSI_SEMANTIC_STENCIL)
3402			stencil_export = 1;
3403		if (rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK &&
3404			rctx->framebuffer.nr_samples > 1 && rctx->ps_iter_samples > 0)
3405			mask_export = 1;
3406	}
3407	if (rshader->uses_kill)
3408		db_shader_control |= S_02880C_KILL_ENABLE(1);
3409
3410	db_shader_control |= S_02880C_Z_EXPORT_ENABLE(z_export);
3411	db_shader_control |= S_02880C_STENCIL_EXPORT_ENABLE(stencil_export);
3412	db_shader_control |= S_02880C_MASK_EXPORT_ENABLE(mask_export);
3413
3414	if (shader->selector->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]) {
3415		db_shader_control |= S_02880C_DEPTH_BEFORE_SHADER(1) |
3416			S_02880C_EXEC_ON_NOOP(shader->selector->info.writes_memory);
3417	} else if (shader->selector->info.writes_memory) {
3418		db_shader_control |= S_02880C_EXEC_ON_HIER_FAIL(1);
3419	}
3420
3421	switch (rshader->ps_conservative_z) {
3422	default: /* fall through */
3423	case TGSI_FS_DEPTH_LAYOUT_ANY:
3424		db_shader_control |= S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_ANY_Z);
3425		break;
3426	case TGSI_FS_DEPTH_LAYOUT_GREATER:
3427		db_shader_control |= S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_GREATER_THAN_Z);
3428		break;
3429	case TGSI_FS_DEPTH_LAYOUT_LESS:
3430		db_shader_control |= S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_LESS_THAN_Z);
3431		break;
3432	}
3433
3434	exports_ps = 0;
3435	for (i = 0; i < rshader->noutput; i++) {
3436		if (rshader->output[i].name == TGSI_SEMANTIC_POSITION ||
3437		    rshader->output[i].name == TGSI_SEMANTIC_STENCIL ||
3438		    rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK)
3439			exports_ps |= 1;
3440	}
3441
3442	num_cout = rshader->ps_export_highest + 1;
3443
3444	exports_ps |= S_02884C_EXPORT_COLORS(num_cout);
3445	if (!exports_ps) {
3446		/* always at least export 1 component per pixel */
3447		exports_ps = 2;
3448	}
3449	shader->nr_ps_color_outputs = num_cout;
3450	shader->ps_color_export_mask = rshader->ps_color_export_mask;
3451	if (ninterp == 0) {
3452		ninterp = 1;
3453		have_perspective = TRUE;
3454	}
3455	if (!spi_baryc_cntl)
3456		spi_baryc_cntl |= spi_baryc_enable_bit[0];
3457
3458	if (!have_perspective && !have_linear)
3459		have_perspective = TRUE;
3460
3461	spi_ps_in_control_0 = S_0286CC_NUM_INTERP(ninterp) |
3462		              S_0286CC_PERSP_GRADIENT_ENA(have_perspective) |
3463		              S_0286CC_LINEAR_GRADIENT_ENA(have_linear);
3464	spi_input_z = 0;
3465	if (pos_index != -1) {
3466		spi_ps_in_control_0 |=  S_0286CC_POSITION_ENA(1) |
3467			S_0286CC_POSITION_CENTROID(rshader->input[pos_index].interpolate_location == TGSI_INTERPOLATE_LOC_CENTROID) |
3468			S_0286CC_POSITION_ADDR(rshader->input[pos_index].gpr);
3469		spi_input_z |= S_0286D8_PROVIDE_Z_TO_SPI(1);
3470	}
3471
3472	spi_ps_in_control_1 = 0;
3473	if (face_index != -1) {
3474		spi_ps_in_control_1 |= S_0286D0_FRONT_FACE_ENA(1) |
3475			S_0286D0_FRONT_FACE_ADDR(rshader->input[face_index].gpr);
3476	}
3477	if (fixed_pt_position_index != -1) {
3478		spi_ps_in_control_1 |= S_0286D0_FIXED_PT_POSITION_ENA(1) |
3479			S_0286D0_FIXED_PT_POSITION_ADDR(rshader->input[fixed_pt_position_index].gpr);
3480	}
3481
3482	r600_store_context_reg_seq(cb, R_0286CC_SPI_PS_IN_CONTROL_0, 2);
3483	r600_store_value(cb, spi_ps_in_control_0); /* R_0286CC_SPI_PS_IN_CONTROL_0 */
3484	r600_store_value(cb, spi_ps_in_control_1); /* R_0286D0_SPI_PS_IN_CONTROL_1 */
3485
3486	r600_store_context_reg(cb, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
3487	r600_store_context_reg(cb, R_0286D8_SPI_INPUT_Z, spi_input_z);
3488	r600_store_context_reg(cb, R_02884C_SQ_PGM_EXPORTS_PS, exports_ps);
3489
3490	r600_store_context_reg_seq(cb, R_028840_SQ_PGM_START_PS, 2);
3491	r600_store_value(cb, shader->bo->gpu_address >> 8);
3492	r600_store_value(cb, /* R_028844_SQ_PGM_RESOURCES_PS */
3493			 S_028844_NUM_GPRS(rshader->bc.ngpr) |
3494			 S_028844_PRIME_CACHE_ON_DRAW(1) |
3495			 S_028844_DX10_CLAMP(1) |
3496			 S_028844_STACK_SIZE(rshader->bc.nstack));
3497	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
3498
3499	shader->db_shader_control = db_shader_control;
3500	shader->ps_depth_export = z_export | stencil_export | mask_export;
3501
3502	shader->sprite_coord_enable = sprite_coord_enable;
3503	if (rctx->rasterizer)
3504		shader->flatshade = rctx->rasterizer->flatshade;
3505}
3506
3507void evergreen_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
3508{
3509	struct r600_command_buffer *cb = &shader->command_buffer;
3510	struct r600_shader *rshader = &shader->shader;
3511
3512	r600_init_command_buffer(cb, 32);
3513
3514	r600_store_context_reg(cb, R_028890_SQ_PGM_RESOURCES_ES,
3515			       S_028890_NUM_GPRS(rshader->bc.ngpr) |
3516			       S_028890_DX10_CLAMP(1) |
3517			       S_028890_STACK_SIZE(rshader->bc.nstack));
3518	r600_store_context_reg(cb, R_02888C_SQ_PGM_START_ES,
3519			       shader->bo->gpu_address >> 8);
3520	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
3521}
3522
3523void evergreen_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
3524{
3525	struct r600_context *rctx = (struct r600_context *)ctx;
3526	struct r600_command_buffer *cb = &shader->command_buffer;
3527	struct r600_shader *rshader = &shader->shader;
3528	struct r600_shader *cp_shader = &shader->gs_copy_shader->shader;
3529	unsigned gsvs_itemsizes[4] = {
3530			(cp_shader->ring_item_sizes[0] * shader->selector->gs_max_out_vertices) >> 2,
3531			(cp_shader->ring_item_sizes[1] * shader->selector->gs_max_out_vertices) >> 2,
3532			(cp_shader->ring_item_sizes[2] * shader->selector->gs_max_out_vertices) >> 2,
3533			(cp_shader->ring_item_sizes[3] * shader->selector->gs_max_out_vertices) >> 2
3534	};
3535
3536	r600_init_command_buffer(cb, 64);
3537
3538	/* VGT_GS_MODE is written by evergreen_emit_shader_stages */
3539
3540
3541	r600_store_context_reg(cb, R_028B38_VGT_GS_MAX_VERT_OUT,
3542			       S_028B38_MAX_VERT_OUT(shader->selector->gs_max_out_vertices));
3543	r600_store_context_reg(cb, R_028A6C_VGT_GS_OUT_PRIM_TYPE,
3544			       r600_conv_prim_to_gs_out(shader->selector->gs_output_prim));
3545
3546	if (rctx->screen->b.info.drm_minor >= 35) {
3547		r600_store_context_reg(cb, R_028B90_VGT_GS_INSTANCE_CNT,
3548				S_028B90_CNT(MIN2(shader->selector->gs_num_invocations, 127)) |
3549				S_028B90_ENABLE(shader->selector->gs_num_invocations > 0));
3550	}
3551	r600_store_context_reg_seq(cb, R_02891C_SQ_GS_VERT_ITEMSIZE, 4);
3552	r600_store_value(cb, cp_shader->ring_item_sizes[0] >> 2);
3553	r600_store_value(cb, cp_shader->ring_item_sizes[1] >> 2);
3554	r600_store_value(cb, cp_shader->ring_item_sizes[2] >> 2);
3555	r600_store_value(cb, cp_shader->ring_item_sizes[3] >> 2);
3556
3557	r600_store_context_reg(cb, R_028900_SQ_ESGS_RING_ITEMSIZE,
3558			       (rshader->ring_item_sizes[0]) >> 2);
3559
3560	r600_store_context_reg(cb, R_028904_SQ_GSVS_RING_ITEMSIZE,
3561			       gsvs_itemsizes[0] +
3562			       gsvs_itemsizes[1] +
3563			       gsvs_itemsizes[2] +
3564			       gsvs_itemsizes[3]);
3565
3566	r600_store_context_reg_seq(cb, R_02892C_SQ_GSVS_RING_OFFSET_1, 3);
3567	r600_store_value(cb, gsvs_itemsizes[0]);
3568	r600_store_value(cb, gsvs_itemsizes[0] + gsvs_itemsizes[1]);
3569	r600_store_value(cb, gsvs_itemsizes[0] + gsvs_itemsizes[1] + gsvs_itemsizes[2]);
3570
3571	/* FIXME calculate these values somehow ??? */
3572	r600_store_context_reg_seq(cb, R_028A54_GS_PER_ES, 3);
3573	r600_store_value(cb, 0x80); /* GS_PER_ES */
3574	r600_store_value(cb, 0x100); /* ES_PER_GS */
3575	r600_store_value(cb, 0x2); /* GS_PER_VS */
3576
3577	r600_store_context_reg(cb, R_028878_SQ_PGM_RESOURCES_GS,
3578			       S_028878_NUM_GPRS(rshader->bc.ngpr) |
3579			       S_028878_DX10_CLAMP(1) |
3580			       S_028878_STACK_SIZE(rshader->bc.nstack));
3581	r600_store_context_reg(cb, R_028874_SQ_PGM_START_GS,
3582			       shader->bo->gpu_address >> 8);
3583	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
3584}
3585
3586
3587void evergreen_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
3588{
3589	struct r600_command_buffer *cb = &shader->command_buffer;
3590	struct r600_shader *rshader = &shader->shader;
3591	unsigned spi_vs_out_id[10] = {};
3592	unsigned i, tmp, nparams = 0;
3593
3594	for (i = 0; i < rshader->noutput; i++) {
3595		if (rshader->output[i].spi_sid) {
3596			tmp = rshader->output[i].spi_sid << ((nparams & 3) * 8);
3597			spi_vs_out_id[nparams / 4] |= tmp;
3598			nparams++;
3599		}
3600	}
3601
3602	r600_init_command_buffer(cb, 32);
3603
3604	r600_store_context_reg_seq(cb, R_02861C_SPI_VS_OUT_ID_0, 10);
3605	for (i = 0; i < 10; i++) {
3606		r600_store_value(cb, spi_vs_out_id[i]);
3607	}
3608
3609	/* Certain attributes (position, psize, etc.) don't count as params.
3610	 * VS is required to export at least one param and r600_shader_from_tgsi()
3611	 * takes care of adding a dummy export.
3612	 */
3613	if (nparams < 1)
3614		nparams = 1;
3615
3616	r600_store_context_reg(cb, R_0286C4_SPI_VS_OUT_CONFIG,
3617			       S_0286C4_VS_EXPORT_COUNT(nparams - 1));
3618	r600_store_context_reg(cb, R_028860_SQ_PGM_RESOURCES_VS,
3619			       S_028860_NUM_GPRS(rshader->bc.ngpr) |
3620			       S_028860_DX10_CLAMP(1) |
3621			       S_028860_STACK_SIZE(rshader->bc.nstack));
3622	if (rshader->vs_position_window_space) {
3623		r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL,
3624			S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
3625	} else {
3626		r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL,
3627			S_028818_VTX_W0_FMT(1) |
3628			S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
3629			S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
3630			S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
3631
3632	}
3633	r600_store_context_reg(cb, R_02885C_SQ_PGM_START_VS,
3634			       shader->bo->gpu_address >> 8);
3635	/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
3636
3637	shader->pa_cl_vs_out_cntl =
3638		S_02881C_VS_OUT_CCDIST0_VEC_ENA((rshader->cc_dist_mask & 0x0F) != 0) |
3639		S_02881C_VS_OUT_CCDIST1_VEC_ENA((rshader->cc_dist_mask & 0xF0) != 0) |
3640		S_02881C_VS_OUT_MISC_VEC_ENA(rshader->vs_out_misc_write) |
3641		S_02881C_USE_VTX_POINT_SIZE(rshader->vs_out_point_size) |
3642		S_02881C_USE_VTX_EDGE_FLAG(rshader->vs_out_edgeflag) |
3643		S_02881C_USE_VTX_VIEWPORT_INDX(rshader->vs_out_viewport) |
3644		S_02881C_USE_VTX_RENDER_TARGET_INDX(rshader->vs_out_layer);
3645}
3646
3647void evergreen_update_hs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
3648{
3649	struct r600_command_buffer *cb = &shader->command_buffer;
3650	struct r600_shader *rshader = &shader->shader;
3651
3652	r600_init_command_buffer(cb, 32);
3653	r600_store_context_reg(cb, R_0288BC_SQ_PGM_RESOURCES_HS,
3654			       S_0288BC_NUM_GPRS(rshader->bc.ngpr) |
3655			       S_0288BC_DX10_CLAMP(1) |
3656			       S_0288BC_STACK_SIZE(rshader->bc.nstack));
3657	r600_store_context_reg(cb, R_0288B8_SQ_PGM_START_HS,
3658			       shader->bo->gpu_address >> 8);
3659}
3660
3661void evergreen_update_ls_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
3662{
3663	struct r600_command_buffer *cb = &shader->command_buffer;
3664	struct r600_shader *rshader = &shader->shader;
3665
3666	r600_init_command_buffer(cb, 32);
3667	r600_store_context_reg(cb, R_0288D4_SQ_PGM_RESOURCES_LS,
3668			       S_0288D4_NUM_GPRS(rshader->bc.ngpr) |
3669			       S_0288D4_DX10_CLAMP(1) |
3670			       S_0288D4_STACK_SIZE(rshader->bc.nstack));
3671	r600_store_context_reg(cb, R_0288D0_SQ_PGM_START_LS,
3672			       shader->bo->gpu_address >> 8);
3673}
3674void *evergreen_create_resolve_blend(struct r600_context *rctx)
3675{
3676	struct pipe_blend_state blend;
3677
3678	memset(&blend, 0, sizeof(blend));
3679	blend.independent_blend_enable = true;
3680	blend.rt[0].colormask = 0xf;
3681	return evergreen_create_blend_state_mode(&rctx->b.b, &blend, V_028808_CB_RESOLVE);
3682}
3683
3684void *evergreen_create_decompress_blend(struct r600_context *rctx)
3685{
3686	struct pipe_blend_state blend;
3687	unsigned mode = rctx->screen->has_compressed_msaa_texturing ?
3688			V_028808_CB_FMASK_DECOMPRESS : V_028808_CB_DECOMPRESS;
3689
3690	memset(&blend, 0, sizeof(blend));
3691	blend.independent_blend_enable = true;
3692	blend.rt[0].colormask = 0xf;
3693	return evergreen_create_blend_state_mode(&rctx->b.b, &blend, mode);
3694}
3695
3696void *evergreen_create_fastclear_blend(struct r600_context *rctx)
3697{
3698	struct pipe_blend_state blend;
3699	unsigned mode = V_028808_CB_ELIMINATE_FAST_CLEAR;
3700
3701	memset(&blend, 0, sizeof(blend));
3702	blend.independent_blend_enable = true;
3703	blend.rt[0].colormask = 0xf;
3704	return evergreen_create_blend_state_mode(&rctx->b.b, &blend, mode);
3705}
3706
3707void *evergreen_create_db_flush_dsa(struct r600_context *rctx)
3708{
3709	struct pipe_depth_stencil_alpha_state dsa = {{0}};
3710
3711	return rctx->b.b.create_depth_stencil_alpha_state(&rctx->b.b, &dsa);
3712}
3713
3714void evergreen_update_db_shader_control(struct r600_context * rctx)
3715{
3716	bool dual_export;
3717	unsigned db_shader_control;
3718
3719	if (!rctx->ps_shader) {
3720		return;
3721	}
3722
3723	dual_export = rctx->framebuffer.export_16bpc &&
3724		      !rctx->ps_shader->current->ps_depth_export;
3725
3726	db_shader_control = rctx->ps_shader->current->db_shader_control |
3727			    S_02880C_DUAL_EXPORT_ENABLE(dual_export) |
3728			    S_02880C_DB_SOURCE_FORMAT(dual_export ? V_02880C_EXPORT_DB_TWO :
3729								    V_02880C_EXPORT_DB_FULL) |
3730			    S_02880C_ALPHA_TO_MASK_DISABLE(rctx->framebuffer.cb0_is_integer);
3731
3732	/* When alpha test is enabled we can't trust the hw to make the proper
3733	 * decision on the order in which ztest should be run related to fragment
3734	 * shader execution.
3735	 *
3736	 * If alpha test is enabled perform early z rejection (RE_Z) but don't early
3737	 * write to the zbuffer. Write to zbuffer is delayed after fragment shader
3738	 * execution and thus after alpha test so if discarded by the alpha test
3739	 * the z value is not written.
3740	 * If ReZ is enabled, and the zfunc/zenable/zwrite values change you can
3741	 * get a hang unless you flush the DB in between.  For now just use
3742	 * LATE_Z.
3743	 */
3744	if (rctx->alphatest_state.sx_alpha_test_control || rctx->ps_shader->info.writes_memory) {
3745		db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z);
3746	} else {
3747		db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
3748	}
3749
3750	if (db_shader_control != rctx->db_misc_state.db_shader_control) {
3751		rctx->db_misc_state.db_shader_control = db_shader_control;
3752		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
3753	}
3754}
3755
3756static void evergreen_dma_copy_tile(struct r600_context *rctx,
3757				struct pipe_resource *dst,
3758				unsigned dst_level,
3759				unsigned dst_x,
3760				unsigned dst_y,
3761				unsigned dst_z,
3762				struct pipe_resource *src,
3763				unsigned src_level,
3764				unsigned src_x,
3765				unsigned src_y,
3766				unsigned src_z,
3767				unsigned copy_height,
3768				unsigned pitch,
3769				unsigned bpp)
3770{
3771	struct radeon_cmdbuf *cs = rctx->b.dma.cs;
3772	struct r600_texture *rsrc = (struct r600_texture*)src;
3773	struct r600_texture *rdst = (struct r600_texture*)dst;
3774	unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
3775	unsigned ncopy, height, cheight, detile, i, x, y, z, src_mode, dst_mode;
3776	unsigned sub_cmd, bank_h, bank_w, mt_aspect, nbanks, tile_split, non_disp_tiling = 0;
3777	uint64_t base, addr;
3778
3779	dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
3780	src_mode = rsrc->surface.u.legacy.level[src_level].mode;
3781	assert(dst_mode != src_mode);
3782
3783	/* non_disp_tiling bit needs to be set for depth, stencil, and fmask surfaces */
3784	if (util_format_has_depth(util_format_description(src->format)))
3785		non_disp_tiling = 1;
3786
3787	y = 0;
3788	sub_cmd = EG_DMA_COPY_TILED;
3789	lbpp = util_logbase2(bpp);
3790	pitch_tile_max = ((pitch / bpp) / 8) - 1;
3791	nbanks = eg_num_banks(rctx->screen->b.info.r600_num_banks);
3792
3793	if (dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED) {
3794		/* T2L */
3795		array_mode = evergreen_array_mode(src_mode);
3796		slice_tile_max = (rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.u.legacy.level[src_level].nblk_y) / (8*8);
3797		slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
3798		/* linear height must be the same as the slice tile max height, it's ok even
3799		 * if the linear destination/source have smaller heigh as the size of the
3800		 * dma packet will be using the copy_height which is always smaller or equal
3801		 * to the linear height
3802		 */
3803		height = u_minify(rsrc->resource.b.b.height0, src_level);
3804		detile = 1;
3805		x = src_x;
3806		y = src_y;
3807		z = src_z;
3808		base = rsrc->surface.u.legacy.level[src_level].offset;
3809		addr = rdst->surface.u.legacy.level[dst_level].offset;
3810		addr += (uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z;
3811		addr += dst_y * pitch + dst_x * bpp;
3812		bank_h = eg_bank_wh(rsrc->surface.u.legacy.bankh);
3813		bank_w = eg_bank_wh(rsrc->surface.u.legacy.bankw);
3814		mt_aspect = eg_macro_tile_aspect(rsrc->surface.u.legacy.mtilea);
3815		tile_split = eg_tile_split(rsrc->surface.u.legacy.tile_split);
3816		base += rsrc->resource.gpu_address;
3817		addr += rdst->resource.gpu_address;
3818	} else {
3819		/* L2T */
3820		array_mode = evergreen_array_mode(dst_mode);
3821		slice_tile_max = (rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.u.legacy.level[dst_level].nblk_y) / (8*8);
3822		slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
3823		/* linear height must be the same as the slice tile max height, it's ok even
3824		 * if the linear destination/source have smaller heigh as the size of the
3825		 * dma packet will be using the copy_height which is always smaller or equal
3826		 * to the linear height
3827		 */
3828		height = u_minify(rdst->resource.b.b.height0, dst_level);
3829		detile = 0;
3830		x = dst_x;
3831		y = dst_y;
3832		z = dst_z;
3833		base = rdst->surface.u.legacy.level[dst_level].offset;
3834		addr = rsrc->surface.u.legacy.level[src_level].offset;
3835		addr += (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_z;
3836		addr += src_y * pitch + src_x * bpp;
3837		bank_h = eg_bank_wh(rdst->surface.u.legacy.bankh);
3838		bank_w = eg_bank_wh(rdst->surface.u.legacy.bankw);
3839		mt_aspect = eg_macro_tile_aspect(rdst->surface.u.legacy.mtilea);
3840		tile_split = eg_tile_split(rdst->surface.u.legacy.tile_split);
3841		base += rdst->resource.gpu_address;
3842		addr += rsrc->resource.gpu_address;
3843	}
3844
3845	size = (copy_height * pitch) / 4;
3846	ncopy = (size / EG_DMA_COPY_MAX_SIZE) + !!(size % EG_DMA_COPY_MAX_SIZE);
3847	r600_need_dma_space(&rctx->b, ncopy * 9, &rdst->resource, &rsrc->resource);
3848
3849	for (i = 0; i < ncopy; i++) {
3850		cheight = copy_height;
3851		if (((cheight * pitch) / 4) > EG_DMA_COPY_MAX_SIZE) {
3852			cheight = (EG_DMA_COPY_MAX_SIZE * 4) / pitch;
3853		}
3854		size = (cheight * pitch) / 4;
3855		/* emit reloc before writing cs so that cs is always in consistent state */
3856		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rsrc->resource,
3857				      RADEON_USAGE_READ, 0);
3858		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rdst->resource,
3859				      RADEON_USAGE_WRITE, 0);
3860		radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, sub_cmd, size));
3861		radeon_emit(cs, base >> 8);
3862		radeon_emit(cs, (detile << 31) | (array_mode << 27) |
3863				(lbpp << 24) | (bank_h << 21) |
3864				(bank_w << 18) | (mt_aspect << 16));
3865		radeon_emit(cs, (pitch_tile_max << 0) | ((height - 1) << 16));
3866		radeon_emit(cs, (slice_tile_max << 0));
3867		radeon_emit(cs, (x << 0) | (z << 18));
3868		radeon_emit(cs, (y << 0) | (tile_split << 21) | (nbanks << 25) | (non_disp_tiling << 28));
3869		radeon_emit(cs, addr & 0xfffffffc);
3870		radeon_emit(cs, (addr >> 32UL) & 0xff);
3871		copy_height -= cheight;
3872		addr += cheight * pitch;
3873		y += cheight;
3874	}
3875}
3876
3877static void evergreen_dma_copy(struct pipe_context *ctx,
3878			       struct pipe_resource *dst,
3879			       unsigned dst_level,
3880			       unsigned dstx, unsigned dsty, unsigned dstz,
3881			       struct pipe_resource *src,
3882			       unsigned src_level,
3883			       const struct pipe_box *src_box)
3884{
3885	struct r600_context *rctx = (struct r600_context *)ctx;
3886	struct r600_texture *rsrc = (struct r600_texture*)src;
3887	struct r600_texture *rdst = (struct r600_texture*)dst;
3888	unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode, copy_height;
3889	unsigned src_w, dst_w;
3890	unsigned src_x, src_y;
3891	unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;
3892
3893	if (rctx->b.dma.cs == NULL) {
3894		goto fallback;
3895	}
3896
3897	if (rctx->cmd_buf_is_compute) {
3898		rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
3899		rctx->cmd_buf_is_compute = false;
3900	}
3901
3902	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
3903		evergreen_dma_copy_buffer(rctx, dst, src, dst_x, src_box->x, src_box->width);
3904		return;
3905	}
3906
3907	if (src_box->depth > 1 ||
3908	    !r600_prepare_for_dma_blit(&rctx->b, rdst, dst_level, dstx, dsty,
3909					dstz, rsrc, src_level, src_box))
3910		goto fallback;
3911
3912	src_x = util_format_get_nblocksx(src->format, src_box->x);
3913	dst_x = util_format_get_nblocksx(src->format, dst_x);
3914	src_y = util_format_get_nblocksy(src->format, src_box->y);
3915	dst_y = util_format_get_nblocksy(src->format, dst_y);
3916
3917	bpp = rdst->surface.bpe;
3918	dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.bpe;
3919	src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.bpe;
3920	src_w = u_minify(rsrc->resource.b.b.width0, src_level);
3921	dst_w = u_minify(rdst->resource.b.b.width0, dst_level);
3922	copy_height = src_box->height / rsrc->surface.blk_h;
3923
3924	dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
3925	src_mode = rsrc->surface.u.legacy.level[src_level].mode;
3926
3927	if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w) {
3928		/* FIXME evergreen can do partial blit */
3929		goto fallback;
3930	}
3931	/* the x test here are currently useless (because we don't support partial blit)
3932	 * but keep them around so we don't forget about those
3933	 */
3934	if (src_pitch % 8 || src_box->x % 8 || dst_x % 8 || src_box->y % 8 || dst_y % 8) {
3935		goto fallback;
3936	}
3937
3938	/* 128 bpp surfaces require non_disp_tiling for both
3939	 * tiled and linear buffers on cayman.  However, async
3940	 * DMA only supports it on the tiled side.  As such
3941	 * the tile order is backwards after a L2T/T2L packet.
3942	 */
3943	if ((rctx->b.chip_class == CAYMAN) &&
3944	    (src_mode != dst_mode) &&
3945	    (util_format_get_blocksize(src->format) >= 16)) {
3946		goto fallback;
3947	}
3948
3949	if (src_mode == dst_mode) {
3950		uint64_t dst_offset, src_offset;
3951		/* simple dma blit would do NOTE code here assume :
3952		 *   src_box.x/y == 0
3953		 *   dst_x/y == 0
3954		 *   dst_pitch == src_pitch
3955		 */
3956		src_offset= rsrc->surface.u.legacy.level[src_level].offset;
3957		src_offset += (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_box->z;
3958		src_offset += src_y * src_pitch + src_x * bpp;
3959		dst_offset = rdst->surface.u.legacy.level[dst_level].offset;
3960		dst_offset += (uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z;
3961		dst_offset += dst_y * dst_pitch + dst_x * bpp;
3962		evergreen_dma_copy_buffer(rctx, dst, src, dst_offset, src_offset,
3963					src_box->height * src_pitch);
3964	} else {
3965		evergreen_dma_copy_tile(rctx, dst, dst_level, dst_x, dst_y, dst_z,
3966					src, src_level, src_x, src_y, src_box->z,
3967					copy_height, dst_pitch, bpp);
3968	}
3969	return;
3970
3971fallback:
3972	r600_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
3973				  src, src_level, src_box);
3974}
3975
3976static void evergreen_set_tess_state(struct pipe_context *ctx,
3977				     const float default_outer_level[4],
3978				     const float default_inner_level[2])
3979{
3980	struct r600_context *rctx = (struct r600_context *)ctx;
3981
3982	memcpy(rctx->tess_state, default_outer_level, sizeof(float) * 4);
3983	memcpy(rctx->tess_state+4, default_inner_level, sizeof(float) * 2);
3984	rctx->driver_consts[PIPE_SHADER_TESS_CTRL].tcs_default_levels_dirty = true;
3985}
3986
3987static void evergreen_setup_immed_buffer(struct r600_context *rctx,
3988					 struct r600_image_view *rview,
3989					 enum pipe_format pformat)
3990{
3991	struct r600_screen *rscreen = (struct r600_screen *)rctx->b.b.screen;
3992	uint32_t immed_size = rscreen->b.info.max_se * 256 * 64 * util_format_get_blocksize(pformat);
3993	struct eg_buf_res_params buf_params;
3994	bool skip_reloc = false;
3995	struct r600_resource *resource = (struct r600_resource *)rview->base.resource;
3996	if (!resource->immed_buffer) {
3997		eg_resource_alloc_immed(&rscreen->b, resource, immed_size);
3998	}
3999
4000	memset(&buf_params, 0, sizeof(buf_params));
4001	buf_params.pipe_format = pformat;
4002	buf_params.size = resource->immed_buffer->b.b.width0;
4003	buf_params.swizzle[0] = PIPE_SWIZZLE_X;
4004	buf_params.swizzle[1] = PIPE_SWIZZLE_Y;
4005	buf_params.swizzle[2] = PIPE_SWIZZLE_Z;
4006	buf_params.swizzle[3] = PIPE_SWIZZLE_W;
4007	buf_params.uncached = 1;
4008	evergreen_fill_buffer_resource_words(rctx, &resource->immed_buffer->b.b,
4009					     &buf_params, &skip_reloc,
4010					     rview->immed_resource_words);
4011}
4012
4013static void evergreen_set_hw_atomic_buffers(struct pipe_context *ctx,
4014					    unsigned start_slot,
4015					    unsigned count,
4016					    const struct pipe_shader_buffer *buffers)
4017{
4018	struct r600_context *rctx = (struct r600_context *)ctx;
4019	struct r600_atomic_buffer_state *astate;
4020	unsigned i, idx;
4021
4022	astate = &rctx->atomic_buffer_state;
4023
4024	/* we'd probably like to expand this to 8 later so put the logic in */
4025	for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
4026		const struct pipe_shader_buffer *buf;
4027		struct pipe_shader_buffer *abuf;
4028
4029		abuf = &astate->buffer[i];
4030
4031		if (!buffers || !buffers[idx].buffer) {
4032			pipe_resource_reference(&abuf->buffer, NULL);
4033			continue;
4034		}
4035		buf = &buffers[idx];
4036
4037		pipe_resource_reference(&abuf->buffer, buf->buffer);
4038		abuf->buffer_offset = buf->buffer_offset;
4039		abuf->buffer_size = buf->buffer_size;
4040	}
4041}
4042
4043static void evergreen_set_shader_buffers(struct pipe_context *ctx,
4044					 enum pipe_shader_type shader, unsigned start_slot,
4045					 unsigned count,
4046					 const struct pipe_shader_buffer *buffers,
4047					 unsigned writable_bitmask)
4048{
4049	struct r600_context *rctx = (struct r600_context *)ctx;
4050	struct r600_image_state *istate = NULL;
4051	struct r600_image_view *rview;
4052	struct r600_tex_color_info color;
4053	struct eg_buf_res_params buf_params;
4054	struct r600_resource *resource;
4055	unsigned i, idx;
4056	unsigned old_mask;
4057
4058	if (shader != PIPE_SHADER_FRAGMENT &&
4059	    shader != PIPE_SHADER_COMPUTE && count == 0)
4060		return;
4061
4062	if (shader == PIPE_SHADER_FRAGMENT)
4063		istate = &rctx->fragment_buffers;
4064	else if (shader == PIPE_SHADER_COMPUTE)
4065		istate = &rctx->compute_buffers;
4066
4067	old_mask = istate->enabled_mask;
4068	for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
4069		const struct pipe_shader_buffer *buf;
4070		unsigned res_type;
4071
4072		rview = &istate->views[i];
4073
4074		if (!buffers || !buffers[idx].buffer) {
4075			pipe_resource_reference((struct pipe_resource **)&rview->base.resource, NULL);
4076			istate->enabled_mask &= ~(1 << i);
4077			continue;
4078		}
4079
4080		buf = &buffers[idx];
4081		pipe_resource_reference((struct pipe_resource **)&rview->base.resource, buf->buffer);
4082
4083		resource = (struct r600_resource *)rview->base.resource;
4084
4085		evergreen_setup_immed_buffer(rctx, rview, PIPE_FORMAT_R32_UINT);
4086
4087		color.offset = 0;
4088		color.view = 0;
4089		evergreen_set_color_surface_buffer(rctx, resource,
4090						   PIPE_FORMAT_R32_UINT,
4091						   buf->buffer_offset,
4092						   buf->buffer_offset + buf->buffer_size,
4093						   &color);
4094
4095		res_type = V_028C70_BUFFER;
4096
4097		rview->cb_color_base = color.offset;
4098		rview->cb_color_dim = color.dim;
4099		rview->cb_color_info = color.info |
4100			S_028C70_RAT(1) |
4101			S_028C70_RESOURCE_TYPE(res_type);
4102		rview->cb_color_pitch = color.pitch;
4103		rview->cb_color_slice = color.slice;
4104		rview->cb_color_view = color.view;
4105		rview->cb_color_attrib = color.attrib;
4106		rview->cb_color_fmask = color.fmask;
4107		rview->cb_color_fmask_slice = color.fmask_slice;
4108
4109		memset(&buf_params, 0, sizeof(buf_params));
4110		buf_params.pipe_format = PIPE_FORMAT_R32_UINT;
4111		buf_params.offset = buf->buffer_offset;
4112		buf_params.size = buf->buffer_size;
4113		buf_params.swizzle[0] = PIPE_SWIZZLE_X;
4114		buf_params.swizzle[1] = PIPE_SWIZZLE_Y;
4115		buf_params.swizzle[2] = PIPE_SWIZZLE_Z;
4116		buf_params.swizzle[3] = PIPE_SWIZZLE_W;
4117		buf_params.force_swizzle = true;
4118		buf_params.uncached = 1;
4119		buf_params.size_in_bytes = true;
4120		evergreen_fill_buffer_resource_words(rctx, &resource->b.b,
4121						     &buf_params,
4122						     &rview->skip_mip_address_reloc,
4123						     rview->resource_words);
4124
4125		istate->enabled_mask |= (1 << i);
4126	}
4127
4128	istate->atom.num_dw = util_bitcount(istate->enabled_mask) * 46;
4129
4130	if (old_mask != istate->enabled_mask)
4131		r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
4132
4133	/* construct the target mask */
4134	if (rctx->cb_misc_state.buffer_rat_enabled_mask != istate->enabled_mask) {
4135		rctx->cb_misc_state.buffer_rat_enabled_mask = istate->enabled_mask;
4136		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
4137	}
4138
4139	if (shader == PIPE_SHADER_FRAGMENT)
4140		r600_mark_atom_dirty(rctx, &istate->atom);
4141}
4142
4143static void evergreen_set_shader_images(struct pipe_context *ctx,
4144					enum pipe_shader_type shader, unsigned start_slot,
4145					unsigned count,
4146					const struct pipe_image_view *images)
4147{
4148	struct r600_context *rctx = (struct r600_context *)ctx;
4149	unsigned i;
4150	struct r600_image_view *rview;
4151	struct pipe_resource *image;
4152	struct r600_resource *resource;
4153	struct r600_tex_color_info color;
4154	struct eg_buf_res_params buf_params;
4155	struct eg_tex_res_params tex_params;
4156	unsigned old_mask;
4157	struct r600_image_state *istate = NULL;
4158	int idx;
4159	if (shader != PIPE_SHADER_FRAGMENT && shader != PIPE_SHADER_COMPUTE && count == 0)
4160		return;
4161
4162	if (shader == PIPE_SHADER_FRAGMENT)
4163		istate = &rctx->fragment_images;
4164	else if (shader == PIPE_SHADER_COMPUTE)
4165		istate = &rctx->compute_images;
4166
4167	assert (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE);
4168
4169	old_mask = istate->enabled_mask;
4170	for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
4171		unsigned res_type;
4172		const struct pipe_image_view *iview;
4173		rview = &istate->views[i];
4174
4175		if (!images || !images[idx].resource) {
4176			pipe_resource_reference((struct pipe_resource **)&rview->base.resource, NULL);
4177			istate->enabled_mask &= ~(1 << i);
4178			istate->compressed_colortex_mask &= ~(1 << i);
4179			istate->compressed_depthtex_mask &= ~(1 << i);
4180			continue;
4181		}
4182
4183		iview = &images[idx];
4184		image = iview->resource;
4185		resource = (struct r600_resource *)image;
4186
4187		r600_context_add_resource_size(ctx, image);
4188
4189		rview->base = *iview;
4190		rview->base.resource = NULL;
4191		pipe_resource_reference((struct pipe_resource **)&rview->base.resource, image);
4192
4193		evergreen_setup_immed_buffer(rctx, rview, iview->format);
4194
4195		bool is_buffer = image->target == PIPE_BUFFER;
4196		struct r600_texture *rtex = (struct r600_texture *)image;
4197		if (!is_buffer & rtex->db_compatible)
4198			istate->compressed_depthtex_mask |= 1 << i;
4199		else
4200			istate->compressed_depthtex_mask &= ~(1 << i);
4201
4202		if (!is_buffer && rtex->cmask.size)
4203			istate->compressed_colortex_mask |= 1 << i;
4204		else
4205			istate->compressed_colortex_mask &= ~(1 << i);
4206		if (!is_buffer) {
4207
4208			evergreen_set_color_surface_common(rctx, rtex,
4209							   iview->u.tex.level,
4210							   iview->u.tex.first_layer,
4211							   iview->u.tex.last_layer,
4212							   iview->format,
4213							   &color);
4214			color.dim = S_028C78_WIDTH_MAX(u_minify(image->width0, iview->u.tex.level) - 1) |
4215			  S_028C78_HEIGHT_MAX(u_minify(image->height0, iview->u.tex.level) - 1);
4216		} else {
4217			color.offset = 0;
4218			color.view = 0;
4219			evergreen_set_color_surface_buffer(rctx, resource,
4220							   iview->format,
4221							   iview->u.buf.offset,
4222							   iview->u.buf.size,
4223							   &color);
4224		}
4225
4226		switch (image->target) {
4227		case PIPE_BUFFER:
4228			res_type = V_028C70_BUFFER;
4229			break;
4230		case PIPE_TEXTURE_1D:
4231			res_type = V_028C70_TEXTURE1D;
4232			break;
4233		case PIPE_TEXTURE_1D_ARRAY:
4234			res_type = V_028C70_TEXTURE1DARRAY;
4235			break;
4236		case PIPE_TEXTURE_2D:
4237		case PIPE_TEXTURE_RECT:
4238			res_type = V_028C70_TEXTURE2D;
4239			break;
4240		case PIPE_TEXTURE_3D:
4241			res_type = V_028C70_TEXTURE3D;
4242			break;
4243		case PIPE_TEXTURE_2D_ARRAY:
4244		case PIPE_TEXTURE_CUBE:
4245		case PIPE_TEXTURE_CUBE_ARRAY:
4246			res_type = V_028C70_TEXTURE2DARRAY;
4247			break;
4248		default:
4249			assert(0);
4250			res_type = 0;
4251			break;
4252		}
4253
4254		rview->cb_color_base = color.offset;
4255		rview->cb_color_dim = color.dim;
4256		rview->cb_color_info = color.info |
4257			S_028C70_RAT(1) |
4258			S_028C70_RESOURCE_TYPE(res_type);
4259		rview->cb_color_pitch = color.pitch;
4260		rview->cb_color_slice = color.slice;
4261		rview->cb_color_view = color.view;
4262		rview->cb_color_attrib = color.attrib;
4263		rview->cb_color_fmask = color.fmask;
4264		rview->cb_color_fmask_slice = color.fmask_slice;
4265
4266		if (image->target != PIPE_BUFFER) {
4267			memset(&tex_params, 0, sizeof(tex_params));
4268			tex_params.pipe_format = iview->format;
4269			tex_params.force_level = 0;
4270			tex_params.width0 = image->width0;
4271			tex_params.height0 = image->height0;
4272			tex_params.first_level = iview->u.tex.level;
4273			tex_params.last_level = iview->u.tex.level;
4274			tex_params.first_layer = iview->u.tex.first_layer;
4275			tex_params.last_layer = iview->u.tex.last_layer;
4276			tex_params.target = image->target;
4277			tex_params.swizzle[0] = PIPE_SWIZZLE_X;
4278			tex_params.swizzle[1] = PIPE_SWIZZLE_Y;
4279			tex_params.swizzle[2] = PIPE_SWIZZLE_Z;
4280			tex_params.swizzle[3] = PIPE_SWIZZLE_W;
4281			evergreen_fill_tex_resource_words(rctx, &resource->b.b, &tex_params,
4282							  &rview->skip_mip_address_reloc,
4283							  rview->resource_words);
4284
4285		} else {
4286			memset(&buf_params, 0, sizeof(buf_params));
4287			buf_params.pipe_format = iview->format;
4288			buf_params.size = iview->u.buf.size;
4289			buf_params.offset = iview->u.buf.offset;
4290			buf_params.swizzle[0] = PIPE_SWIZZLE_X;
4291			buf_params.swizzle[1] = PIPE_SWIZZLE_Y;
4292			buf_params.swizzle[2] = PIPE_SWIZZLE_Z;
4293			buf_params.swizzle[3] = PIPE_SWIZZLE_W;
4294			evergreen_fill_buffer_resource_words(rctx, &resource->b.b,
4295							     &buf_params,
4296							     &rview->skip_mip_address_reloc,
4297							     rview->resource_words);
4298		}
4299		istate->enabled_mask |= (1 << i);
4300	}
4301
4302	istate->atom.num_dw = util_bitcount(istate->enabled_mask) * 46;
4303	istate->dirty_buffer_constants = TRUE;
4304	rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
4305	rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB |
4306		R600_CONTEXT_FLUSH_AND_INV_CB_META;
4307
4308	if (old_mask != istate->enabled_mask)
4309		r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
4310
4311	if (rctx->cb_misc_state.image_rat_enabled_mask != istate->enabled_mask) {
4312		rctx->cb_misc_state.image_rat_enabled_mask = istate->enabled_mask;
4313		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
4314	}
4315
4316	if (shader == PIPE_SHADER_FRAGMENT)
4317		r600_mark_atom_dirty(rctx, &istate->atom);
4318}
4319
4320static void evergreen_get_pipe_constant_buffer(struct r600_context *rctx,
4321					       enum pipe_shader_type shader, uint slot,
4322					       struct pipe_constant_buffer *cbuf)
4323{
4324	struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
4325	struct pipe_constant_buffer *cb;
4326	cbuf->user_buffer = NULL;
4327
4328	cb = &state->cb[slot];
4329
4330	cbuf->buffer_size = cb->buffer_size;
4331	pipe_resource_reference(&cbuf->buffer, cb->buffer);
4332}
4333
4334static void evergreen_get_shader_buffers(struct r600_context *rctx,
4335					 enum pipe_shader_type shader,
4336					 uint start_slot, uint count,
4337					 struct pipe_shader_buffer *sbuf)
4338{
4339	assert(shader == PIPE_SHADER_COMPUTE);
4340	int idx, i;
4341	struct r600_image_state *istate = &rctx->compute_buffers;
4342	struct r600_image_view *rview;
4343
4344	for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
4345
4346		rview = &istate->views[i];
4347
4348		pipe_resource_reference(&sbuf[idx].buffer, rview->base.resource);
4349		if (rview->base.resource) {
4350			uint64_t rview_va = ((struct r600_resource *)rview->base.resource)->gpu_address;
4351
4352			uint64_t prog_va = rview->resource_words[0];
4353
4354			prog_va += ((uint64_t)G_030008_BASE_ADDRESS_HI(rview->resource_words[2])) << 32;
4355			prog_va -= rview_va;
4356
4357			sbuf[idx].buffer_offset = prog_va & 0xffffffff;
4358			sbuf[idx].buffer_size = rview->resource_words[1] + 1;;
4359		} else {
4360			sbuf[idx].buffer_offset = 0;
4361			sbuf[idx].buffer_size = 0;
4362		}
4363	}
4364}
4365
4366static void evergreen_save_qbo_state(struct pipe_context *ctx, struct r600_qbo_state *st)
4367{
4368	struct r600_context *rctx = (struct r600_context *)ctx;
4369	st->saved_compute = rctx->cs_shader_state.shader;
4370
4371	/* save constant buffer 0 */
4372	evergreen_get_pipe_constant_buffer(rctx, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
4373	/* save ssbo 0 */
4374	evergreen_get_shader_buffers(rctx, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
4375}
4376
4377
4378void evergreen_init_state_functions(struct r600_context *rctx)
4379{
4380	unsigned id = 1;
4381	unsigned i;
4382	/* !!!
4383	 *  To avoid GPU lockup registers must be emitted in a specific order
4384	 * (no kidding ...). The order below is important and have been
4385	 * partially inferred from analyzing fglrx command stream.
4386	 *
4387	 * Don't reorder atom without carefully checking the effect (GPU lockup
4388	 * or piglit regression).
4389	 * !!!
4390	 */
4391	if (rctx->b.chip_class == EVERGREEN) {
4392		r600_init_atom(rctx, &rctx->config_state.atom, id++, evergreen_emit_config_state, 11);
4393		rctx->config_state.dyn_gpr_enabled = true;
4394	}
4395	r600_init_atom(rctx, &rctx->framebuffer.atom, id++, evergreen_emit_framebuffer_state, 0);
4396	r600_init_atom(rctx, &rctx->fragment_images.atom, id++, evergreen_emit_fragment_image_state, 0);
4397	r600_init_atom(rctx, &rctx->compute_images.atom, id++, evergreen_emit_compute_image_state, 0);
4398	r600_init_atom(rctx, &rctx->fragment_buffers.atom, id++, evergreen_emit_fragment_buffer_state, 0);
4399	r600_init_atom(rctx, &rctx->compute_buffers.atom, id++, evergreen_emit_compute_buffer_state, 0);
4400	/* shader const */
4401	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX].atom, id++, evergreen_emit_vs_constant_buffers, 0);
4402	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY].atom, id++, evergreen_emit_gs_constant_buffers, 0);
4403	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT].atom, id++, evergreen_emit_ps_constant_buffers, 0);
4404	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_TESS_CTRL].atom, id++, evergreen_emit_tcs_constant_buffers, 0);
4405	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_TESS_EVAL].atom, id++, evergreen_emit_tes_constant_buffers, 0);
4406	r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_COMPUTE].atom, id++, evergreen_emit_cs_constant_buffers, 0);
4407	/* shader program */
4408	r600_init_atom(rctx, &rctx->cs_shader_state.atom, id++, evergreen_emit_cs_shader, 0);
4409	/* sampler */
4410	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].states.atom, id++, evergreen_emit_vs_sampler_states, 0);
4411	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].states.atom, id++, evergreen_emit_gs_sampler_states, 0);
4412	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_TESS_CTRL].states.atom, id++, evergreen_emit_tcs_sampler_states, 0);
4413	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_TESS_EVAL].states.atom, id++, evergreen_emit_tes_sampler_states, 0);
4414	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].states.atom, id++, evergreen_emit_ps_sampler_states, 0);
4415	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].states.atom, id++, evergreen_emit_cs_sampler_states, 0);
4416	/* resources */
4417	r600_init_atom(rctx, &rctx->vertex_buffer_state.atom, id++, evergreen_fs_emit_vertex_buffers, 0);
4418	r600_init_atom(rctx, &rctx->cs_vertex_buffer_state.atom, id++, evergreen_cs_emit_vertex_buffers, 0);
4419	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views.atom, id++, evergreen_emit_vs_sampler_views, 0);
4420	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views.atom, id++, evergreen_emit_gs_sampler_views, 0);
4421	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_TESS_CTRL].views.atom, id++, evergreen_emit_tcs_sampler_views, 0);
4422	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_TESS_EVAL].views.atom, id++, evergreen_emit_tes_sampler_views, 0);
4423	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views.atom, id++, evergreen_emit_ps_sampler_views, 0);
4424	r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].views.atom, id++, evergreen_emit_cs_sampler_views, 0);
4425
4426	r600_init_atom(rctx, &rctx->vgt_state.atom, id++, r600_emit_vgt_state, 10);
4427
4428	if (rctx->b.chip_class == EVERGREEN) {
4429		r600_init_atom(rctx, &rctx->sample_mask.atom, id++, evergreen_emit_sample_mask, 3);
4430	} else {
4431		r600_init_atom(rctx, &rctx->sample_mask.atom, id++, cayman_emit_sample_mask, 4);
4432	}
4433	rctx->sample_mask.sample_mask = ~0;
4434
4435	r600_init_atom(rctx, &rctx->alphatest_state.atom, id++, r600_emit_alphatest_state, 6);
4436	r600_init_atom(rctx, &rctx->blend_color.atom, id++, r600_emit_blend_color, 6);
4437	r600_init_atom(rctx, &rctx->blend_state.atom, id++, r600_emit_cso_state, 0);
4438	r600_init_atom(rctx, &rctx->cb_misc_state.atom, id++, evergreen_emit_cb_misc_state, 4);
4439	r600_init_atom(rctx, &rctx->clip_misc_state.atom, id++, r600_emit_clip_misc_state, 9);
4440	r600_init_atom(rctx, &rctx->clip_state.atom, id++, evergreen_emit_clip_state, 26);
4441	r600_init_atom(rctx, &rctx->db_misc_state.atom, id++, evergreen_emit_db_misc_state, 10);
4442	r600_init_atom(rctx, &rctx->db_state.atom, id++, evergreen_emit_db_state, 14);
4443	r600_init_atom(rctx, &rctx->dsa_state.atom, id++, r600_emit_cso_state, 0);
4444	r600_init_atom(rctx, &rctx->poly_offset_state.atom, id++, evergreen_emit_polygon_offset, 9);
4445	r600_init_atom(rctx, &rctx->rasterizer_state.atom, id++, r600_emit_cso_state, 0);
4446	r600_add_atom(rctx, &rctx->b.scissors.atom, id++);
4447	r600_add_atom(rctx, &rctx->b.viewports.atom, id++);
4448	r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4);
4449	r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, evergreen_emit_vertex_fetch_shader, 5);
4450	r600_add_atom(rctx, &rctx->b.render_cond_atom, id++);
4451	r600_add_atom(rctx, &rctx->b.streamout.begin_atom, id++);
4452	r600_add_atom(rctx, &rctx->b.streamout.enable_atom, id++);
4453	for (i = 0; i < EG_NUM_HW_STAGES; i++)
4454		r600_init_atom(rctx, &rctx->hw_shader_stages[i].atom, id++, r600_emit_shader, 0);
4455	r600_init_atom(rctx, &rctx->shader_stages.atom, id++, evergreen_emit_shader_stages, 15);
4456	r600_init_atom(rctx, &rctx->gs_rings.atom, id++, evergreen_emit_gs_rings, 26);
4457
4458	rctx->b.b.create_blend_state = evergreen_create_blend_state;
4459	rctx->b.b.create_depth_stencil_alpha_state = evergreen_create_dsa_state;
4460	rctx->b.b.create_rasterizer_state = evergreen_create_rs_state;
4461	rctx->b.b.create_sampler_state = evergreen_create_sampler_state;
4462	rctx->b.b.create_sampler_view = evergreen_create_sampler_view;
4463	rctx->b.b.set_framebuffer_state = evergreen_set_framebuffer_state;
4464	rctx->b.b.set_polygon_stipple = evergreen_set_polygon_stipple;
4465	rctx->b.b.set_min_samples = evergreen_set_min_samples;
4466	rctx->b.b.set_tess_state = evergreen_set_tess_state;
4467	rctx->b.b.set_hw_atomic_buffers = evergreen_set_hw_atomic_buffers;
4468	rctx->b.b.set_shader_images = evergreen_set_shader_images;
4469	rctx->b.b.set_shader_buffers = evergreen_set_shader_buffers;
4470	if (rctx->b.chip_class == EVERGREEN)
4471                rctx->b.b.get_sample_position = evergreen_get_sample_position;
4472        else
4473                rctx->b.b.get_sample_position = cayman_get_sample_position;
4474	rctx->b.dma_copy = evergreen_dma_copy;
4475	rctx->b.save_qbo_state = evergreen_save_qbo_state;
4476
4477	evergreen_init_compute_state_functions(rctx);
4478}
4479
4480/**
4481 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
4482 *
4483 * The information about LDS and other non-compile-time parameters is then
4484 * written to the const buffer.
4485
4486 * const buffer contains -
4487 * uint32_t input_patch_size
4488 * uint32_t input_vertex_size
4489 * uint32_t num_tcs_input_cp
4490 * uint32_t num_tcs_output_cp;
4491 * uint32_t output_patch_size
4492 * uint32_t output_vertex_size
4493 * uint32_t output_patch0_offset
4494 * uint32_t perpatch_output_offset
4495 * and the same constbuf is bound to LS/HS/VS(ES).
4496 */
4497void evergreen_setup_tess_constants(struct r600_context *rctx, const struct pipe_draw_info *info, unsigned *num_patches)
4498{
4499	struct pipe_constant_buffer constbuf = {0};
4500	struct r600_pipe_shader_selector *tcs = rctx->tcs_shader ? rctx->tcs_shader : rctx->tes_shader;
4501	struct r600_pipe_shader_selector *ls = rctx->vs_shader;
4502	unsigned num_tcs_input_cp = info->vertices_per_patch;
4503	unsigned num_tcs_outputs;
4504	unsigned num_tcs_output_cp;
4505	unsigned num_tcs_patch_outputs;
4506	unsigned num_tcs_inputs;
4507	unsigned input_vertex_size, output_vertex_size;
4508	unsigned input_patch_size, pervertex_output_patch_size, output_patch_size;
4509	unsigned output_patch0_offset, perpatch_output_offset, lds_size;
4510	uint32_t values[8];
4511	unsigned num_waves;
4512	unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes;
4513	unsigned wave_divisor = (16 * num_pipes);
4514
4515	*num_patches = 1;
4516
4517	if (!rctx->tes_shader) {
4518		rctx->lds_alloc = 0;
4519		rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
4520					      R600_LDS_INFO_CONST_BUFFER, NULL);
4521		rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_CTRL,
4522					      R600_LDS_INFO_CONST_BUFFER, NULL);
4523		rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
4524					      R600_LDS_INFO_CONST_BUFFER, NULL);
4525		return;
4526	}
4527
4528	if (rctx->lds_alloc != 0 &&
4529	    rctx->last_ls == ls &&
4530	    rctx->last_num_tcs_input_cp == num_tcs_input_cp &&
4531	    rctx->last_tcs == tcs)
4532		return;
4533
4534	num_tcs_inputs = util_last_bit64(ls->lds_outputs_written_mask);
4535
4536	if (rctx->tcs_shader) {
4537		num_tcs_outputs = util_last_bit64(tcs->lds_outputs_written_mask);
4538		num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
4539		num_tcs_patch_outputs = util_last_bit64(tcs->lds_patch_outputs_written_mask);
4540	} else {
4541		num_tcs_outputs = num_tcs_inputs;
4542		num_tcs_output_cp = num_tcs_input_cp;
4543		num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
4544	}
4545
4546	/* size in bytes */
4547	input_vertex_size = num_tcs_inputs * 16;
4548	output_vertex_size = num_tcs_outputs * 16;
4549
4550	input_patch_size = num_tcs_input_cp * input_vertex_size;
4551
4552	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
4553	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
4554
4555	output_patch0_offset = rctx->tcs_shader ? input_patch_size * *num_patches : 0;
4556	perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;
4557
4558	lds_size = output_patch0_offset + output_patch_size * *num_patches;
4559
4560	values[0] = input_patch_size;
4561	values[1] = input_vertex_size;
4562	values[2] = num_tcs_input_cp;
4563	values[3] = num_tcs_output_cp;
4564
4565	values[4] = output_patch_size;
4566	values[5] = output_vertex_size;
4567	values[6] = output_patch0_offset;
4568	values[7] = perpatch_output_offset;
4569
4570	/* docs say HS_NUM_WAVES - CEIL((LS_HS_CONFIG.NUM_PATCHES *
4571	   LS_HS_CONFIG.HS_NUM_OUTPUT_CP) / (NUM_GOOD_PIPES * 16)) */
4572	num_waves = ceilf((float)(*num_patches * num_tcs_output_cp) / (float)wave_divisor);
4573
4574	rctx->lds_alloc = (lds_size | (num_waves << 14));
4575
4576	rctx->last_ls = ls;
4577	rctx->last_tcs = tcs;
4578	rctx->last_num_tcs_input_cp = num_tcs_input_cp;
4579
4580	constbuf.user_buffer = values;
4581	constbuf.buffer_size = 8 * 4;
4582
4583	rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
4584				      R600_LDS_INFO_CONST_BUFFER, &constbuf);
4585	rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_CTRL,
4586				      R600_LDS_INFO_CONST_BUFFER, &constbuf);
4587	rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
4588				      R600_LDS_INFO_CONST_BUFFER, &constbuf);
4589	pipe_resource_reference(&constbuf.buffer, NULL);
4590}
4591
4592uint32_t evergreen_get_ls_hs_config(struct r600_context *rctx,
4593				    const struct pipe_draw_info *info,
4594				    unsigned num_patches)
4595{
4596	unsigned num_output_cp;
4597
4598	if (!rctx->tes_shader)
4599		return 0;
4600
4601	num_output_cp = rctx->tcs_shader ?
4602		rctx->tcs_shader->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
4603		info->vertices_per_patch;
4604
4605	return S_028B58_NUM_PATCHES(num_patches) |
4606		S_028B58_HS_NUM_INPUT_CP(info->vertices_per_patch) |
4607		S_028B58_HS_NUM_OUTPUT_CP(num_output_cp);
4608}
4609
4610void evergreen_set_ls_hs_config(struct r600_context *rctx,
4611				struct radeon_cmdbuf *cs,
4612				uint32_t ls_hs_config)
4613{
4614	radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
4615}
4616
4617void evergreen_set_lds_alloc(struct r600_context *rctx,
4618			     struct radeon_cmdbuf *cs,
4619			     uint32_t lds_alloc)
4620{
4621	radeon_set_context_reg(cs, R_0288E8_SQ_LDS_ALLOC, lds_alloc);
4622}
4623
4624/* on evergreen if you are running tessellation you need to disable dynamic
4625   GPRs to workaround a hardware bug.*/
4626bool evergreen_adjust_gprs(struct r600_context *rctx)
4627{
4628	unsigned num_gprs[EG_NUM_HW_STAGES];
4629	unsigned def_gprs[EG_NUM_HW_STAGES];
4630	unsigned cur_gprs[EG_NUM_HW_STAGES];
4631	unsigned new_gprs[EG_NUM_HW_STAGES];
4632	unsigned def_num_clause_temp_gprs = rctx->r6xx_num_clause_temp_gprs;
4633	unsigned max_gprs;
4634	unsigned i;
4635	unsigned total_gprs;
4636	unsigned tmp[3];
4637	bool rework = false, set_default = false, set_dirty = false;
4638	max_gprs = 0;
4639	for (i = 0; i < EG_NUM_HW_STAGES; i++) {
4640		def_gprs[i] = rctx->default_gprs[i];
4641		max_gprs += def_gprs[i];
4642	}
4643	max_gprs += def_num_clause_temp_gprs * 2;
4644
4645	/* if we have no TESS and dyn gpr is enabled then do nothing. */
4646	if (!rctx->hw_shader_stages[EG_HW_STAGE_HS].shader) {
4647		if (rctx->config_state.dyn_gpr_enabled)
4648			return true;
4649
4650		/* transition back to dyn gpr enabled state */
4651		rctx->config_state.dyn_gpr_enabled = true;
4652		r600_mark_atom_dirty(rctx, &rctx->config_state.atom);
4653		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
4654		return true;
4655	}
4656
4657
4658	/* gather required shader gprs */
4659	for (i = 0; i < EG_NUM_HW_STAGES; i++) {
4660		if (rctx->hw_shader_stages[i].shader)
4661			num_gprs[i] = rctx->hw_shader_stages[i].shader->shader.bc.ngpr;
4662		else
4663			num_gprs[i] = 0;
4664	}
4665
4666	cur_gprs[R600_HW_STAGE_PS] = G_008C04_NUM_PS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
4667	cur_gprs[R600_HW_STAGE_VS] = G_008C04_NUM_VS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
4668	cur_gprs[R600_HW_STAGE_GS] = G_008C08_NUM_GS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
4669	cur_gprs[R600_HW_STAGE_ES] = G_008C08_NUM_ES_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
4670	cur_gprs[EG_HW_STAGE_LS] = G_008C0C_NUM_LS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_3);
4671	cur_gprs[EG_HW_STAGE_HS] = G_008C0C_NUM_HS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_3);
4672
4673	total_gprs = 0;
4674	for (i = 0; i < EG_NUM_HW_STAGES; i++)	{
4675		new_gprs[i] = num_gprs[i];
4676		total_gprs += num_gprs[i];
4677	}
4678
4679	if (total_gprs > (max_gprs - (2 * def_num_clause_temp_gprs)))
4680		return false;
4681
4682	for (i = 0; i < EG_NUM_HW_STAGES; i++) {
4683		if (new_gprs[i] > cur_gprs[i]) {
4684			rework = true;
4685			break;
4686		}
4687	}
4688
4689	if (rctx->config_state.dyn_gpr_enabled) {
4690		set_dirty = true;
4691		rctx->config_state.dyn_gpr_enabled = false;
4692	}
4693
4694	if (rework) {
4695		set_default = true;
4696		for (i = 0; i < EG_NUM_HW_STAGES; i++) {
4697			if (new_gprs[i] > def_gprs[i])
4698				set_default = false;
4699		}
4700
4701		if (set_default) {
4702			for (i = 0; i < EG_NUM_HW_STAGES; i++) {
4703				new_gprs[i] = def_gprs[i];
4704			}
4705		} else {
4706			unsigned ps_value = max_gprs;
4707
4708			ps_value -= (def_num_clause_temp_gprs * 2);
4709			for (i = R600_HW_STAGE_VS; i < EG_NUM_HW_STAGES; i++)
4710				ps_value -= new_gprs[i];
4711
4712			new_gprs[R600_HW_STAGE_PS] = ps_value;
4713		}
4714
4715		tmp[0] = S_008C04_NUM_PS_GPRS(new_gprs[R600_HW_STAGE_PS]) |
4716			S_008C04_NUM_VS_GPRS(new_gprs[R600_HW_STAGE_VS]) |
4717			S_008C04_NUM_CLAUSE_TEMP_GPRS(def_num_clause_temp_gprs);
4718
4719		tmp[1] = S_008C08_NUM_ES_GPRS(new_gprs[R600_HW_STAGE_ES]) |
4720			S_008C08_NUM_GS_GPRS(new_gprs[R600_HW_STAGE_GS]);
4721
4722		tmp[2] = S_008C0C_NUM_HS_GPRS(new_gprs[EG_HW_STAGE_HS]) |
4723			S_008C0C_NUM_LS_GPRS(new_gprs[EG_HW_STAGE_LS]);
4724
4725		if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp[0] ||
4726		    rctx->config_state.sq_gpr_resource_mgmt_2 != tmp[1] ||
4727		    rctx->config_state.sq_gpr_resource_mgmt_3 != tmp[2]) {
4728			rctx->config_state.sq_gpr_resource_mgmt_1 = tmp[0];
4729			rctx->config_state.sq_gpr_resource_mgmt_2 = tmp[1];
4730			rctx->config_state.sq_gpr_resource_mgmt_3 = tmp[2];
4731			set_dirty = true;
4732		}
4733	}
4734
4735
4736	if (set_dirty) {
4737		r600_mark_atom_dirty(rctx, &rctx->config_state.atom);
4738		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
4739	}
4740	return true;
4741}
4742
4743#define AC_ENCODE_TRACE_POINT(id)       (0xcafe0000 | ((id) & 0xffff))
4744
4745void eg_trace_emit(struct r600_context *rctx)
4746{
4747	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
4748	unsigned reloc;
4749
4750	if (rctx->b.chip_class < EVERGREEN)
4751		return;
4752
4753	/* This must be done after r600_need_cs_space. */
4754	reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
4755					  (struct r600_resource*)rctx->trace_buf, RADEON_USAGE_WRITE,
4756					  RADEON_PRIO_CP_DMA);
4757
4758	rctx->trace_id++;
4759	radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rctx->trace_buf,
4760			      RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
4761	radeon_emit(cs, PKT3(PKT3_MEM_WRITE, 3, 0));
4762	radeon_emit(cs, rctx->trace_buf->gpu_address);
4763	radeon_emit(cs, rctx->trace_buf->gpu_address >> 32 | MEM_WRITE_32_BITS | MEM_WRITE_CONFIRM);
4764	radeon_emit(cs, rctx->trace_id);
4765	radeon_emit(cs, 0);
4766	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
4767	radeon_emit(cs, reloc);
4768	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
4769	radeon_emit(cs, AC_ENCODE_TRACE_POINT(rctx->trace_id));
4770}
4771
4772static void evergreen_emit_set_append_cnt(struct r600_context *rctx,
4773					  struct r600_shader_atomic *atomic,
4774					  struct r600_resource *resource,
4775					  uint32_t pkt_flags)
4776{
4777	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
4778	unsigned reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
4779						   resource,
4780						   RADEON_USAGE_READ,
4781						   RADEON_PRIO_SHADER_RW_BUFFER);
4782	uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);
4783	uint32_t base_reg_0 = R_02872C_GDS_APPEND_COUNT_0;
4784
4785	uint32_t reg_val = (base_reg_0 + atomic->hw_idx * 4 - EVERGREEN_CONTEXT_REG_OFFSET) >> 2;
4786
4787	radeon_emit(cs, PKT3(PKT3_SET_APPEND_CNT, 2, 0) | pkt_flags);
4788	radeon_emit(cs, (reg_val << 16) | 0x3);
4789	radeon_emit(cs, dst_offset & 0xfffffffc);
4790	radeon_emit(cs, (dst_offset >> 32) & 0xff);
4791	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
4792	radeon_emit(cs, reloc);
4793}
4794
4795static void evergreen_emit_event_write_eos(struct r600_context *rctx,
4796					   struct r600_shader_atomic *atomic,
4797					   struct r600_resource *resource,
4798					   uint32_t pkt_flags)
4799{
4800	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
4801	uint32_t event = EVENT_TYPE_PS_DONE;
4802	uint32_t base_reg_0 = R_02872C_GDS_APPEND_COUNT_0;
4803	uint32_t reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
4804						   resource,
4805						   RADEON_USAGE_WRITE,
4806						   RADEON_PRIO_SHADER_RW_BUFFER);
4807	uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);
4808	uint32_t reg_val = (base_reg_0 + atomic->hw_idx * 4) >> 2;
4809
4810	if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
4811		event = EVENT_TYPE_CS_DONE;
4812
4813	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
4814	radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
4815	radeon_emit(cs, (dst_offset) & 0xffffffff);
4816	radeon_emit(cs, (0 << 29) | ((dst_offset >> 32) & 0xff));
4817	radeon_emit(cs, reg_val);
4818	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
4819	radeon_emit(cs, reloc);
4820}
4821
4822static void cayman_emit_event_write_eos(struct r600_context *rctx,
4823					struct r600_shader_atomic *atomic,
4824					struct r600_resource *resource,
4825					uint32_t pkt_flags)
4826{
4827	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
4828	uint32_t event = EVENT_TYPE_PS_DONE;
4829	uint32_t reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
4830						   resource,
4831						   RADEON_USAGE_WRITE,
4832						   RADEON_PRIO_SHADER_RW_BUFFER);
4833	uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);
4834
4835	if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
4836		event = EVENT_TYPE_CS_DONE;
4837
4838	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
4839	radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
4840	radeon_emit(cs, (dst_offset) & 0xffffffff);
4841	radeon_emit(cs, (1 << 29) | ((dst_offset >> 32) & 0xff));
4842	radeon_emit(cs, (atomic->hw_idx) | (1 << 16));
4843	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
4844	radeon_emit(cs, reloc);
4845}
4846
4847/* writes count from a buffer into GDS */
4848static void cayman_write_count_to_gds(struct r600_context *rctx,
4849				      struct r600_shader_atomic *atomic,
4850				      struct r600_resource *resource,
4851				      uint32_t pkt_flags)
4852{
4853	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
4854	unsigned reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
4855						   resource,
4856						   RADEON_USAGE_READ,
4857						   RADEON_PRIO_SHADER_RW_BUFFER);
4858	uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);
4859
4860	radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0) | pkt_flags);
4861	radeon_emit(cs, dst_offset & 0xffffffff);
4862	radeon_emit(cs, PKT3_CP_DMA_CP_SYNC | PKT3_CP_DMA_DST_SEL(1) | ((dst_offset >> 32) & 0xff));// GDS
4863	radeon_emit(cs, atomic->hw_idx * 4);
4864	radeon_emit(cs, 0);
4865	radeon_emit(cs, PKT3_CP_DMA_CMD_DAS | 4);
4866	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
4867	radeon_emit(cs, reloc);
4868}
4869
4870void evergreen_emit_atomic_buffer_setup_count(struct r600_context *rctx,
4871					      struct r600_pipe_shader *cs_shader,
4872					      struct r600_shader_atomic *combined_atomics,
4873					      uint8_t *atomic_used_mask_p)
4874{
4875	uint8_t atomic_used_mask = 0;
4876	int i, j, k;
4877	bool is_compute = cs_shader ? true : false;
4878
4879	for (i = 0; i < (is_compute ? 1 : EG_NUM_HW_STAGES); i++) {
4880		uint8_t num_atomic_stage;
4881		struct r600_pipe_shader *pshader;
4882
4883		if (is_compute)
4884			pshader = cs_shader;
4885		else
4886			pshader = rctx->hw_shader_stages[i].shader;
4887		if (!pshader)
4888			continue;
4889
4890		num_atomic_stage = pshader->shader.nhwatomic_ranges;
4891		if (!num_atomic_stage)
4892			continue;
4893
4894		for (j = 0; j < num_atomic_stage; j++) {
4895			struct r600_shader_atomic *atomic = &pshader->shader.atomics[j];
4896			int natomics = atomic->end - atomic->start + 1;
4897
4898			for (k = 0; k < natomics; k++) {
4899				/* seen this in a previous stage */
4900				if (atomic_used_mask & (1u << (atomic->hw_idx + k)))
4901					continue;
4902
4903				combined_atomics[atomic->hw_idx + k].hw_idx = atomic->hw_idx + k;
4904				combined_atomics[atomic->hw_idx + k].buffer_id = atomic->buffer_id;
4905				combined_atomics[atomic->hw_idx + k].start = atomic->start + k;
4906				combined_atomics[atomic->hw_idx + k].end = combined_atomics[atomic->hw_idx + k].start + 1;
4907				atomic_used_mask |= (1u << (atomic->hw_idx + k));
4908			}
4909		}
4910	}
4911	*atomic_used_mask_p = atomic_used_mask;
4912}
4913
4914void evergreen_emit_atomic_buffer_setup(struct r600_context *rctx,
4915					bool is_compute,
4916					struct r600_shader_atomic *combined_atomics,
4917					uint8_t atomic_used_mask)
4918{
4919	struct r600_atomic_buffer_state *astate = &rctx->atomic_buffer_state;
4920	unsigned pkt_flags = 0;
4921	uint32_t mask;
4922
4923	if (is_compute)
4924		pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
4925
4926	mask = atomic_used_mask;
4927	if (!mask)
4928		return;
4929
4930	while (mask) {
4931		unsigned atomic_index = u_bit_scan(&mask);
4932		struct r600_shader_atomic *atomic = &combined_atomics[atomic_index];
4933		struct r600_resource *resource = r600_resource(astate->buffer[atomic->buffer_id].buffer);
4934		assert(resource);
4935
4936		if (rctx->b.chip_class == CAYMAN)
4937			cayman_write_count_to_gds(rctx, atomic, resource, pkt_flags);
4938		else
4939			evergreen_emit_set_append_cnt(rctx, atomic, resource, pkt_flags);
4940	}
4941}
4942
4943void evergreen_emit_atomic_buffer_save(struct r600_context *rctx,
4944				       bool is_compute,
4945				       struct r600_shader_atomic *combined_atomics,
4946				       uint8_t *atomic_used_mask_p)
4947{
4948	struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
4949	struct r600_atomic_buffer_state *astate = &rctx->atomic_buffer_state;
4950	uint32_t pkt_flags = 0;
4951	uint32_t event = EVENT_TYPE_PS_DONE;
4952	uint32_t mask;
4953	uint64_t dst_offset;
4954	unsigned reloc;
4955
4956	if (is_compute)
4957		pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
4958
4959	mask = *atomic_used_mask_p;
4960	if (!mask)
4961		return;
4962
4963	while (mask) {
4964		unsigned atomic_index = u_bit_scan(&mask);
4965		struct r600_shader_atomic *atomic = &combined_atomics[atomic_index];
4966		struct r600_resource *resource = r600_resource(astate->buffer[atomic->buffer_id].buffer);
4967		assert(resource);
4968
4969		if (rctx->b.chip_class == CAYMAN)
4970			cayman_emit_event_write_eos(rctx, atomic, resource, pkt_flags);
4971		else
4972			evergreen_emit_event_write_eos(rctx, atomic, resource, pkt_flags);
4973	}
4974
4975	if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
4976		event = EVENT_TYPE_CS_DONE;
4977
4978	++rctx->append_fence_id;
4979	reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
4980					  r600_resource(rctx->append_fence),
4981					  RADEON_USAGE_READWRITE,
4982					  RADEON_PRIO_SHADER_RW_BUFFER);
4983	dst_offset = r600_resource(rctx->append_fence)->gpu_address;
4984	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
4985	radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
4986	radeon_emit(cs, dst_offset & 0xffffffff);
4987	radeon_emit(cs, (2 << 29) | ((dst_offset >> 32) & 0xff));
4988	radeon_emit(cs, rctx->append_fence_id);
4989	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
4990	radeon_emit(cs, reloc);
4991
4992	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0) | pkt_flags);
4993	radeon_emit(cs, WAIT_REG_MEM_GEQUAL | WAIT_REG_MEM_MEMORY | (1 << 8));
4994	radeon_emit(cs, dst_offset & 0xffffffff);
4995	radeon_emit(cs, ((dst_offset >> 32) & 0xff));
4996	radeon_emit(cs, rctx->append_fence_id);
4997	radeon_emit(cs, 0xffffffff);
4998	radeon_emit(cs, 0xa);
4999	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
5000	radeon_emit(cs, reloc);
5001}
5002