/*	$NetBSD: radeon_evergreen_cs.c,v 1.1 2018/08/27 14:38:20 riastradh Exp $	*/

/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_evergreen_cs.c,v 1.1 2018/08/27 14:38:20 riastradh Exp $");

#include <drm/drmP.h>
#include "radeon.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"

#ifndef __NetBSD__
#define MAX(a,b)			(((a)>(b))?(a):(b))
#define MIN(a,b)			(((a)<(b))?(a):(b))
#endif

#define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm)

int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
			   struct radeon_bo_list **cs_reloc);
struct evergreen_cs_track {
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	u32			row_size;
	/* value we track */
	u32			nsamples;		/* unused */
	struct radeon_bo	*cb_color_bo[12];
	u32			cb_color_bo_offset[12];
	struct radeon_bo	*cb_color_fmask_bo[8];	/* unused */
	struct radeon_bo	*cb_color_cmask_bo[8];	/* unused */
	u32			cb_color_info[12];
	u32			cb_color_view[12];
	u32			cb_color_pitch[12];
	u32			cb_color_slice[12];
	u32			cb_color_slice_idx[12];
	u32			cb_color_attrib[12];
	u32			cb_color_cmask_slice[8];/* unused */
	u32			cb_color_fmask_slice[8];/* unused */
	u32			cb_target_mask;
	u32			cb_shader_mask; /* unused */
	u32			vgt_strmout_config;
	u32			vgt_strmout_buffer_config;
	struct radeon_bo	*vgt_strmout_bo[4];
	u32			vgt_strmout_bo_offset[4];
	u32			vgt_strmout_size[4];
	u32			db_depth_control;
	u32			db_depth_view;
	u32			db_depth_slice;
	u32			db_depth_size;
	u32			db_z_info;
	u32			db_z_read_offset;
	u32			db_z_write_offset;
	struct radeon_bo	*db_z_read_bo;
	struct radeon_bo	*db_z_write_bo;
	u32			db_s_info;
	u32			db_s_read_offset;
	u32			db_s_write_offset;
	struct radeon_bo	*db_s_read_bo;
	struct radeon_bo	*db_s_write_bo;
	bool			sx_misc_kill_all_prims;
	bool			cb_dirty;
	bool			db_dirty;
	bool			streamout_dirty;
	u32			htile_offset;
	u32			htile_surface;
	struct radeon_bo	*htile_bo;
	unsigned long		indirect_draw_buffer_size;
	const unsigned		*reg_safe_bm;
};

static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
{
	if (tiling_flags & RADEON_TILING_MACRO)
		return ARRAY_2D_TILED_THIN1;
	else if (tiling_flags & RADEON_TILING_MICRO)
		return ARRAY_1D_TILED_THIN1;
	else
		return ARRAY_LINEAR_GENERAL;
}

static u32 evergreen_cs_get_num_banks(u32 nbanks)
{
	switch (nbanks) {
	case 2:
		return ADDR_SURF_2_BANK;
	case 4:
		return ADDR_SURF_4_BANK;
	case 8:
	default:
		return ADDR_SURF_8_BANK;
	case 16:
		return ADDR_SURF_16_BANK;
	}
}

static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
	int i;

	for (i = 0; i < 8; i++) {
		track->cb_color_fmask_bo[i] = NULL;
		track->cb_color_cmask_bo[i] = NULL;
		track->cb_color_cmask_slice[i] = 0;
		track->cb_color_fmask_slice[i] = 0;
	}

	for (i = 0; i < 12; i++) {
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_pitch[i] = 0;
		track->cb_color_slice[i] = 0xfffffff;
		track->cb_color_slice_idx[i] = 0;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->cb_dirty = true;

	track->db_depth_slice = 0xffffffff;
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_z_info = 0xFFFFFFFF;
	track->db_z_read_offset = 0xFFFFFFFF;
	track->db_z_write_offset = 0xFFFFFFFF;
	track->db_z_read_bo = NULL;
	track->db_z_write_bo = NULL;
	track->db_s_info = 0xFFFFFFFF;
	track->db_s_read_offset = 0xFFFFFFFF;
	track->db_s_write_offset = 0xFFFFFFFF;
	track->db_s_read_bo = NULL;
	track->db_s_write_bo = NULL;
	track->db_dirty = true;
	track->htile_bo = NULL;
	track->htile_offset = 0xFFFFFFFF;
	track->htile_surface = 0;

	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
	}
	track->streamout_dirty = true;
	track->sx_misc_kill_all_prims = false;
}

struct eg_surface {
	/* value gathered from cs */
	unsigned	nbx;
	unsigned	nby;
	unsigned	format;
	unsigned	mode;
	unsigned	nbanks;
	unsigned	bankw;
	unsigned	bankh;
	unsigned	tsplit;
	unsigned	mtilea;
	unsigned	nsamples;
	/* output value */
	unsigned	bpe;
	unsigned	layer_size;
	unsigned	palign;
	unsigned	halign;
	unsigned long	base_align;
};

static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
					  struct eg_surface *surf,
					  const char *prefix)
{
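	/* Linear-general surfaces have no tiling constraints: the layer
	 * size is simply width * height * bytes-per-element * samples and
	 * the only base alignment required is the element size itself.
	 * For example, a 64x64 32-bit surface gives 64 * 64 * 4 = 16384
	 * bytes per layer.
	 */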
	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
	surf->base_align = surf->bpe;
	surf->palign = 1;
	surf->halign = 1;
	return 0;
}

static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
						  struct eg_surface *surf,
						  const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign;

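	/* Aligned-linear surfaces must start each row on a group boundary,
	 * so the pitch alignment is group_size / bpe but never fewer than
	 * 64 elements.  E.g. with a 256-byte group and bpe = 4 this gives
	 * MAX(64, 256 / 4) = 64 pixels.
	 */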
	palign = MAX(64, track->group_size / surf->bpe);
	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
	surf->base_align = track->group_size;
	surf->palign = palign;
	surf->halign = 1;
	if (surf->nbx & (palign - 1)) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid, must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nbx, palign);
		}
		return -EINVAL;
	}
	return 0;
}

static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
				      struct eg_surface *surf,
				      const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign;

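	/* 1D tiling uses 8x8-element micro tiles, so the height must be a
	 * multiple of 8 and the pitch must cover a whole group worth of
	 * tile rows (8 * bpe * nsamples bytes each) but at least 8
	 * elements.  E.g. group 256, bpe 4, 1 sample: MAX(8, 256/32) = 8.
	 */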
	palign = track->group_size / (8 * surf->bpe * surf->nsamples);
	palign = MAX(8, palign);
	surf->layer_size = surf->nbx * surf->nby * surf->bpe;
	surf->base_align = track->group_size;
	surf->palign = palign;
	surf->halign = 8;
	if ((surf->nbx & (palign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid, must be aligned with %d (%d %d %d)\n",
				 __func__, __LINE__, prefix, surf->nbx, palign,
				 track->group_size, surf->bpe, surf->nsamples);
		}
		return -EINVAL;
	}
	if ((surf->nby & (8 - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s height %d invalid, must be aligned with 8\n",
				 __func__, __LINE__, prefix, surf->nby);
		}
		return -EINVAL;
	}
	return 0;
}

static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
				      struct eg_surface *surf,
				      const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign, halign, tileb, slice_pt;
	unsigned mtile_pr, mtile_ps, mtileb;

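	/* 2D tiling layers macro tiles on top of the 8x8 micro tiles.  A
	 * micro tile holds 64 elements (tileb bytes); if that exceeds the
	 * tile split it is spread across slice_pt planes.  The macro tile
	 * is (8 * bankw * npipes * mtilea) elements wide and
	 * (8 * bankh * nbanks / mtilea) elements tall, per the math below.
	 */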
	tileb = 64 * surf->bpe * surf->nsamples;
	slice_pt = 1;
	if (tileb > surf->tsplit) {
		slice_pt = tileb / surf->tsplit;
	}
	tileb = tileb / slice_pt;
	/* macro tile width & height */
	palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
	halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
	mtileb = (palign / 8) * (halign / 8) * tileb;
	mtile_pr = surf->nbx / palign;
	mtile_ps = (mtile_pr * surf->nby) / halign;
	surf->layer_size = mtile_ps * mtileb * slice_pt;
	surf->base_align = mtileb;
	surf->palign = palign;
	surf->halign = halign;

	if ((surf->nbx & (palign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid, must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nbx, palign);
		}
		return -EINVAL;
	}
	if ((surf->nby & (halign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s height %d invalid, must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nby, halign);
		}
		return -EINVAL;
	}

	return 0;
}

static int evergreen_surface_check(struct radeon_cs_parser *p,
				   struct eg_surface *surf,
				   const char *prefix)
{
	/* some common values are computed here */
	surf->bpe = r600_fmt_get_blocksize(surf->format);

	switch (surf->mode) {
	case ARRAY_LINEAR_GENERAL:
		return evergreen_surface_check_linear(p, surf, prefix);
	case ARRAY_LINEAR_ALIGNED:
		return evergreen_surface_check_linear_aligned(p, surf, prefix);
	case ARRAY_1D_TILED_THIN1:
		return evergreen_surface_check_1d(p, surf, prefix);
	case ARRAY_2D_TILED_THIN1:
		return evergreen_surface_check_2d(p, surf, prefix);
	default:
		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
				__func__, __LINE__, prefix, surf->mode);
		return -EINVAL;
	}
	return -EINVAL;
}

static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
					      struct eg_surface *surf,
					      const char *prefix)
{
	switch (surf->mode) {
	case ARRAY_2D_TILED_THIN1:
		break;
	case ARRAY_LINEAR_GENERAL:
	case ARRAY_LINEAR_ALIGNED:
	case ARRAY_1D_TILED_THIN1:
		return 0;
	default:
		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
				__func__, __LINE__, prefix, surf->mode);
		return -EINVAL;
	}

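	/* Only 2D-tiled surfaces get here.  The values gathered from the
	 * command stream are log2-style hardware encodings; translate them
	 * to actual counts and sizes (e.g. NUM_BANKS 2 -> 8 banks,
	 * TILE_SPLIT 3 -> 512 bytes) per the tables below.
	 */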
	switch (surf->nbanks) {
	case 0: surf->nbanks = 2; break;
	case 1: surf->nbanks = 4; break;
	case 2: surf->nbanks = 8; break;
	case 3: surf->nbanks = 16; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
			 __func__, __LINE__, prefix, surf->nbanks);
		return -EINVAL;
	}
	switch (surf->bankw) {
	case 0: surf->bankw = 1; break;
	case 1: surf->bankw = 2; break;
	case 2: surf->bankw = 4; break;
	case 3: surf->bankw = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
			 __func__, __LINE__, prefix, surf->bankw);
		return -EINVAL;
	}
	switch (surf->bankh) {
	case 0: surf->bankh = 1; break;
	case 1: surf->bankh = 2; break;
	case 2: surf->bankh = 4; break;
	case 3: surf->bankh = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
			 __func__, __LINE__, prefix, surf->bankh);
		return -EINVAL;
	}
	switch (surf->mtilea) {
	case 0: surf->mtilea = 1; break;
	case 1: surf->mtilea = 2; break;
	case 2: surf->mtilea = 4; break;
	case 3: surf->mtilea = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
			 __func__, __LINE__, prefix, surf->mtilea);
		return -EINVAL;
	}
	switch (surf->tsplit) {
	case 0: surf->tsplit = 64; break;
	case 1: surf->tsplit = 128; break;
	case 2: surf->tsplit = 256; break;
	case 3: surf->tsplit = 512; break;
	case 4: surf->tsplit = 1024; break;
	case 5: surf->tsplit = 2048; break;
	case 6: surf->tsplit = 4096; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
			 __func__, __LINE__, prefix, surf->tsplit);
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
	pitch = track->cb_color_pitch[id];
	slice = track->cb_color_slice[id];
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
	surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
	surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
	surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
	surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
	surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
	surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
	surf.nsamples = 1;

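	/* The PITCH field stores (pixels / 8) - 1 and the SLICE field
	 * stores (pitch * height / 64) - 1, hence the (pitch + 1) * 8 and
	 * ((slice + 1) * 64) / nbx decodes above; the old-ddx workaround
	 * below re-encodes slice with the inverse formula.
	 */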
	if (!r600_fmt_is_valid_color(surf.format)) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
			 __func__, __LINE__, surf.format,
			id, track->cb_color_info[id]);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "cb");
	if (r) {
		return r;
	}

	r = evergreen_surface_check(p, &surf, "cb");
	if (r) {
		dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, id, track->cb_color_pitch[id],
			 track->cb_color_slice[id], track->cb_color_attrib[id],
			 track->cb_color_info[id]);
		return r;
	}

	offset = track->cb_color_bo_offset[id] << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, id, offset, surf.base_align);
		return -EINVAL;
	}

	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->cb_color_bo[id])) {
		/* old ddx versions are broken: they allocate the bo with
		 * w*h*bpp but program the slice with ALIGN(h, 8); catch
		 * this and patch the command stream.
		 */
		if (!surf.mode) {
			uint32_t *ib = p->ib.ptr;
			unsigned long tmp, nby, bsize, size, vmin = 0;

			/* find the height the ddx wants */
			if (surf.nby > 8) {
				vmin = surf.nby - 8;
			}
			bsize = radeon_bo_size(track->cb_color_bo[id]);
			tmp = track->cb_color_bo_offset[id] << 8;
			for (nby = surf.nby; nby > vmin; nby--) {
				size = nby * surf.nbx * surf.bpe * surf.nsamples;
				if ((tmp + size * mslice) <= bsize) {
					break;
				}
			}
			if (nby > vmin) {
				surf.nby = nby;
				slice = ((nby * surf.nbx) / 64) - 1;
				if (!evergreen_surface_check(p, &surf, "cb")) {
					/* check if this one works */
					tmp += surf.layer_size * mslice;
					if (tmp <= bsize) {
						ib[track->cb_color_slice_idx[id]] = slice;
						goto old_ddx_ok;
					}
				}
			}
		}
		dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
			 "offset %d, max layer %d, bo size %ld, slice %d)\n",
			 __func__, __LINE__, id, surf.layer_size,
			track->cb_color_bo_offset[id] << 8, mslice,
			radeon_bo_size(track->cb_color_bo[id]), slice);
		dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
			 __func__, __LINE__, surf.nbx, surf.nby,
			surf.mode, surf.bpe, surf.nsamples,
			surf.bankw, surf.bankh,
			surf.tsplit, surf.mtilea);
		return -EINVAL;
	}
old_ddx_ok:

	return 0;
}

static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
						unsigned nbx, unsigned nby)
{
	struct evergreen_cs_track *track = p->track;
	unsigned long size;

	if (track->htile_bo == NULL) {
		dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
				__func__, __LINE__, track->db_z_info);
		return -EINVAL;
	}

	if (G_028ABC_LINEAR(track->htile_surface)) {
		/* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
		nbx = round_up(nbx, 16 * 8);
		/* height is npipes htiles aligned == npipes * 8 pixel aligned */
		nby = round_up(nby, track->npipes * 8);
	} else {
		/* always assume 8x8 htiles */
		/* the alignment is the htile alignment * 8; the htile
		 * alignment varies with the number of pipes and the tile
		 * width and height
		 */
		switch (track->npipes) {
		case 8:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 64 * 8);
			nby = round_up(nby, 64 * 8);
			break;
		case 4:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 64 * 8);
			nby = round_up(nby, 32 * 8);
			break;
		case 2:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 32 * 8);
			nby = round_up(nby, 32 * 8);
			break;
		case 1:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 32 * 8);
			nby = round_up(nby, 16 * 8);
			break;
		default:
			dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
					__func__, __LINE__, track->npipes);
			return -EINVAL;
		}
	}
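	/* One 32-bit htile word covers an 8x8 pixel tile, hence the divide
	 * by 8 in each dimension and the factor of 4 bytes in the size
	 * computation below.
	 */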
	/* compute number of htiles */
	nbx = nbx >> 3;
	nby = nby >> 3;
	/* size must be aligned on npipes * 2K boundary */
	size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
	size += track->htile_offset;

	if (!track->htile_bo) {
		dev_warn(p->dev, "%s:%d htile_bo not set\n", __func__, __LINE__);
		return -EINVAL;
	}
	if (size > radeon_bo_size(track->htile_bo)) {
		dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
				__func__, __LINE__, radeon_bo_size(track->htile_bo),
				size, nbx, nby);
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
	slice = track->db_depth_slice;
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
	surf.format = G_028044_FORMAT(track->db_s_info);
	surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
	surf.nsamples = 1;

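	/* Depth and stencil have independent READ and WRITE base
	 * addresses; both offsets are validated below against the same
	 * computed layer layout.
	 */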
	if (surf.format != 1) {
		dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}
	/* replace with a color format so we can reuse the same code */
	surf.format = V_028C70_COLOR_8;

	r = evergreen_surface_value_conv_check(p, &surf, "stencil");
	if (r) {
		return r;
	}

	r = evergreen_surface_check(p, &surf, NULL);
	if (r) {
		/* old userspace doesn't compute proper depth/stencil
		 * alignment; check the alignment against a larger
		 * bytes-per-element format and only report an error if
		 * that alignment is wrong too.
		 */
		surf.format = V_028C70_COLOR_8_8_8_8;
		r = evergreen_surface_check(p, &surf, "stencil");
		if (r) {
			dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
				 __func__, __LINE__, track->db_depth_size,
				 track->db_depth_slice, track->db_s_info, track->db_z_info);
		}
		return r;
	}

	offset = track->db_s_read_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (!track->db_s_read_bo) {
		dev_warn(p->dev, "%s:%d db_s_read_bo not set\n", __func__, __LINE__);
		return -EINVAL;
	}
	if (offset > radeon_bo_size(track->db_s_read_bo)) {
		dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			(unsigned long)track->db_s_read_offset << 8, mslice,
			radeon_bo_size(track->db_s_read_bo));
		dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_s_info, track->db_z_info);
		return -EINVAL;
	}

	offset = track->db_s_write_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (!track->db_s_write_bo) {
		dev_warn(p->dev, "%s:%d db_s_write_bo not set\n", __func__, __LINE__);
		return -EINVAL;
	}
	if (offset > radeon_bo_size(track->db_s_write_bo)) {
		dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			(unsigned long)track->db_s_write_offset << 8, mslice,
			radeon_bo_size(track->db_s_write_bo));
		return -EINVAL;
	}

	/* hyperz */
	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
		if (r) {
			return r;
		}
	}

	return 0;
}

static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
	slice = track->db_depth_slice;
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
	surf.format = G_028040_FORMAT(track->db_z_info);
	surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
	surf.nsamples = 1;

	switch (surf.format) {
	case V_028040_Z_16:
		surf.format = V_028C70_COLOR_16;
		break;
	case V_028040_Z_24:
	case V_028040_Z_32_FLOAT:
		surf.format = V_028C70_COLOR_8_8_8_8;
		break;
	default:
		dev_warn(p->dev, "%s:%d depth invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "depth");
	if (r) {
		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_z_info);
		return r;
	}

	r = evergreen_surface_check(p, &surf, "depth");
	if (r) {
		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_z_info);
		return r;
	}

	offset = track->db_z_read_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d depth read bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (!track->db_z_read_bo) {
		dev_warn(p->dev, "%s:%d db_z_read_bo not set\n", __func__, __LINE__);
		return -EINVAL;
	}
	if (offset > radeon_bo_size(track->db_z_read_bo)) {
		dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			(unsigned long)track->db_z_read_offset << 8, mslice,
			radeon_bo_size(track->db_z_read_bo));
		return -EINVAL;
	}

	offset = track->db_z_write_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d depth write bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (!track->db_z_write_bo) {
		dev_warn(p->dev, "%s:%d db_z_write_bo not set\n", __func__, __LINE__);
		return -EINVAL;
	}
	if (offset > radeon_bo_size(track->db_z_write_bo)) {
		dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			(unsigned long)track->db_z_write_offset << 8, mslice,
			radeon_bo_size(track->db_z_write_bo));
		return -EINVAL;
	}

	/* hyperz */
	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
		if (r) {
			return r;
		}
	}

	return 0;
}

static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
					       struct radeon_bo *texture,
					       struct radeon_bo *mipmap,
					       unsigned idx)
{
	struct eg_surface surf;
	unsigned long toffset, moffset;
	unsigned dim, llevel, mslice, width, height, depth, i;
	u32 texdw[8];
	int r;

	texdw[0] = radeon_get_ib_value(p, idx + 0);
	texdw[1] = radeon_get_ib_value(p, idx + 1);
	texdw[2] = radeon_get_ib_value(p, idx + 2);
	texdw[3] = radeon_get_ib_value(p, idx + 3);
	texdw[4] = radeon_get_ib_value(p, idx + 4);
	texdw[5] = radeon_get_ib_value(p, idx + 5);
	texdw[6] = radeon_get_ib_value(p, idx + 6);
	texdw[7] = radeon_get_ib_value(p, idx + 7);
	dim = G_030000_DIM(texdw[0]);
	llevel = G_030014_LAST_LEVEL(texdw[5]);
	mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
	width = G_030000_TEX_WIDTH(texdw[0]) + 1;
	height = G_030004_TEX_HEIGHT(texdw[1]) + 1;
	depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
	surf.format = G_03001C_DATA_FORMAT(texdw[7]);
	surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
	surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
	surf.nby = r600_fmt_get_nblocksy(surf.format, height);
	surf.mode = G_030004_ARRAY_MODE(texdw[1]);
	surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
	surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
	surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
	surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
	surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
	surf.nsamples = 1;
	toffset = texdw[2] << 8;
	moffset = texdw[3] << 8;

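	/* texdw[0..7] hold the eight words of the texture resource
	 * descriptor; the base and mip addresses in words 2 and 3 are
	 * stored in 256-byte units, hence the << 8 above when converting
	 * them to byte offsets.
	 */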
	if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}
	switch (dim) {
	case V_030000_SQ_TEX_DIM_1D:
	case V_030000_SQ_TEX_DIM_2D:
	case V_030000_SQ_TEX_DIM_CUBEMAP:
	case V_030000_SQ_TEX_DIM_1D_ARRAY:
	case V_030000_SQ_TEX_DIM_2D_ARRAY:
		depth = 1;
		break;
	case V_030000_SQ_TEX_DIM_2D_MSAA:
	case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
		surf.nsamples = 1 << llevel;
		llevel = 0;
		depth = 1;
		break;
	case V_030000_SQ_TEX_DIM_3D:
		break;
	default:
		dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
			 __func__, __LINE__, dim);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "texture");
	if (r) {
		return r;
	}

	/* align height */
	evergreen_surface_check(p, &surf, NULL);
#ifdef __NetBSD__		/* XXX ALIGN means something else */
	surf.nby = round_up(surf.nby, surf.halign);
#else
	surf.nby = ALIGN(surf.nby, surf.halign);
#endif

	r = evergreen_surface_check(p, &surf, "texture");
	if (r) {
		dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
			 texdw[5], texdw[6], texdw[7]);
		return r;
	}

	/* check texture size */
	if (toffset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, toffset, surf.base_align);
		return -EINVAL;
	}
	if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, moffset, surf.base_align);
		return -EINVAL;
	}
	if (dim == SQ_TEX_DIM_3D) {
		toffset += surf.layer_size * depth;
	} else {
		toffset += surf.layer_size * mslice;
	}
	if (toffset > radeon_bo_size(texture)) {
		dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
			 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
			 __func__, __LINE__, surf.layer_size,
			(unsigned long)texdw[2] << 8, mslice,
			depth, radeon_bo_size(texture),
			surf.nbx, surf.nby);
		return -EINVAL;
	}

	if (!mipmap) {
		if (llevel) {
			dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
				 __func__, __LINE__);
			return -EINVAL;
		} else {
			return 0; /* everything's ok */
		}
	}

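	/* Walk the mip chain: each level is minified by half, realigned to
	 * its (possibly demoted) tiling mode, and its layer size is
	 * accumulated into the mipmap bo offset, which is then checked
	 * against the bo size at every level.
	 */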
	/* check mipmap size */
	for (i = 1; i <= llevel; i++) {
		unsigned w, h, d;

		w = r600_mip_minify(width, i);
		h = r600_mip_minify(height, i);
		d = r600_mip_minify(depth, i);
		surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
		surf.nby = r600_fmt_get_nblocksy(surf.format, h);

		switch (surf.mode) {
		case ARRAY_2D_TILED_THIN1:
			if (surf.nbx < surf.palign || surf.nby < surf.halign) {
				surf.mode = ARRAY_1D_TILED_THIN1;
			}
			/* recompute alignment */
			evergreen_surface_check(p, &surf, NULL);
			break;
		case ARRAY_LINEAR_GENERAL:
		case ARRAY_LINEAR_ALIGNED:
		case ARRAY_1D_TILED_THIN1:
			break;
		default:
			dev_warn(p->dev, "%s:%d invalid array mode %d\n",
				 __func__, __LINE__, surf.mode);
			return -EINVAL;
		}
#ifdef __NetBSD__		/* XXX ALIGN means something else.  */
		surf.nbx = round_up(surf.nbx, surf.palign);
		surf.nby = round_up(surf.nby, surf.halign);
#else
		surf.nbx = ALIGN(surf.nbx, surf.palign);
		surf.nby = ALIGN(surf.nby, surf.halign);
#endif

		r = evergreen_surface_check(p, &surf, "mipmap");
		if (r) {
			return r;
		}

		if (dim == SQ_TEX_DIM_3D) {
			moffset += surf.layer_size * d;
		} else {
			moffset += surf.layer_size * mslice;
		}
		if (moffset > radeon_bo_size(mipmap)) {
			dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
					"offset %ld, coffset %ld, max layer %d, depth %d, "
					"bo size %ld) level0 (%d %d %d)\n",
					__func__, __LINE__, i, surf.layer_size,
					(unsigned long)texdw[3] << 8, moffset, mslice,
					d, radeon_bo_size(mipmap),
					width, height, depth);
			dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
				 __func__, __LINE__, surf.nbx, surf.nby,
				surf.mode, surf.bpe, surf.nsamples,
				surf.bankw, surf.bankh,
				surf.tsplit, surf.mtilea);
			return -EINVAL;
		}
	}

	return 0;
}

static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	unsigned tmp, i;
	int r;
	unsigned buffer_mask = 0;

	/* check streamout */
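	/* VGT_STRMOUT_CONFIG carries one enable bit per stream and
	 * VGT_STRMOUT_BUFFER_CONFIG a 4-bit buffer mask per stream, hence
	 * the (config >> (i * 4)) & 0xf extraction below.
	 */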
	if (track->streamout_dirty && track->vgt_strmout_config) {
		for (i = 0; i < 4; i++) {
			if (track->vgt_strmout_config & (1 << i)) {
				buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
			}
		}

		for (i = 0; i < 4; i++) {
			if (buffer_mask & (1 << i)) {
				if (track->vgt_strmout_bo[i]) {
					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
							(u64)track->vgt_strmout_size[i];
					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
						DRM_ERROR("streamout %d bo too small: 0x%"PRIx64", 0x%lx\n",
							  i, offset,
							  radeon_bo_size(track->vgt_strmout_bo[i]));
						return -EINVAL;
					}
				} else {
					dev_warn(p->dev, "No buffer for streamout %d\n", i);
					return -EINVAL;
				}
			}
		}
		track->streamout_dirty = false;
	}

	if (track->sx_misc_kill_all_prims)
		return 0;

	/* check that we have a cb for each enabled target */
	if (track->cb_dirty) {
		tmp = track->cb_target_mask;
		for (i = 0; i < 8; i++) {
			u32 format = G_028C70_FORMAT(track->cb_color_info[i]);

			if (format != V_028C70_COLOR_INVALID &&
			    (tmp >> (i * 4)) & 0xF) {
				/* at least one component is enabled */
				if (track->cb_color_bo[i] == NULL) {
					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
					return -EINVAL;
				}
				/* check cb */
				r = evergreen_cs_track_validate_cb(p, i);
				if (r) {
					return r;
				}
			}
		}
		track->cb_dirty = false;
	}

	if (track->db_dirty) {
		/* Check stencil buffer */
		if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
		    G_028800_STENCIL_ENABLE(track->db_depth_control)) {
			r = evergreen_cs_track_validate_stencil(p);
			if (r)
				return r;
		}
		/* Check depth buffer */
		if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
		    G_028800_Z_ENABLE(track->db_depth_control)) {
			r = evergreen_cs_track_validate_depth(p);
			if (r)
				return r;
		}
		track->db_dirty = false;
	}

	return 0;
}

/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p:		parser structure holding parsing context.
 *
 * This is an Evergreen(+)-specific function for parsing VLINE packets.
 * The real work is done by the r600_cs_common_vline_parse() function.
 * Here we just set up the ASIC-specific register table and call
 * the common implementation function.
 */
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	static uint32_t vline_start_end[6] = {
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
	};
	static uint32_t vline_status[6] = {
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
	};

	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}

static int evergreen_packet0_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt,
				   unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case EVERGREEN_VLINE_START_END:
		r = evergreen_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = evergreen_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * evergreen_cs_handle_reg() - process registers that need special handling.
 * @p: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 */
static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
	struct radeon_bo_list *reloc;
	u32 tmp, *ib;
	int r;

	ib = p->ib.ptr;
	switch (reg) {
	/* force the following regs to 0 in an attempt to disable the out
	 * buffer; we would need to understand better how it works before
	 * we can perform a proper security check on it (Jerome)
	 */
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
		/* get value to populate the IB, don't remove */
		/*tmp =radeon_get_ib_value(p, idx);
		  ib[idx] = 0;*/
		break;
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_HSTMP_RING_BASE:
	case SQ_LSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case CAYMAN_DB_EQAA:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case CAYMAN_DB_DEPTH_INFO:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case DB_Z_INFO:
		track->db_z_info = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
						"0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] &= ~Z_ARRAY_MODE(0xf);
			track->db_z_info &= ~Z_ARRAY_MODE(0xf);
			ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
			track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= DB_TILE_SPLIT(tile_split) |
						DB_BANK_WIDTH(bankw) |
						DB_BANK_HEIGHT(bankh) |
						DB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		track->db_dirty = true;
		break;
	case DB_STENCIL_INFO:
		track->db_s_info = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_02805C_DB_DEPTH_SLICE:
		track->db_depth_slice = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case DB_Z_READ_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->db_z_read_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_Z_WRITE_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->db_z_write_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_STENCIL_READ_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->db_s_read_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_STENCIL_WRITE_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->db_s_write_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case VGT_STRMOUT_CONFIG:
		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_CONFIG:
		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_BASE_0:
	case VGT_STRMOUT_BUFFER_BASE_1:
	case VGT_STRMOUT_BUFFER_BASE_2:
	case VGT_STRMOUT_BUFFER_BASE_3:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->vgt_strmout_bo[tmp] = reloc->robj;
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_SIZE_0:
	case VGT_STRMOUT_BUFFER_SIZE_1:
	case VGT_STRMOUT_BUFFER_SIZE_2:
	case VGT_STRMOUT_BUFFER_SIZE_3:
		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
		/* size in register is DWs, convert to bytes */
		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
		track->streamout_dirty = true;
		break;
	case CP_COHER_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case PA_SC_AA_CONFIG:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CAYMAN_PA_SC_AA_CONFIG:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CB_COLOR0_VIEW:
	case CB_COLOR1_VIEW:
	case CB_COLOR2_VIEW:
	case CB_COLOR3_VIEW:
	case CB_COLOR4_VIEW:
	case CB_COLOR5_VIEW:
	case CB_COLOR6_VIEW:
	case CB_COLOR7_VIEW:
		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
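	/* The CB_COLOR0..7 register blocks are 0x3c bytes apart while the
	 * CB_COLOR8..11 blocks are packed 0x1c apart, hence the two
	 * different index computations for each register family below.
	 */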
	case CB_COLOR8_VIEW:
	case CB_COLOR9_VIEW:
	case CB_COLOR10_VIEW:
	case CB_COLOR11_VIEW:
		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR0_INFO:
	case CB_COLOR1_INFO:
	case CB_COLOR2_INFO:
	case CB_COLOR3_INFO:
	case CB_COLOR4_INFO:
	case CB_COLOR5_INFO:
	case CB_COLOR6_INFO:
	case CB_COLOR7_INFO:
		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
						"0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
		}
		track->cb_dirty = true;
		break;
	case CB_COLOR8_INFO:
	case CB_COLOR9_INFO:
	case CB_COLOR10_INFO:
	case CB_COLOR11_INFO:
		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
						"0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
		}
		track->cb_dirty = true;
		break;
	case CB_COLOR0_PITCH:
	case CB_COLOR1_PITCH:
	case CB_COLOR2_PITCH:
	case CB_COLOR3_PITCH:
	case CB_COLOR4_PITCH:
	case CB_COLOR5_PITCH:
	case CB_COLOR6_PITCH:
	case CB_COLOR7_PITCH:
		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR8_PITCH:
	case CB_COLOR9_PITCH:
	case CB_COLOR10_PITCH:
	case CB_COLOR11_PITCH:
		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR0_SLICE:
	case CB_COLOR1_SLICE:
	case CB_COLOR2_SLICE:
	case CB_COLOR3_SLICE:
	case CB_COLOR4_SLICE:
	case CB_COLOR5_SLICE:
	case CB_COLOR6_SLICE:
	case CB_COLOR7_SLICE:
		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
	case CB_COLOR8_SLICE:
	case CB_COLOR9_SLICE:
	case CB_COLOR10_SLICE:
	case CB_COLOR11_SLICE:
		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
	case CB_COLOR0_ATTRIB:
	case CB_COLOR1_ATTRIB:
	case CB_COLOR2_ATTRIB:
	case CB_COLOR3_ATTRIB:
	case CB_COLOR4_ATTRIB:
	case CB_COLOR5_ATTRIB:
	case CB_COLOR6_ATTRIB:
	case CB_COLOR7_ATTRIB:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= CB_TILE_SPLIT(tile_split) |
					   CB_BANK_WIDTH(bankw) |
					   CB_BANK_HEIGHT(bankh) |
					   CB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
		track->cb_color_attrib[tmp] = ib[idx];
		track->cb_dirty = true;
		break;
	case CB_COLOR8_ATTRIB:
	case CB_COLOR9_ATTRIB:
	case CB_COLOR10_ATTRIB:
	case CB_COLOR11_ATTRIB:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= CB_TILE_SPLIT(tile_split) |
					   CB_BANK_WIDTH(bankw) |
					   CB_BANK_HEIGHT(bankh) |
					   CB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
		track->cb_color_attrib[tmp] = ib[idx];
		track->cb_dirty = true;
		break;
	case CB_COLOR0_FMASK:
	case CB_COLOR1_FMASK:
	case CB_COLOR2_FMASK:
	case CB_COLOR3_FMASK:
	case CB_COLOR4_FMASK:
	case CB_COLOR5_FMASK:
	case CB_COLOR6_FMASK:
	case CB_COLOR7_FMASK:
		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->cb_color_fmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_CMASK:
	case CB_COLOR1_CMASK:
	case CB_COLOR2_CMASK:
	case CB_COLOR3_CMASK:
	case CB_COLOR4_CMASK:
	case CB_COLOR5_CMASK:
	case CB_COLOR6_CMASK:
	case CB_COLOR7_CMASK:
		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->cb_color_cmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_FMASK_SLICE:
	case CB_COLOR1_FMASK_SLICE:
	case CB_COLOR2_FMASK_SLICE:
	case CB_COLOR3_FMASK_SLICE:
	case CB_COLOR4_FMASK_SLICE:
	case CB_COLOR5_FMASK_SLICE:
	case CB_COLOR6_FMASK_SLICE:
	case CB_COLOR7_FMASK_SLICE:
		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_CMASK_SLICE:
	case CB_COLOR1_CMASK_SLICE:
	case CB_COLOR2_CMASK_SLICE:
	case CB_COLOR3_CMASK_SLICE:
	case CB_COLOR4_CMASK_SLICE:
	case CB_COLOR5_CMASK_SLICE:
	case CB_COLOR6_CMASK_SLICE:
	case CB_COLOR7_CMASK_SLICE:
		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
   1599 		track->cb_color_bo[tmp] = reloc->robj;
   1600 		track->cb_dirty = true;
   1601 		break;
   1602 	case CB_COLOR8_BASE:
   1603 	case CB_COLOR9_BASE:
   1604 	case CB_COLOR10_BASE:
   1605 	case CB_COLOR11_BASE:
   1606 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   1607 		if (r) {
   1608 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
   1609 					"0x%04X\n", reg);
   1610 			return -EINVAL;
   1611 		}
   1612 		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
   1613 		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
   1614 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
   1615 		track->cb_color_bo[tmp] = reloc->robj;
   1616 		track->cb_dirty = true;
   1617 		break;
   1618 	case DB_HTILE_DATA_BASE:
   1619 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   1620 		if (r) {
   1621 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
   1622 					"0x%04X\n", reg);
   1623 			return -EINVAL;
   1624 		}
   1625 		track->htile_offset = radeon_get_ib_value(p, idx);
   1626 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
   1627 		track->htile_bo = reloc->robj;
   1628 		track->db_dirty = true;
   1629 		break;
   1630 	case DB_HTILE_SURFACE:
   1631 		/* 8x8 only */
   1632 		track->htile_surface = radeon_get_ib_value(p, idx);
   1633 		/* force 8x8 htile width and height */
   1634 		ib[idx] |= 3;
   1635 		track->db_dirty = true;
   1636 		break;
   1637 	case CB_IMMED0_BASE:
   1638 	case CB_IMMED1_BASE:
   1639 	case CB_IMMED2_BASE:
   1640 	case CB_IMMED3_BASE:
   1641 	case CB_IMMED4_BASE:
   1642 	case CB_IMMED5_BASE:
   1643 	case CB_IMMED6_BASE:
   1644 	case CB_IMMED7_BASE:
   1645 	case CB_IMMED8_BASE:
   1646 	case CB_IMMED9_BASE:
   1647 	case CB_IMMED10_BASE:
   1648 	case CB_IMMED11_BASE:
   1649 	case SQ_PGM_START_FS:
   1650 	case SQ_PGM_START_ES:
   1651 	case SQ_PGM_START_VS:
   1652 	case SQ_PGM_START_GS:
   1653 	case SQ_PGM_START_PS:
   1654 	case SQ_PGM_START_HS:
   1655 	case SQ_PGM_START_LS:
   1656 	case SQ_CONST_MEM_BASE:
   1657 	case SQ_ALU_CONST_CACHE_GS_0:
   1658 	case SQ_ALU_CONST_CACHE_GS_1:
   1659 	case SQ_ALU_CONST_CACHE_GS_2:
   1660 	case SQ_ALU_CONST_CACHE_GS_3:
   1661 	case SQ_ALU_CONST_CACHE_GS_4:
   1662 	case SQ_ALU_CONST_CACHE_GS_5:
   1663 	case SQ_ALU_CONST_CACHE_GS_6:
   1664 	case SQ_ALU_CONST_CACHE_GS_7:
   1665 	case SQ_ALU_CONST_CACHE_GS_8:
   1666 	case SQ_ALU_CONST_CACHE_GS_9:
   1667 	case SQ_ALU_CONST_CACHE_GS_10:
   1668 	case SQ_ALU_CONST_CACHE_GS_11:
   1669 	case SQ_ALU_CONST_CACHE_GS_12:
   1670 	case SQ_ALU_CONST_CACHE_GS_13:
   1671 	case SQ_ALU_CONST_CACHE_GS_14:
   1672 	case SQ_ALU_CONST_CACHE_GS_15:
   1673 	case SQ_ALU_CONST_CACHE_PS_0:
   1674 	case SQ_ALU_CONST_CACHE_PS_1:
   1675 	case SQ_ALU_CONST_CACHE_PS_2:
   1676 	case SQ_ALU_CONST_CACHE_PS_3:
   1677 	case SQ_ALU_CONST_CACHE_PS_4:
   1678 	case SQ_ALU_CONST_CACHE_PS_5:
   1679 	case SQ_ALU_CONST_CACHE_PS_6:
   1680 	case SQ_ALU_CONST_CACHE_PS_7:
   1681 	case SQ_ALU_CONST_CACHE_PS_8:
   1682 	case SQ_ALU_CONST_CACHE_PS_9:
   1683 	case SQ_ALU_CONST_CACHE_PS_10:
   1684 	case SQ_ALU_CONST_CACHE_PS_11:
   1685 	case SQ_ALU_CONST_CACHE_PS_12:
   1686 	case SQ_ALU_CONST_CACHE_PS_13:
   1687 	case SQ_ALU_CONST_CACHE_PS_14:
   1688 	case SQ_ALU_CONST_CACHE_PS_15:
   1689 	case SQ_ALU_CONST_CACHE_VS_0:
   1690 	case SQ_ALU_CONST_CACHE_VS_1:
   1691 	case SQ_ALU_CONST_CACHE_VS_2:
   1692 	case SQ_ALU_CONST_CACHE_VS_3:
   1693 	case SQ_ALU_CONST_CACHE_VS_4:
   1694 	case SQ_ALU_CONST_CACHE_VS_5:
   1695 	case SQ_ALU_CONST_CACHE_VS_6:
   1696 	case SQ_ALU_CONST_CACHE_VS_7:
   1697 	case SQ_ALU_CONST_CACHE_VS_8:
   1698 	case SQ_ALU_CONST_CACHE_VS_9:
   1699 	case SQ_ALU_CONST_CACHE_VS_10:
   1700 	case SQ_ALU_CONST_CACHE_VS_11:
   1701 	case SQ_ALU_CONST_CACHE_VS_12:
   1702 	case SQ_ALU_CONST_CACHE_VS_13:
   1703 	case SQ_ALU_CONST_CACHE_VS_14:
   1704 	case SQ_ALU_CONST_CACHE_VS_15:
   1705 	case SQ_ALU_CONST_CACHE_HS_0:
   1706 	case SQ_ALU_CONST_CACHE_HS_1:
   1707 	case SQ_ALU_CONST_CACHE_HS_2:
   1708 	case SQ_ALU_CONST_CACHE_HS_3:
   1709 	case SQ_ALU_CONST_CACHE_HS_4:
   1710 	case SQ_ALU_CONST_CACHE_HS_5:
   1711 	case SQ_ALU_CONST_CACHE_HS_6:
   1712 	case SQ_ALU_CONST_CACHE_HS_7:
   1713 	case SQ_ALU_CONST_CACHE_HS_8:
   1714 	case SQ_ALU_CONST_CACHE_HS_9:
   1715 	case SQ_ALU_CONST_CACHE_HS_10:
   1716 	case SQ_ALU_CONST_CACHE_HS_11:
   1717 	case SQ_ALU_CONST_CACHE_HS_12:
   1718 	case SQ_ALU_CONST_CACHE_HS_13:
   1719 	case SQ_ALU_CONST_CACHE_HS_14:
   1720 	case SQ_ALU_CONST_CACHE_HS_15:
   1721 	case SQ_ALU_CONST_CACHE_LS_0:
   1722 	case SQ_ALU_CONST_CACHE_LS_1:
   1723 	case SQ_ALU_CONST_CACHE_LS_2:
   1724 	case SQ_ALU_CONST_CACHE_LS_3:
   1725 	case SQ_ALU_CONST_CACHE_LS_4:
   1726 	case SQ_ALU_CONST_CACHE_LS_5:
   1727 	case SQ_ALU_CONST_CACHE_LS_6:
   1728 	case SQ_ALU_CONST_CACHE_LS_7:
   1729 	case SQ_ALU_CONST_CACHE_LS_8:
   1730 	case SQ_ALU_CONST_CACHE_LS_9:
   1731 	case SQ_ALU_CONST_CACHE_LS_10:
   1732 	case SQ_ALU_CONST_CACHE_LS_11:
   1733 	case SQ_ALU_CONST_CACHE_LS_12:
   1734 	case SQ_ALU_CONST_CACHE_LS_13:
   1735 	case SQ_ALU_CONST_CACHE_LS_14:
   1736 	case SQ_ALU_CONST_CACHE_LS_15:
   1737 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   1738 		if (r) {
   1739 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
   1740 					"0x%04X\n", reg);
   1741 			return -EINVAL;
   1742 		}
   1743 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
   1744 		break;
   1745 	case SX_MEMORY_EXPORT_BASE:
   1746 		if (p->rdev->family >= CHIP_CAYMAN) {
   1747 			dev_warn(p->dev, "bad SET_CONFIG_REG "
   1748 				 "0x%04X\n", reg);
   1749 			return -EINVAL;
   1750 		}
   1751 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   1752 		if (r) {
   1753 			dev_warn(p->dev, "bad SET_CONFIG_REG "
   1754 					"0x%04X\n", reg);
   1755 			return -EINVAL;
   1756 		}
   1757 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
   1758 		break;
   1759 	case CAYMAN_SX_SCATTER_EXPORT_BASE:
   1760 		if (p->rdev->family < CHIP_CAYMAN) {
   1761 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
   1762 				 "0x%04X\n", reg);
   1763 			return -EINVAL;
   1764 		}
   1765 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   1766 		if (r) {
   1767 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
   1768 					"0x%04X\n", reg);
   1769 			return -EINVAL;
   1770 		}
   1771 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
   1772 		break;
   1773 	case SX_MISC:
   1774 		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
   1775 		break;
   1776 	default:
   1777 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
   1778 		return -EINVAL;
   1779 	}
   1780 	return 0;
   1781 }
   1782 
   1783 /**
   1784  * evergreen_is_safe_reg() - check if register is authorized or not
    1785  * @p: parser structure holding parsing context
    1786  * @reg: register we are testing
    1787  *
    1788  * Tests @reg against reg_safe_bm and returns true if the register
    1789  * may be written without further validation, false otherwise.
   1790  */
   1791 static inline bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg)
   1792 {
   1793 	struct evergreen_cs_track *track = p->track;
   1794 	u32 m, i;
   1795 
   1796 	i = (reg >> 7);
   1797 	if (unlikely(i >= REG_SAFE_BM_SIZE)) {
   1798 		return false;
   1799 	}
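	/*
	 * Each bitmap word covers 32 dword registers: reg >> 7 selects the
	 * word and bits [6:2] of the offset select the bit.  A clear bit
	 * means the register needs no special handling.
	 */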
   1800 	m = 1 << ((reg >> 2) & 31);
   1801 	if (!(track->reg_safe_bm[i] & m))
   1802 		return true;
   1803 
   1804 	return false;
   1805 }
   1806 
   1807 static int evergreen_packet3_check(struct radeon_cs_parser *p,
   1808 				   struct radeon_cs_packet *pkt)
   1809 {
   1810 	struct radeon_bo_list *reloc;
   1811 	struct evergreen_cs_track *track;
   1812 	uint32_t *ib;
   1813 	unsigned idx;
   1814 	unsigned i;
   1815 	unsigned start_reg, end_reg, reg;
   1816 	int r;
   1817 	u32 idx_value;
   1818 
   1819 	track = (struct evergreen_cs_track *)p->track;
   1820 	ib = p->ib.ptr;
   1821 	idx = pkt->idx + 1;
   1822 	idx_value = radeon_get_ib_value(p, idx);
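	/* idx points at the first dword after the packet header; most
	 * packets decode that dword first, so cache it. */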
   1823 
   1824 	switch (pkt->opcode) {
   1825 	case PACKET3_SET_PREDICATION:
   1826 	{
   1827 		int pred_op;
   1828 		int tmp;
   1829 		uint64_t offset;
   1830 
   1831 		if (pkt->count != 1) {
   1832 			DRM_ERROR("bad SET PREDICATION\n");
   1833 			return -EINVAL;
   1834 		}
   1835 
   1836 		tmp = radeon_get_ib_value(p, idx + 1);
   1837 		pred_op = (tmp >> 16) & 0x7;
   1838 
   1839 		/* for the clear predicate operation */
   1840 		if (pred_op == 0)
   1841 			return 0;
   1842 
   1843 		if (pred_op > 2) {
   1844 			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
   1845 			return -EINVAL;
   1846 		}
   1847 
   1848 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   1849 		if (r) {
   1850 			DRM_ERROR("bad SET PREDICATION\n");
   1851 			return -EINVAL;
   1852 		}
   1853 
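		/* Rebase the predicate source address: the low dword comes
		 * from the packet (16-byte aligned), the high 8 bits from
		 * dword 2. */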
   1854 		offset = reloc->gpu_offset +
   1855 		         (idx_value & 0xfffffff0) +
   1856 		         ((u64)(tmp & 0xff) << 32);
   1857 
   1858 		ib[idx + 0] = offset;
   1859 		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
   1860 	}
   1861 	break;
   1862 	case PACKET3_CONTEXT_CONTROL:
   1863 		if (pkt->count != 1) {
   1864 			DRM_ERROR("bad CONTEXT_CONTROL\n");
   1865 			return -EINVAL;
   1866 		}
   1867 		break;
   1868 	case PACKET3_INDEX_TYPE:
   1869 	case PACKET3_NUM_INSTANCES:
   1870 	case PACKET3_CLEAR_STATE:
   1871 		if (pkt->count) {
   1872 			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
   1873 			return -EINVAL;
   1874 		}
   1875 		break;
   1876 	case CAYMAN_PACKET3_DEALLOC_STATE:
   1877 		if (p->rdev->family < CHIP_CAYMAN) {
   1878 			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
   1879 			return -EINVAL;
   1880 		}
   1881 		if (pkt->count) {
    1882 			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
   1883 			return -EINVAL;
   1884 		}
   1885 		break;
   1886 	case PACKET3_INDEX_BASE:
   1887 	{
   1888 		uint64_t offset;
   1889 
   1890 		if (pkt->count != 1) {
   1891 			DRM_ERROR("bad INDEX_BASE\n");
   1892 			return -EINVAL;
   1893 		}
   1894 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   1895 		if (r) {
   1896 			DRM_ERROR("bad INDEX_BASE\n");
   1897 			return -EINVAL;
   1898 		}
   1899 
   1900 		offset = reloc->gpu_offset +
   1901 		         idx_value +
   1902 		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
   1903 
   1904 		ib[idx+0] = offset;
   1905 		ib[idx+1] = upper_32_bits(offset) & 0xff;
   1906 
   1907 		r = evergreen_cs_track_check(p);
   1908 		if (r) {
   1909 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
   1910 			return r;
   1911 		}
   1912 		break;
   1913 	}
   1914 	case PACKET3_INDEX_BUFFER_SIZE:
   1915 	{
   1916 		if (pkt->count != 0) {
   1917 			DRM_ERROR("bad INDEX_BUFFER_SIZE\n");
   1918 			return -EINVAL;
   1919 		}
   1920 		break;
   1921 	}
   1922 	case PACKET3_DRAW_INDEX:
   1923 	{
   1924 		uint64_t offset;
   1925 		if (pkt->count != 3) {
   1926 			DRM_ERROR("bad DRAW_INDEX\n");
   1927 			return -EINVAL;
   1928 		}
   1929 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   1930 		if (r) {
   1931 			DRM_ERROR("bad DRAW_INDEX\n");
   1932 			return -EINVAL;
   1933 		}
   1934 
   1935 		offset = reloc->gpu_offset +
   1936 		         idx_value +
   1937 		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
   1938 
   1939 		ib[idx+0] = offset;
   1940 		ib[idx+1] = upper_32_bits(offset) & 0xff;
   1941 
   1942 		r = evergreen_cs_track_check(p);
   1943 		if (r) {
   1944 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
   1945 			return r;
   1946 		}
   1947 		break;
   1948 	}
   1949 	case PACKET3_DRAW_INDEX_2:
   1950 	{
   1951 		uint64_t offset;
   1952 
   1953 		if (pkt->count != 4) {
   1954 			DRM_ERROR("bad DRAW_INDEX_2\n");
   1955 			return -EINVAL;
   1956 		}
   1957 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   1958 		if (r) {
   1959 			DRM_ERROR("bad DRAW_INDEX_2\n");
   1960 			return -EINVAL;
   1961 		}
   1962 
   1963 		offset = reloc->gpu_offset +
   1964 		         radeon_get_ib_value(p, idx+1) +
   1965 		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
   1966 
   1967 		ib[idx+1] = offset;
   1968 		ib[idx+2] = upper_32_bits(offset) & 0xff;
   1969 
   1970 		r = evergreen_cs_track_check(p);
   1971 		if (r) {
   1972 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
   1973 			return r;
   1974 		}
   1975 		break;
   1976 	}
   1977 	case PACKET3_DRAW_INDEX_AUTO:
   1978 		if (pkt->count != 1) {
   1979 			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
   1980 			return -EINVAL;
   1981 		}
   1982 		r = evergreen_cs_track_check(p);
   1983 		if (r) {
   1984 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
   1985 			return r;
   1986 		}
   1987 		break;
   1988 	case PACKET3_DRAW_INDEX_MULTI_AUTO:
   1989 		if (pkt->count != 2) {
   1990 			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
   1991 			return -EINVAL;
   1992 		}
   1993 		r = evergreen_cs_track_check(p);
   1994 		if (r) {
   1995 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
   1996 			return r;
   1997 		}
   1998 		break;
   1999 	case PACKET3_DRAW_INDEX_IMMD:
   2000 		if (pkt->count < 2) {
   2001 			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
   2002 			return -EINVAL;
   2003 		}
   2004 		r = evergreen_cs_track_check(p);
   2005 		if (r) {
   2006 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
   2007 			return r;
   2008 		}
   2009 		break;
   2010 	case PACKET3_DRAW_INDEX_OFFSET:
   2011 		if (pkt->count != 2) {
   2012 			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
   2013 			return -EINVAL;
   2014 		}
   2015 		r = evergreen_cs_track_check(p);
   2016 		if (r) {
   2017 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
   2018 			return r;
   2019 		}
   2020 		break;
   2021 	case PACKET3_DRAW_INDEX_OFFSET_2:
   2022 		if (pkt->count != 3) {
   2023 			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
   2024 			return -EINVAL;
   2025 		}
   2026 		r = evergreen_cs_track_check(p);
   2027 		if (r) {
   2028 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
   2029 			return r;
   2030 		}
   2031 		break;
   2032 	case PACKET3_SET_BASE:
   2033 	{
   2034 		/*
   2035 		DW 1 HEADER Header of the packet. Shader_Type in bit 1 of the Header will correspond to the shader type of the Load, see Type-3 Packet.
   2036 		   2 BASE_INDEX Bits [3:0] BASE_INDEX - Base Index specifies which base address is specified in the last two DWs.
   2037 		     0001: DX11 Draw_Index_Indirect Patch Table Base: Base address for Draw_Index_Indirect data.
   2038 		   3 ADDRESS_LO Bits [31:3] - Lower bits of QWORD-Aligned Address. Bits [2:0] - Reserved
   2039 		   4 ADDRESS_HI Bits [31:8] - Reserved. Bits [7:0] - Upper bits of Address [47:32]
   2040 		*/
   2041 		if (pkt->count != 2) {
   2042 			DRM_ERROR("bad SET_BASE\n");
   2043 			return -EINVAL;
   2044 		}
   2045 
   2046 		/* currently only supporting setting indirect draw buffer base address */
   2047 		if (idx_value != 1) {
   2048 			DRM_ERROR("bad SET_BASE\n");
   2049 			return -EINVAL;
   2050 		}
   2051 
   2052 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2053 		if (r) {
   2054 			DRM_ERROR("bad SET_BASE\n");
   2055 			return -EINVAL;
   2056 		}
   2057 
   2058 		track->indirect_draw_buffer_size = radeon_bo_size(reloc->robj);
   2059 
   2060 		ib[idx+1] = reloc->gpu_offset;
   2061 		ib[idx+2] = upper_32_bits(reloc->gpu_offset) & 0xff;
   2062 
   2063 		break;
   2064 	}
   2065 	case PACKET3_DRAW_INDIRECT:
   2066 	case PACKET3_DRAW_INDEX_INDIRECT:
   2067 	{
   2068 		u64 size = pkt->opcode == PACKET3_DRAW_INDIRECT ? 16 : 20;
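		/* Byte size of the argument block the GPU fetches from the
		 * indirect buffer: 16 for a plain draw, 20 for an indexed
		 * draw (checked against indirect_draw_buffer_size below). */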
   2069 
   2070 		/*
   2071 		DW 1 HEADER
    2072 		   2 DATA_OFFSET Bits [31:0] - byte-aligned offset where the required data structure starts. Bits [1:0] are zero.
   2073 		   3 DRAW_INITIATOR Draw Initiator Register. Written to the VGT_DRAW_INITIATOR register for the assigned context
   2074 		*/
   2075 		if (pkt->count != 1) {
   2076 			DRM_ERROR("bad DRAW_INDIRECT\n");
   2077 			return -EINVAL;
   2078 		}
   2079 
   2080 		if (idx_value + size > track->indirect_draw_buffer_size) {
   2081 			dev_warn(p->dev, "DRAW_INDIRECT buffer too small %u + %"PRIx64" > %lu\n",
   2082 				idx_value, size, track->indirect_draw_buffer_size);
   2083 			return -EINVAL;
   2084 		}
   2085 
   2086 		r = evergreen_cs_track_check(p);
   2087 		if (r) {
   2088 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
   2089 			return r;
   2090 		}
   2091 		break;
   2092 	}
   2093 	case PACKET3_DISPATCH_DIRECT:
   2094 		if (pkt->count != 3) {
   2095 			DRM_ERROR("bad DISPATCH_DIRECT\n");
   2096 			return -EINVAL;
   2097 		}
   2098 		r = evergreen_cs_track_check(p);
   2099 		if (r) {
   2100 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
   2101 			return r;
   2102 		}
   2103 		break;
   2104 	case PACKET3_DISPATCH_INDIRECT:
   2105 		if (pkt->count != 1) {
   2106 			DRM_ERROR("bad DISPATCH_INDIRECT\n");
   2107 			return -EINVAL;
   2108 		}
   2109 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2110 		if (r) {
   2111 			DRM_ERROR("bad DISPATCH_INDIRECT\n");
   2112 			return -EINVAL;
   2113 		}
   2114 		ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff);
   2115 		r = evergreen_cs_track_check(p);
   2116 		if (r) {
   2117 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
   2118 			return r;
   2119 		}
   2120 		break;
   2121 	case PACKET3_WAIT_REG_MEM:
   2122 		if (pkt->count != 5) {
   2123 			DRM_ERROR("bad WAIT_REG_MEM\n");
   2124 			return -EINVAL;
   2125 		}
   2126 		/* bit 4 is reg (0) or mem (1) */
   2127 		if (idx_value & 0x10) {
   2128 			uint64_t offset;
   2129 
   2130 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2131 			if (r) {
   2132 				DRM_ERROR("bad WAIT_REG_MEM\n");
   2133 				return -EINVAL;
   2134 			}
   2135 
   2136 			offset = reloc->gpu_offset +
   2137 			         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
   2138 			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
   2139 
   2140 			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
   2141 			ib[idx+2] = upper_32_bits(offset) & 0xff;
   2142 		} else if (idx_value & 0x100) {
   2143 			DRM_ERROR("cannot use PFP on REG wait\n");
   2144 			return -EINVAL;
   2145 		}
   2146 		break;
   2147 	case PACKET3_CP_DMA:
   2148 	{
   2149 		u32 command, size, info;
   2150 		u64 offset, tmp;
   2151 		if (pkt->count != 4) {
   2152 			DRM_ERROR("bad CP DMA\n");
   2153 			return -EINVAL;
   2154 		}
   2155 		command = radeon_get_ib_value(p, idx+4);
   2156 		size = command & 0x1fffff;
   2157 		info = radeon_get_ib_value(p, idx+1);
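		/* Per the masks below: info bits [30:29] select the src
		 * address space and bits [21:20] the dst address space
		 * (0 = memory, 1 = GDS, 2 = DATA/register). */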
   2158 		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
   2159 		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
   2160 		    ((((info & 0x00300000) >> 20) == 0) &&
   2161 		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
   2162 		    ((((info & 0x60000000) >> 29) == 0) &&
   2163 		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
    2164 			/* non mem-to-mem copies require a dword-aligned byte count */
   2165 			if (size % 4) {
   2166 				DRM_ERROR("CP DMA command requires dw count alignment\n");
   2167 				return -EINVAL;
   2168 			}
   2169 		}
   2170 		if (command & PACKET3_CP_DMA_CMD_SAS) {
   2171 			/* src address space is register */
   2172 			/* GDS is ok */
   2173 			if (((info & 0x60000000) >> 29) != 1) {
   2174 				DRM_ERROR("CP DMA SAS not supported\n");
   2175 				return -EINVAL;
   2176 			}
   2177 		} else {
   2178 			if (command & PACKET3_CP_DMA_CMD_SAIC) {
   2179 				DRM_ERROR("CP DMA SAIC only supported for registers\n");
   2180 				return -EINVAL;
   2181 			}
   2182 			/* src address space is memory */
   2183 			if (((info & 0x60000000) >> 29) == 0) {
   2184 				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2185 				if (r) {
   2186 					DRM_ERROR("bad CP DMA SRC\n");
   2187 					return -EINVAL;
   2188 				}
   2189 
   2190 				tmp = radeon_get_ib_value(p, idx) +
   2191 					((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
   2192 
   2193 				offset = reloc->gpu_offset + tmp;
   2194 
   2195 				if ((tmp + size) > radeon_bo_size(reloc->robj)) {
   2196 					dev_warn(p->dev, "CP DMA src buffer too small (%"PRIu64" %lu)\n",
   2197 						 tmp + size, radeon_bo_size(reloc->robj));
   2198 					return -EINVAL;
   2199 				}
   2200 
   2201 				ib[idx] = offset;
   2202 				ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
   2203 			} else if (((info & 0x60000000) >> 29) != 2) {
   2204 				DRM_ERROR("bad CP DMA SRC_SEL\n");
   2205 				return -EINVAL;
   2206 			}
   2207 		}
   2208 		if (command & PACKET3_CP_DMA_CMD_DAS) {
   2209 			/* dst address space is register */
   2210 			/* GDS is ok */
   2211 			if (((info & 0x00300000) >> 20) != 1) {
   2212 				DRM_ERROR("CP DMA DAS not supported\n");
   2213 				return -EINVAL;
   2214 			}
   2215 		} else {
   2216 			/* dst address space is memory */
   2217 			if (command & PACKET3_CP_DMA_CMD_DAIC) {
   2218 				DRM_ERROR("CP DMA DAIC only supported for registers\n");
   2219 				return -EINVAL;
   2220 			}
   2221 			if (((info & 0x00300000) >> 20) == 0) {
   2222 				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2223 				if (r) {
   2224 					DRM_ERROR("bad CP DMA DST\n");
   2225 					return -EINVAL;
   2226 				}
   2227 
   2228 				tmp = radeon_get_ib_value(p, idx+2) +
   2229 					((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
   2230 
   2231 				offset = reloc->gpu_offset + tmp;
   2232 
   2233 				if ((tmp + size) > radeon_bo_size(reloc->robj)) {
   2234 					dev_warn(p->dev, "CP DMA dst buffer too small (%"PRIu64" %lu)\n",
   2235 						 tmp + size, radeon_bo_size(reloc->robj));
   2236 					return -EINVAL;
   2237 				}
   2238 
   2239 				ib[idx+2] = offset;
   2240 				ib[idx+3] = upper_32_bits(offset) & 0xff;
   2241 			} else {
   2242 				DRM_ERROR("bad CP DMA DST_SEL\n");
   2243 				return -EINVAL;
   2244 			}
   2245 		}
   2246 		break;
   2247 	}
   2248 	case PACKET3_SURFACE_SYNC:
   2249 		if (pkt->count != 3) {
   2250 			DRM_ERROR("bad SURFACE_SYNC\n");
   2251 			return -EINVAL;
   2252 		}
    2253 		/* size 0xffffffff with base 0x0 means "flush all caches"; no reloc follows */
   2254 		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
   2255 		    radeon_get_ib_value(p, idx + 2) != 0) {
   2256 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2257 			if (r) {
   2258 				DRM_ERROR("bad SURFACE_SYNC\n");
   2259 				return -EINVAL;
   2260 			}
   2261 			ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
   2262 		}
   2263 		break;
   2264 	case PACKET3_EVENT_WRITE:
   2265 		if (pkt->count != 2 && pkt->count != 0) {
   2266 			DRM_ERROR("bad EVENT_WRITE\n");
   2267 			return -EINVAL;
   2268 		}
   2269 		if (pkt->count) {
   2270 			uint64_t offset;
   2271 
   2272 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2273 			if (r) {
   2274 				DRM_ERROR("bad EVENT_WRITE\n");
   2275 				return -EINVAL;
   2276 			}
   2277 			offset = reloc->gpu_offset +
   2278 			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
   2279 			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
   2280 
   2281 			ib[idx+1] = offset & 0xfffffff8;
   2282 			ib[idx+2] = upper_32_bits(offset) & 0xff;
   2283 		}
   2284 		break;
   2285 	case PACKET3_EVENT_WRITE_EOP:
   2286 	{
   2287 		uint64_t offset;
   2288 
   2289 		if (pkt->count != 4) {
   2290 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
   2291 			return -EINVAL;
   2292 		}
   2293 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2294 		if (r) {
   2295 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
   2296 			return -EINVAL;
   2297 		}
   2298 
   2299 		offset = reloc->gpu_offset +
   2300 		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
   2301 		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
   2302 
   2303 		ib[idx+1] = offset & 0xfffffffc;
   2304 		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
   2305 		break;
   2306 	}
   2307 	case PACKET3_EVENT_WRITE_EOS:
   2308 	{
   2309 		uint64_t offset;
   2310 
   2311 		if (pkt->count != 3) {
   2312 			DRM_ERROR("bad EVENT_WRITE_EOS\n");
   2313 			return -EINVAL;
   2314 		}
   2315 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2316 		if (r) {
   2317 			DRM_ERROR("bad EVENT_WRITE_EOS\n");
   2318 			return -EINVAL;
   2319 		}
   2320 
   2321 		offset = reloc->gpu_offset +
   2322 		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
   2323 		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
   2324 
   2325 		ib[idx+1] = offset & 0xfffffffc;
   2326 		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
   2327 		break;
   2328 	}
   2329 	case PACKET3_SET_CONFIG_REG:
   2330 		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
   2331 		end_reg = 4 * pkt->count + start_reg - 4;
   2332 		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
   2333 		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
   2334 		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
   2335 			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
   2336 			return -EINVAL;
   2337 		}
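		/* Walk each written register, passing known-safe ones through
		 * untouched and routing the rest to evergreen_cs_handle_reg(). */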
   2338 		for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
   2339 			if (evergreen_is_safe_reg(p, reg))
   2340 				continue;
   2341 			r = evergreen_cs_handle_reg(p, reg, idx);
   2342 			if (r)
   2343 				return r;
   2344 		}
   2345 		break;
   2346 	case PACKET3_SET_CONTEXT_REG:
   2347 		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
   2348 		end_reg = 4 * pkt->count + start_reg - 4;
   2349 		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
   2350 		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
   2351 		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
   2352 			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
   2353 			return -EINVAL;
   2354 		}
   2355 		for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
   2356 			if (evergreen_is_safe_reg(p, reg))
   2357 				continue;
   2358 			r = evergreen_cs_handle_reg(p, reg, idx);
   2359 			if (r)
   2360 				return r;
   2361 		}
   2362 		break;
   2363 	case PACKET3_SET_RESOURCE:
   2364 		if (pkt->count % 8) {
   2365 			DRM_ERROR("bad SET_RESOURCE\n");
   2366 			return -EINVAL;
   2367 		}
   2368 		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
   2369 		end_reg = 4 * pkt->count + start_reg - 4;
   2370 		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
   2371 		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
   2372 		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
   2373 			DRM_ERROR("bad SET_RESOURCE\n");
   2374 			return -EINVAL;
   2375 		}
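		/* Each resource descriptor is 8 dwords; dword 7 carries the
		 * SQ_CONSTANT_TYPE that distinguishes textures from vertex
		 * buffers. */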
   2376 		for (i = 0; i < (pkt->count / 8); i++) {
   2377 			struct radeon_bo *texture, *mipmap;
   2378 			u32 toffset, moffset;
   2379 			u32 size, offset, mip_address, tex_dim;
   2380 
   2381 			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
   2382 			case SQ_TEX_VTX_VALID_TEXTURE:
   2383 				/* tex base */
   2384 				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2385 				if (r) {
   2386 					DRM_ERROR("bad SET_RESOURCE (tex)\n");
   2387 					return -EINVAL;
   2388 				}
   2389 				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
   2390 					ib[idx+1+(i*8)+1] |=
   2391 						TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
   2392 					if (reloc->tiling_flags & RADEON_TILING_MACRO) {
   2393 						unsigned bankw, bankh, mtaspect, tile_split;
   2394 
   2395 						evergreen_tiling_fields(reloc->tiling_flags,
   2396 									&bankw, &bankh, &mtaspect,
   2397 									&tile_split);
   2398 						ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
   2399 						ib[idx+1+(i*8)+7] |=
   2400 							TEX_BANK_WIDTH(bankw) |
   2401 							TEX_BANK_HEIGHT(bankh) |
   2402 							MACRO_TILE_ASPECT(mtaspect) |
   2403 							TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
   2404 					}
   2405 				}
   2406 				texture = reloc->robj;
   2407 				toffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
   2408 
   2409 				/* tex mip base */
   2410 				tex_dim = ib[idx+1+(i*8)+0] & 0x7;
   2411 				mip_address = ib[idx+1+(i*8)+3];
   2412 
   2413 				if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
   2414 				    !mip_address &&
   2415 				    !radeon_cs_packet_next_is_pkt3_nop(p)) {
   2416 					/* MIP_ADDRESS should point to FMASK for an MSAA texture.
   2417 					 * It should be 0 if FMASK is disabled. */
   2418 					moffset = 0;
   2419 					mipmap = NULL;
   2420 				} else {
   2421 					r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2422 					if (r) {
   2423 						DRM_ERROR("bad SET_RESOURCE (tex)\n");
   2424 						return -EINVAL;
   2425 					}
   2426 					moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
   2427 					mipmap = reloc->robj;
   2428 				}
   2429 
   2430 				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
   2431 				if (r)
   2432 					return r;
   2433 				ib[idx+1+(i*8)+2] += toffset;
   2434 				ib[idx+1+(i*8)+3] += moffset;
   2435 				break;
   2436 			case SQ_TEX_VTX_VALID_BUFFER:
   2437 			{
   2438 				uint64_t offset64;
   2439 				/* vtx base */
   2440 				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2441 				if (r) {
   2442 					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
   2443 					return -EINVAL;
   2444 				}
   2445 				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
   2446 				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
   2447 				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
   2448 					/* force size to size of the buffer */
   2449 					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
   2450 					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
   2451 				}
   2452 
   2453 				offset64 = reloc->gpu_offset + offset;
   2454 				ib[idx+1+(i*8)+0] = offset64;
   2455 				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
   2456 						    (upper_32_bits(offset64) & 0xff);
   2457 				break;
   2458 			}
   2459 			case SQ_TEX_VTX_INVALID_TEXTURE:
   2460 			case SQ_TEX_VTX_INVALID_BUFFER:
   2461 			default:
   2462 				DRM_ERROR("bad SET_RESOURCE\n");
   2463 				return -EINVAL;
   2464 			}
   2465 		}
   2466 		break;
   2467 	case PACKET3_SET_ALU_CONST:
   2468 		/* XXX fix me ALU const buffers only */
   2469 		break;
   2470 	case PACKET3_SET_BOOL_CONST:
   2471 		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
   2472 		end_reg = 4 * pkt->count + start_reg - 4;
   2473 		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
   2474 		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
   2475 		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
   2476 			DRM_ERROR("bad SET_BOOL_CONST\n");
   2477 			return -EINVAL;
   2478 		}
   2479 		break;
   2480 	case PACKET3_SET_LOOP_CONST:
   2481 		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
   2482 		end_reg = 4 * pkt->count + start_reg - 4;
   2483 		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
   2484 		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
   2485 		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
   2486 			DRM_ERROR("bad SET_LOOP_CONST\n");
   2487 			return -EINVAL;
   2488 		}
   2489 		break;
   2490 	case PACKET3_SET_CTL_CONST:
   2491 		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
   2492 		end_reg = 4 * pkt->count + start_reg - 4;
   2493 		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
   2494 		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
   2495 		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
   2496 			DRM_ERROR("bad SET_CTL_CONST\n");
   2497 			return -EINVAL;
   2498 		}
   2499 		break;
   2500 	case PACKET3_SET_SAMPLER:
   2501 		if (pkt->count % 3) {
   2502 			DRM_ERROR("bad SET_SAMPLER\n");
   2503 			return -EINVAL;
   2504 		}
   2505 		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
   2506 		end_reg = 4 * pkt->count + start_reg - 4;
   2507 		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
   2508 		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
   2509 		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
   2510 			DRM_ERROR("bad SET_SAMPLER\n");
   2511 			return -EINVAL;
   2512 		}
   2513 		break;
   2514 	case PACKET3_STRMOUT_BUFFER_UPDATE:
   2515 		if (pkt->count != 4) {
   2516 			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
   2517 			return -EINVAL;
   2518 		}
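		/* Bit 0 of the control dword requests a store to DST_ADDRESS;
		 * source select 2 in bits [2:1] requests a read from
		 * SRC_ADDRESS.  Each direction needs its own reloc. */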
   2519 		/* Updating memory at DST_ADDRESS. */
   2520 		if (idx_value & 0x1) {
   2521 			u64 offset;
   2522 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2523 			if (r) {
   2524 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
   2525 				return -EINVAL;
   2526 			}
   2527 			offset = radeon_get_ib_value(p, idx+1);
   2528 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
   2529 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
   2530 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%"PRIx64", 0x%lx\n",
   2531 					  offset + 4, radeon_bo_size(reloc->robj));
   2532 				return -EINVAL;
   2533 			}
   2534 			offset += reloc->gpu_offset;
   2535 			ib[idx+1] = offset;
   2536 			ib[idx+2] = upper_32_bits(offset) & 0xff;
   2537 		}
   2538 		/* Reading data from SRC_ADDRESS. */
   2539 		if (((idx_value >> 1) & 0x3) == 2) {
   2540 			u64 offset;
   2541 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2542 			if (r) {
   2543 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
   2544 				return -EINVAL;
   2545 			}
   2546 			offset = radeon_get_ib_value(p, idx+3);
   2547 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
   2548 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
   2549 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%"PRIx64", 0x%lx\n",
   2550 					  offset + 4, radeon_bo_size(reloc->robj));
   2551 				return -EINVAL;
   2552 			}
   2553 			offset += reloc->gpu_offset;
   2554 			ib[idx+3] = offset;
   2555 			ib[idx+4] = upper_32_bits(offset) & 0xff;
   2556 		}
   2557 		break;
   2558 	case PACKET3_MEM_WRITE:
   2559 	{
   2560 		u64 offset;
   2561 
   2562 		if (pkt->count != 3) {
   2563 			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
   2564 			return -EINVAL;
   2565 		}
   2566 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2567 		if (r) {
   2568 			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
   2569 			return -EINVAL;
   2570 		}
   2571 		offset = radeon_get_ib_value(p, idx+0);
   2572 		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
   2573 		if (offset & 0x7) {
   2574 			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
   2575 			return -EINVAL;
   2576 		}
   2577 		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
   2578 			DRM_ERROR("bad MEM_WRITE bo too small: 0x%"PRIx64", 0x%lx\n",
   2579 				  offset + 8, radeon_bo_size(reloc->robj));
   2580 			return -EINVAL;
   2581 		}
   2582 		offset += reloc->gpu_offset;
   2583 		ib[idx+0] = offset;
   2584 		ib[idx+1] = upper_32_bits(offset) & 0xff;
   2585 		break;
   2586 	}
   2587 	case PACKET3_COPY_DW:
   2588 		if (pkt->count != 4) {
   2589 			DRM_ERROR("bad COPY_DW (invalid count)\n");
   2590 			return -EINVAL;
   2591 		}
   2592 		if (idx_value & 0x1) {
   2593 			u64 offset;
   2594 			/* SRC is memory. */
   2595 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2596 			if (r) {
   2597 				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
   2598 				return -EINVAL;
   2599 			}
   2600 			offset = radeon_get_ib_value(p, idx+1);
   2601 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
   2602 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
   2603 				DRM_ERROR("bad COPY_DW src bo too small: 0x%"PRIx64", 0x%lx\n",
   2604 					  offset + 4, radeon_bo_size(reloc->robj));
   2605 				return -EINVAL;
   2606 			}
   2607 			offset += reloc->gpu_offset;
   2608 			ib[idx+1] = offset;
   2609 			ib[idx+2] = upper_32_bits(offset) & 0xff;
   2610 		} else {
   2611 			/* SRC is a reg. */
   2612 			reg = radeon_get_ib_value(p, idx+1) << 2;
   2613 			if (!evergreen_is_safe_reg(p, reg)) {
   2614 				dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
   2615 					 reg, idx + 1);
   2616 				return -EINVAL;
   2617 			}
   2618 		}
   2619 		if (idx_value & 0x2) {
   2620 			u64 offset;
   2621 			/* DST is memory. */
   2622 			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
   2623 			if (r) {
   2624 				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
   2625 				return -EINVAL;
   2626 			}
   2627 			offset = radeon_get_ib_value(p, idx+3);
   2628 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
   2629 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
   2630 				DRM_ERROR("bad COPY_DW dst bo too small: 0x%"PRIx64", 0x%lx\n",
   2631 					  offset + 4, radeon_bo_size(reloc->robj));
   2632 				return -EINVAL;
   2633 			}
   2634 			offset += reloc->gpu_offset;
   2635 			ib[idx+3] = offset;
   2636 			ib[idx+4] = upper_32_bits(offset) & 0xff;
   2637 		} else {
   2638 			/* DST is a reg. */
   2639 			reg = radeon_get_ib_value(p, idx+3) << 2;
   2640 			if (!evergreen_is_safe_reg(p, reg)) {
   2641 				dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
   2642 					 reg, idx + 3);
   2643 				return -EINVAL;
   2644 			}
   2645 		}
   2646 		break;
   2647 	case PACKET3_NOP:
   2648 		break;
   2649 	default:
   2650 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
   2651 		return -EINVAL;
   2652 	}
   2653 	return 0;
   2654 }
   2655 
   2656 int evergreen_cs_parse(struct radeon_cs_parser *p)
   2657 {
   2658 	struct radeon_cs_packet pkt;
   2659 	struct evergreen_cs_track *track;
   2660 	u32 tmp;
   2661 	int r;
   2662 
   2663 	if (p->track == NULL) {
   2664 		/* initialize tracker, we are in kms */
   2665 		track = kzalloc(sizeof(*track), GFP_KERNEL);
   2666 		if (track == NULL)
   2667 			return -ENOMEM;
   2668 		evergreen_cs_track_init(track);
   2669 		if (p->rdev->family >= CHIP_CAYMAN) {
   2670 			tmp = p->rdev->config.cayman.tile_config;
   2671 			track->reg_safe_bm = cayman_reg_safe_bm;
   2672 		} else {
   2673 			tmp = p->rdev->config.evergreen.tile_config;
   2674 			track->reg_safe_bm = evergreen_reg_safe_bm;
   2675 		}
   2676 		BUILD_BUG_ON(ARRAY_SIZE(cayman_reg_safe_bm) != REG_SAFE_BM_SIZE);
   2677 		BUILD_BUG_ON(ARRAY_SIZE(evergreen_reg_safe_bm) != REG_SAFE_BM_SIZE);
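		/* Decode tile_config: bits [3:0] give the pipe count,
		 * [7:4] the bank count, [11:8] the group size and
		 * [15:12] the row size. */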
   2678 		switch (tmp & 0xf) {
   2679 		case 0:
   2680 			track->npipes = 1;
   2681 			break;
   2682 		case 1:
   2683 		default:
   2684 			track->npipes = 2;
   2685 			break;
   2686 		case 2:
   2687 			track->npipes = 4;
   2688 			break;
   2689 		case 3:
   2690 			track->npipes = 8;
   2691 			break;
   2692 		}
   2693 
   2694 		switch ((tmp & 0xf0) >> 4) {
   2695 		case 0:
   2696 			track->nbanks = 4;
   2697 			break;
   2698 		case 1:
   2699 		default:
   2700 			track->nbanks = 8;
   2701 			break;
   2702 		case 2:
   2703 			track->nbanks = 16;
   2704 			break;
   2705 		}
   2706 
   2707 		switch ((tmp & 0xf00) >> 8) {
   2708 		case 0:
   2709 			track->group_size = 256;
   2710 			break;
   2711 		case 1:
   2712 		default:
   2713 			track->group_size = 512;
   2714 			break;
   2715 		}
   2716 
   2717 		switch ((tmp & 0xf000) >> 12) {
   2718 		case 0:
   2719 			track->row_size = 1;
   2720 			break;
   2721 		case 1:
   2722 		default:
   2723 			track->row_size = 2;
   2724 			break;
   2725 		case 2:
   2726 			track->row_size = 4;
   2727 			break;
   2728 		}
   2729 
   2730 		p->track = track;
   2731 	}
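	/* Walk every packet in the IB: type-0 packets are register writes,
	 * type-2 packets are padding NOPs, type-3 packets carry commands. */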
   2732 	do {
   2733 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
   2734 		if (r) {
   2735 			kfree(p->track);
   2736 			p->track = NULL;
   2737 			return r;
   2738 		}
   2739 		p->idx += pkt.count + 2;
   2740 		switch (pkt.type) {
   2741 		case RADEON_PACKET_TYPE0:
   2742 			r = evergreen_cs_parse_packet0(p, &pkt);
   2743 			break;
   2744 		case RADEON_PACKET_TYPE2:
   2745 			break;
   2746 		case RADEON_PACKET_TYPE3:
   2747 			r = evergreen_packet3_check(p, &pkt);
   2748 			break;
   2749 		default:
    2750 			DRM_ERROR("Unknown packet type %d!\n", pkt.type);
   2751 			kfree(p->track);
   2752 			p->track = NULL;
   2753 			return -EINVAL;
   2754 		}
   2755 		if (r) {
   2756 			kfree(p->track);
   2757 			p->track = NULL;
   2758 			return r;
   2759 		}
   2760 	} while (p->idx < p->chunk_ib->length_dw);
   2761 #if 0
   2762 	for (r = 0; r < p->ib.length_dw; r++) {
   2763 		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
   2764 		mdelay(1);
   2765 	}
   2766 #endif
   2767 	kfree(p->track);
   2768 	p->track = NULL;
   2769 	return 0;
   2770 }
   2771 
   2772 /**
   2773  * evergreen_dma_cs_parse() - parse the DMA IB
   2774  * @p:		parser structure holding parsing context.
   2775  *
    2776  * Parses the DMA IB from the CS ioctl, patches the GPU addresses
    2777  * based on the reloc information, and checks for errors. (Evergreen-Cayman)
   2779  * Returns 0 for success and an error on failure.
   2780  **/
   2781 int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
   2782 {
   2783 	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
   2784 	struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
   2785 	u32 header, cmd, count, sub_cmd;
   2786 	uint32_t *ib = p->ib.ptr;
   2787 	u32 idx;
   2788 	u64 src_offset, dst_offset, dst2_offset;
   2789 	int r;
   2790 
   2791 	do {
   2792 		if (p->idx >= ib_chunk->length_dw) {
    2793 			DRM_ERROR("Cannot parse packet at %d after CS end %d!\n",
   2794 				  p->idx, ib_chunk->length_dw);
   2795 			return -EINVAL;
   2796 		}
   2797 		idx = p->idx;
   2798 		header = radeon_get_ib_value(p, idx);
   2799 		cmd = GET_DMA_CMD(header);
   2800 		count = GET_DMA_COUNT(header);
   2801 		sub_cmd = GET_DMA_SUB_CMD(header);
   2802 
   2803 		switch (cmd) {
   2804 		case DMA_PACKET_WRITE:
   2805 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
   2806 			if (r) {
   2807 				DRM_ERROR("bad DMA_PACKET_WRITE\n");
   2808 				return -EINVAL;
   2809 			}
   2810 			switch (sub_cmd) {
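			/* Tiled destinations are programmed in 256-byte units
			 * (address >> 8); linear ones carry a full byte address
			 * split across two dwords. */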
   2811 			/* tiled */
   2812 			case 8:
   2813 				dst_offset = radeon_get_ib_value(p, idx+1);
   2814 				dst_offset <<= 8;
   2815 
   2816 				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
   2817 				p->idx += count + 7;
   2818 				break;
   2819 			/* linear */
   2820 			case 0:
   2821 				dst_offset = radeon_get_ib_value(p, idx+1);
   2822 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
   2823 
   2824 				ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
   2825 				ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
   2826 				p->idx += count + 3;
   2827 				break;
   2828 			default:
   2829 				DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
   2830 				return -EINVAL;
   2831 			}
   2832 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
   2833 				dev_warn(p->dev, "DMA write buffer too small (%"PRIu64" %lu)\n",
   2834 					 dst_offset, radeon_bo_size(dst_reloc->robj));
   2835 				return -EINVAL;
   2836 			}
   2837 			break;
   2838 		case DMA_PACKET_COPY:
   2839 			r = r600_dma_cs_next_reloc(p, &src_reloc);
   2840 			if (r) {
   2841 				DRM_ERROR("bad DMA_PACKET_COPY\n");
   2842 				return -EINVAL;
   2843 			}
   2844 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
   2845 			if (r) {
   2846 				DRM_ERROR("bad DMA_PACKET_COPY\n");
   2847 				return -EINVAL;
   2848 			}
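			/* Every copy variant consumes the src and dst relocs
			 * fetched above; the broadcast forms pull a second dst
			 * reloc in their case below. */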
   2849 			switch (sub_cmd) {
   2850 			/* Copy L2L, DW aligned */
   2851 			case 0x00:
   2852 				/* L2L, dw */
   2853 				src_offset = radeon_get_ib_value(p, idx+2);
   2854 				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
   2855 				dst_offset = radeon_get_ib_value(p, idx+1);
   2856 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
   2857 				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
   2858 					dev_warn(p->dev, "DMA L2L, dw src buffer too small (%"PRIu64" %lu)\n",
   2859 							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
   2860 					return -EINVAL;
   2861 				}
   2862 				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
   2863 					dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%"PRIu64" %lu)\n",
   2864 							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
   2865 					return -EINVAL;
   2866 				}
   2867 				ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
   2868 				ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
   2869 				ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
   2870 				ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
   2871 				p->idx += 5;
   2872 				break;
   2873 			/* Copy L2T/T2L */
   2874 			case 0x08:
   2875 				/* detile bit */
   2876 				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
   2877 					/* tiled src, linear dst */
   2878 					src_offset = radeon_get_ib_value(p, idx+1);
   2879 					src_offset <<= 8;
   2880 					ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
   2881 
   2882 					dst_offset = radeon_get_ib_value(p, idx + 7);
   2883 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
   2884 					ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
   2885 					ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
   2886 				} else {
   2887 					/* linear src, tiled dst */
   2888 					src_offset = radeon_get_ib_value(p, idx+7);
   2889 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
   2890 					ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
   2891 					ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
   2892 
   2893 					dst_offset = radeon_get_ib_value(p, idx+1);
   2894 					dst_offset <<= 8;
   2895 					ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
   2896 				}
   2897 				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
   2898 					dev_warn(p->dev, "DMA L2T, src buffer too small (%"PRIu64" %lu)\n",
   2899 							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
   2900 					return -EINVAL;
   2901 				}
   2902 				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
   2903 					dev_warn(p->dev, "DMA L2T, dst buffer too small (%"PRIu64" %lu)\n",
   2904 							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
   2905 					return -EINVAL;
   2906 				}
   2907 				p->idx += 9;
   2908 				break;
   2909 			/* Copy L2L, byte aligned */
   2910 			case 0x40:
   2911 				/* L2L, byte */
   2912 				src_offset = radeon_get_ib_value(p, idx+2);
   2913 				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
   2914 				dst_offset = radeon_get_ib_value(p, idx+1);
   2915 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
   2916 				if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
   2917 					dev_warn(p->dev, "DMA L2L, byte src buffer too small (%"PRIu64" %lu)\n",
   2918 							src_offset + count, radeon_bo_size(src_reloc->robj));
   2919 					return -EINVAL;
   2920 				}
   2921 				if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
   2922 					dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%"PRIu64" %lu)\n",
   2923 							dst_offset + count, radeon_bo_size(dst_reloc->robj));
   2924 					return -EINVAL;
   2925 				}
   2926 				ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
   2927 				ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xffffffff);
   2928 				ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
   2929 				ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
   2930 				p->idx += 5;
   2931 				break;
   2932 			/* Copy L2L, partial */
   2933 			case 0x41:
   2934 				/* L2L, partial */
   2935 				if (p->family < CHIP_CAYMAN) {
    2936 					DRM_ERROR("L2L Partial is cayman only!\n");
   2937 					return -EINVAL;
   2938 				}
   2939 				ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff);
   2940 				ib[idx+2] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
   2941 				ib[idx+4] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
   2942 				ib[idx+5] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
   2943 
   2944 				p->idx += 9;
   2945 				break;
   2946 			/* Copy L2L, DW aligned, broadcast */
   2947 			case 0x44:
   2948 				/* L2L, dw, broadcast */
   2949 				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
   2950 				if (r) {
   2951 					DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
   2952 					return -EINVAL;
   2953 				}
   2954 				dst_offset = radeon_get_ib_value(p, idx+1);
   2955 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
   2956 				dst2_offset = radeon_get_ib_value(p, idx+2);
   2957 				dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
   2958 				src_offset = radeon_get_ib_value(p, idx+3);
   2959 				src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
   2960 				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
   2961 					dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%"PRIu64" %lu)\n",
   2962 							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
   2963 					return -EINVAL;
   2964 				}
   2965 				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
   2966 					dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%"PRIu64" %lu)\n",
   2967 							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
   2968 					return -EINVAL;
   2969 				}
   2970 				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
   2971 					dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%"PRIu64" %lu)\n",
   2972 							dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
   2973 					return -EINVAL;
   2974 				}
   2975 				ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
   2976 				ib[idx+2] += (u32)(dst2_reloc->gpu_offset & 0xfffffffc);
   2977 				ib[idx+3] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
   2978 				ib[idx+4] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
   2979 				ib[idx+5] += upper_32_bits(dst2_reloc->gpu_offset) & 0xff;
   2980 				ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
   2981 				p->idx += 7;
   2982 				break;
   2983 			/* Copy L2T Frame to Field */
   2984 			case 0x48:
   2985 				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
   2986 					DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
   2987 					return -EINVAL;
   2988 				}
   2989 				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
   2990 				if (r) {
   2991 					DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
   2992 					return -EINVAL;
   2993 				}
   2994 				dst_offset = radeon_get_ib_value(p, idx+1);
   2995 				dst_offset <<= 8;
   2996 				dst2_offset = radeon_get_ib_value(p, idx+2);
   2997 				dst2_offset <<= 8;
   2998 				src_offset = radeon_get_ib_value(p, idx+8);
   2999 				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
   3000 				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
   3001 					dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%"PRIu64" %lu)\n",
   3002 							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
   3003 					return -EINVAL;
   3004 				}
   3005 				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
    3006 					dev_warn(p->dev, "DMA L2T, frame to fields dst buffer too small (%"PRIu64" %lu)\n",
   3007 							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
   3008 					return -EINVAL;
   3009 				}
   3010 				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
    3011 					dev_warn(p->dev, "DMA L2T, frame to fields dst2 buffer too small (%"PRIu64" %lu)\n",
   3012 							dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
   3013 					return -EINVAL;
   3014 				}
   3015 				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
   3016 				ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
   3017 				ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
   3018 				ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
   3019 				p->idx += 10;
   3020 				break;
   3021 			/* Copy L2T/T2L, partial */
   3022 			case 0x49:
   3023 				/* L2T, T2L partial */
   3024 				if (p->family < CHIP_CAYMAN) {
    3025 					DRM_ERROR("L2T, T2L Partial is cayman only!\n");
   3026 					return -EINVAL;
   3027 				}
   3028 				/* detile bit */
   3029 				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
   3030 					/* tiled src, linear dst */
   3031 					ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
   3032 
   3033 					ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
   3034 					ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
   3035 				} else {
   3036 					/* linear src, tiled dst */
   3037 					ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
   3038 					ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
   3039 
   3040 					ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
   3041 				}
   3042 				p->idx += 12;
   3043 				break;
   3044 			/* Copy L2T broadcast */
   3045 			case 0x4b:
   3046 				/* L2T, broadcast */
   3047 				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
   3048 					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
   3049 					return -EINVAL;
   3050 				}
   3051 				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
   3052 				if (r) {
   3053 					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
   3054 					return -EINVAL;
   3055 				}
   3056 				dst_offset = radeon_get_ib_value(p, idx+1);
   3057 				dst_offset <<= 8;
   3058 				dst2_offset = radeon_get_ib_value(p, idx+2);
   3059 				dst2_offset <<= 8;
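         				/* broadcast copies write the same linear source to
         				 * both tiled destinations, so dst and dst2 are
         				 * bounds-checked separately */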
   3060 				src_offset = radeon_get_ib_value(p, idx+8);
   3061 				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
   3062 				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
   3063 					dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%"PRIu64" %lu)\n",
   3064 							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
   3065 					return -EINVAL;
   3066 				}
   3067 				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
   3068 					dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%"PRIu64" %lu)\n",
   3069 							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
   3070 					return -EINVAL;
   3071 				}
   3072 				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
   3073 					dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%"PRIu64" %lu)\n",
   3074 							dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
   3075 					return -EINVAL;
   3076 				}
   3077 				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
   3078 				ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
   3079 				ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
   3080 				ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
   3081 				p->idx += 10;
   3082 				break;
   3083 			/* Copy L2T/T2L (tile units) */
   3084 			case 0x4c:
   3085 				/* L2T, T2L */
   3086 				/* detile bit */
   3087 				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
   3088 					/* tiled src, linear dst */
   3089 					src_offset = radeon_get_ib_value(p, idx+1);
   3090 					src_offset <<= 8;
   3091 					ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
   3092 
   3093 					dst_offset = radeon_get_ib_value(p, idx+7);
   3094 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
   3095 					ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
   3096 					ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
   3097 				} else {
   3098 					/* linear src, tiled dst */
   3099 					src_offset = radeon_get_ib_value(p, idx+7);
   3100 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
   3101 					ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
   3102 					ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
   3103 
   3104 					dst_offset = radeon_get_ib_value(p, idx+1);
   3105 					dst_offset <<= 8;
   3106 					ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
   3107 				}
   3108 				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
   3109 					dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%"PRIu64" %lu)\n",
   3110 							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
   3111 					return -EINVAL;
   3112 				}
   3113 				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
   3114 					dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%"PRIu64" %lu)\n",
   3115 							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
   3116 					return -EINVAL;
   3117 				}
   3118 				p->idx += 9;
   3119 				break;
   3120 			/* Copy T2T, partial (tile units) */
   3121 			case 0x4d:
   3122 				/* T2T partial */
   3123 				if (p->family < CHIP_CAYMAN) {
    3124 					DRM_ERROR("T2T Partial is cayman only !\n");
   3125 					return -EINVAL;
   3126 				}
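         				/* T2T works in whole tile units: both relocations are
         				 * 256-byte-aligned tile addresses, so only the shifted
         				 * GPU offsets are patched in */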
   3127 				ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
   3128 				ib[idx+4] += (u32)(dst_reloc->gpu_offset >> 8);
   3129 				p->idx += 13;
   3130 				break;
   3131 			/* Copy L2T broadcast (tile units) */
   3132 			case 0x4f:
   3133 				/* L2T, broadcast */
   3134 				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
   3135 					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
   3136 					return -EINVAL;
   3137 				}
   3138 				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
   3139 				if (r) {
   3140 					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
   3141 					return -EINVAL;
   3142 				}
   3143 				dst_offset = radeon_get_ib_value(p, idx+1);
   3144 				dst_offset <<= 8;
   3145 				dst2_offset = radeon_get_ib_value(p, idx+2);
   3146 				dst2_offset <<= 8;
   3147 				src_offset = radeon_get_ib_value(p, idx+8);
   3148 				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
   3149 				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
   3150 					dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%"PRIu64" %lu)\n",
   3151 							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
   3152 					return -EINVAL;
   3153 				}
   3154 				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
   3155 					dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%"PRIu64" %lu)\n",
   3156 							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
   3157 					return -EINVAL;
   3158 				}
   3159 				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
   3160 					dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%"PRIu64" %lu)\n",
   3161 							dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
   3162 					return -EINVAL;
   3163 				}
   3164 				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
   3165 				ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
   3166 				ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
   3167 				ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
   3168 				p->idx += 10;
   3169 				break;
   3170 			default:
   3171 				DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
   3172 				return -EINVAL;
   3173 			}
   3174 			break;
   3175 		case DMA_PACKET_CONSTANT_FILL:
   3176 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
   3177 			if (r) {
   3178 				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
   3179 				return -EINVAL;
   3180 			}
   3181 			dst_offset = radeon_get_ib_value(p, idx+1);
   3182 			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
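         			/* 40-bit address: low 32 bits in dword 1, bits 39:32
         			 * carried in bits 23:16 of dword 3 */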
   3183 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
   3184 				dev_warn(p->dev, "DMA constant fill buffer too small (%"PRIu64" %lu)\n",
    3185 				 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
   3186 				return -EINVAL;
   3187 			}
   3188 			ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
   3189 			ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
   3190 			p->idx += 4;
   3191 			break;
   3192 		case DMA_PACKET_NOP:
   3193 			p->idx += 1;
   3194 			break;
   3195 		default:
   3196 			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
   3197 			return -EINVAL;
   3198 		}
   3199 	} while (p->idx < p->chunk_ib->length_dw);
   3200 #if 0
    3201 	for (r = 0; r < p->ib.length_dw; r++) {
   3202 		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
   3203 		mdelay(1);
   3204 	}
   3205 #endif
   3206 	return 0;
   3207 }
   3208 
   3209 /* vm parser */
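         /*
          * With VM enabled, the addresses in the IB are already GPU virtual
          * addresses, so no buffer relocation is performed; the checkers below
          * only need to reject packets and register writes that could reach
          * privileged state.
          */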
   3210 static bool evergreen_vm_reg_valid(u32 reg)
   3211 {
   3212 	/* context regs are fine */
   3213 	if (reg >= 0x28000)
   3214 		return true;
   3215 
   3216 	/* check config regs */
   3217 	switch (reg) {
   3218 	case WAIT_UNTIL:
   3219 	case GRBM_GFX_INDEX:
   3220 	case CP_STRMOUT_CNTL:
   3221 	case CP_COHER_CNTL:
   3222 	case CP_COHER_SIZE:
   3223 	case VGT_VTX_VECT_EJECT_REG:
   3224 	case VGT_CACHE_INVALIDATION:
   3225 	case VGT_GS_VERTEX_REUSE:
   3226 	case VGT_PRIMITIVE_TYPE:
   3227 	case VGT_INDEX_TYPE:
   3228 	case VGT_NUM_INDICES:
   3229 	case VGT_NUM_INSTANCES:
   3230 	case VGT_COMPUTE_DIM_X:
   3231 	case VGT_COMPUTE_DIM_Y:
   3232 	case VGT_COMPUTE_DIM_Z:
   3233 	case VGT_COMPUTE_START_X:
   3234 	case VGT_COMPUTE_START_Y:
   3235 	case VGT_COMPUTE_START_Z:
   3236 	case VGT_COMPUTE_INDEX:
   3237 	case VGT_COMPUTE_THREAD_GROUP_SIZE:
   3238 	case VGT_HS_OFFCHIP_PARAM:
   3239 	case PA_CL_ENHANCE:
   3240 	case PA_SU_LINE_STIPPLE_VALUE:
   3241 	case PA_SC_LINE_STIPPLE_STATE:
   3242 	case PA_SC_ENHANCE:
   3243 	case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
   3244 	case SQ_DYN_GPR_SIMD_LOCK_EN:
   3245 	case SQ_CONFIG:
   3246 	case SQ_GPR_RESOURCE_MGMT_1:
   3247 	case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
   3248 	case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
   3249 	case SQ_CONST_MEM_BASE:
   3250 	case SQ_STATIC_THREAD_MGMT_1:
   3251 	case SQ_STATIC_THREAD_MGMT_2:
   3252 	case SQ_STATIC_THREAD_MGMT_3:
   3253 	case SPI_CONFIG_CNTL:
   3254 	case SPI_CONFIG_CNTL_1:
   3255 	case TA_CNTL_AUX:
   3256 	case DB_DEBUG:
   3257 	case DB_DEBUG2:
   3258 	case DB_DEBUG3:
   3259 	case DB_DEBUG4:
   3260 	case DB_WATERMARKS:
   3261 	case TD_PS_BORDER_COLOR_INDEX:
   3262 	case TD_PS_BORDER_COLOR_RED:
   3263 	case TD_PS_BORDER_COLOR_GREEN:
   3264 	case TD_PS_BORDER_COLOR_BLUE:
   3265 	case TD_PS_BORDER_COLOR_ALPHA:
   3266 	case TD_VS_BORDER_COLOR_INDEX:
   3267 	case TD_VS_BORDER_COLOR_RED:
   3268 	case TD_VS_BORDER_COLOR_GREEN:
   3269 	case TD_VS_BORDER_COLOR_BLUE:
   3270 	case TD_VS_BORDER_COLOR_ALPHA:
   3271 	case TD_GS_BORDER_COLOR_INDEX:
   3272 	case TD_GS_BORDER_COLOR_RED:
   3273 	case TD_GS_BORDER_COLOR_GREEN:
   3274 	case TD_GS_BORDER_COLOR_BLUE:
   3275 	case TD_GS_BORDER_COLOR_ALPHA:
   3276 	case TD_HS_BORDER_COLOR_INDEX:
   3277 	case TD_HS_BORDER_COLOR_RED:
   3278 	case TD_HS_BORDER_COLOR_GREEN:
   3279 	case TD_HS_BORDER_COLOR_BLUE:
   3280 	case TD_HS_BORDER_COLOR_ALPHA:
   3281 	case TD_LS_BORDER_COLOR_INDEX:
   3282 	case TD_LS_BORDER_COLOR_RED:
   3283 	case TD_LS_BORDER_COLOR_GREEN:
   3284 	case TD_LS_BORDER_COLOR_BLUE:
   3285 	case TD_LS_BORDER_COLOR_ALPHA:
   3286 	case TD_CS_BORDER_COLOR_INDEX:
   3287 	case TD_CS_BORDER_COLOR_RED:
   3288 	case TD_CS_BORDER_COLOR_GREEN:
   3289 	case TD_CS_BORDER_COLOR_BLUE:
   3290 	case TD_CS_BORDER_COLOR_ALPHA:
   3291 	case SQ_ESGS_RING_SIZE:
   3292 	case SQ_GSVS_RING_SIZE:
   3293 	case SQ_ESTMP_RING_SIZE:
   3294 	case SQ_GSTMP_RING_SIZE:
   3295 	case SQ_HSTMP_RING_SIZE:
   3296 	case SQ_LSTMP_RING_SIZE:
   3297 	case SQ_PSTMP_RING_SIZE:
   3298 	case SQ_VSTMP_RING_SIZE:
   3299 	case SQ_ESGS_RING_ITEMSIZE:
   3300 	case SQ_ESTMP_RING_ITEMSIZE:
   3301 	case SQ_GSTMP_RING_ITEMSIZE:
   3302 	case SQ_GSVS_RING_ITEMSIZE:
   3303 	case SQ_GS_VERT_ITEMSIZE:
   3304 	case SQ_GS_VERT_ITEMSIZE_1:
   3305 	case SQ_GS_VERT_ITEMSIZE_2:
   3306 	case SQ_GS_VERT_ITEMSIZE_3:
   3307 	case SQ_GSVS_RING_OFFSET_1:
   3308 	case SQ_GSVS_RING_OFFSET_2:
   3309 	case SQ_GSVS_RING_OFFSET_3:
   3310 	case SQ_HSTMP_RING_ITEMSIZE:
   3311 	case SQ_LSTMP_RING_ITEMSIZE:
   3312 	case SQ_PSTMP_RING_ITEMSIZE:
   3313 	case SQ_VSTMP_RING_ITEMSIZE:
   3314 	case VGT_TF_RING_SIZE:
   3315 	case SQ_ESGS_RING_BASE:
   3316 	case SQ_GSVS_RING_BASE:
   3317 	case SQ_ESTMP_RING_BASE:
   3318 	case SQ_GSTMP_RING_BASE:
   3319 	case SQ_HSTMP_RING_BASE:
   3320 	case SQ_LSTMP_RING_BASE:
   3321 	case SQ_PSTMP_RING_BASE:
   3322 	case SQ_VSTMP_RING_BASE:
   3323 	case CAYMAN_VGT_OFFCHIP_LDS_BASE:
   3324 	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
   3325 		return true;
   3326 	default:
   3327 		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
   3328 		return false;
   3329 	}
   3330 }
   3331 
   3332 static int evergreen_vm_packet3_check(struct radeon_device *rdev,
   3333 				      u32 *ib, struct radeon_cs_packet *pkt)
   3334 {
   3335 	u32 idx = pkt->idx + 1;
   3336 	u32 idx_value = ib[idx];
   3337 	u32 start_reg, end_reg, reg, i;
   3338 	u32 command, info;
   3339 
   3340 	switch (pkt->opcode) {
   3341 	case PACKET3_NOP:
   3342 		break;
   3343 	case PACKET3_SET_BASE:
   3344 		if (idx_value != 1) {
    3345 			DRM_ERROR("bad SET_BASE\n");
   3346 			return -EINVAL;
   3347 		}
   3348 		break;
   3349 	case PACKET3_CLEAR_STATE:
   3350 	case PACKET3_INDEX_BUFFER_SIZE:
   3351 	case PACKET3_DISPATCH_DIRECT:
   3352 	case PACKET3_DISPATCH_INDIRECT:
   3353 	case PACKET3_MODE_CONTROL:
   3354 	case PACKET3_SET_PREDICATION:
   3355 	case PACKET3_COND_EXEC:
   3356 	case PACKET3_PRED_EXEC:
   3357 	case PACKET3_DRAW_INDIRECT:
   3358 	case PACKET3_DRAW_INDEX_INDIRECT:
   3359 	case PACKET3_INDEX_BASE:
   3360 	case PACKET3_DRAW_INDEX_2:
   3361 	case PACKET3_CONTEXT_CONTROL:
   3362 	case PACKET3_DRAW_INDEX_OFFSET:
   3363 	case PACKET3_INDEX_TYPE:
   3364 	case PACKET3_DRAW_INDEX:
   3365 	case PACKET3_DRAW_INDEX_AUTO:
   3366 	case PACKET3_DRAW_INDEX_IMMD:
   3367 	case PACKET3_NUM_INSTANCES:
   3368 	case PACKET3_DRAW_INDEX_MULTI_AUTO:
   3369 	case PACKET3_STRMOUT_BUFFER_UPDATE:
   3370 	case PACKET3_DRAW_INDEX_OFFSET_2:
   3371 	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
   3372 	case PACKET3_MPEG_INDEX:
   3373 	case PACKET3_WAIT_REG_MEM:
   3374 	case PACKET3_MEM_WRITE:
   3375 	case PACKET3_SURFACE_SYNC:
   3376 	case PACKET3_EVENT_WRITE:
   3377 	case PACKET3_EVENT_WRITE_EOP:
   3378 	case PACKET3_EVENT_WRITE_EOS:
   3379 	case PACKET3_SET_CONTEXT_REG:
   3380 	case PACKET3_SET_BOOL_CONST:
   3381 	case PACKET3_SET_LOOP_CONST:
   3382 	case PACKET3_SET_RESOURCE:
   3383 	case PACKET3_SET_SAMPLER:
   3384 	case PACKET3_SET_CTL_CONST:
   3385 	case PACKET3_SET_RESOURCE_OFFSET:
   3386 	case PACKET3_SET_CONTEXT_REG_INDIRECT:
   3387 	case PACKET3_SET_RESOURCE_INDIRECT:
   3388 	case CAYMAN_PACKET3_DEALLOC_STATE:
   3389 		break;
   3390 	case PACKET3_COND_WRITE:
   3391 		if (idx_value & 0x100) {
   3392 			reg = ib[idx + 5] * 4;
   3393 			if (!evergreen_vm_reg_valid(reg))
   3394 				return -EINVAL;
   3395 		}
   3396 		break;
   3397 	case PACKET3_COPY_DW:
   3398 		if (idx_value & 0x2) {
   3399 			reg = ib[idx + 3] * 4;
   3400 			if (!evergreen_vm_reg_valid(reg))
   3401 				return -EINVAL;
   3402 		}
   3403 		break;
   3404 	case PACKET3_SET_CONFIG_REG:
   3405 		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
   3406 		end_reg = 4 * pkt->count + start_reg - 4;
   3407 		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
   3408 		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
   3409 		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
   3410 			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
   3411 			return -EINVAL;
   3412 		}
   3413 		for (i = 0; i < pkt->count; i++) {
   3414 			reg = start_reg + (4 * i);
   3415 			if (!evergreen_vm_reg_valid(reg))
   3416 				return -EINVAL;
   3417 		}
   3418 		break;
   3419 	case PACKET3_CP_DMA:
   3420 		command = ib[idx + 4];
   3421 		info = ib[idx + 1];
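         		/*
         		 * info bits 30:29 select the source address space and bits
         		 * 21:20 the destination address space (0 = memory); the low
         		 * 21 bits of command hold the byte count.  SAS/DAS move
         		 * src/dst into register space, SAIC/DAIC suppress address
         		 * incrementing, which is why only one register is checked
         		 * in that case.
         		 */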
   3422 		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
   3423 		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
   3424 		    ((((info & 0x00300000) >> 20) == 0) &&
   3425 		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
   3426 		    ((((info & 0x60000000) >> 29) == 0) &&
   3427 		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
    3428 			/* non mem-to-mem copies require a dword-aligned byte count */
   3429 			if ((command & 0x1fffff) % 4) {
   3430 				DRM_ERROR("CP DMA command requires dw count alignment\n");
   3431 				return -EINVAL;
   3432 			}
   3433 		}
   3434 		if (command & PACKET3_CP_DMA_CMD_SAS) {
   3435 			/* src address space is register */
   3436 			if (((info & 0x60000000) >> 29) == 0) {
   3437 				start_reg = idx_value << 2;
   3438 				if (command & PACKET3_CP_DMA_CMD_SAIC) {
   3439 					reg = start_reg;
   3440 					if (!evergreen_vm_reg_valid(reg)) {
   3441 						DRM_ERROR("CP DMA Bad SRC register\n");
   3442 						return -EINVAL;
   3443 					}
   3444 				} else {
   3445 					for (i = 0; i < (command & 0x1fffff); i++) {
   3446 						reg = start_reg + (4 * i);
   3447 						if (!evergreen_vm_reg_valid(reg)) {
   3448 							DRM_ERROR("CP DMA Bad SRC register\n");
   3449 							return -EINVAL;
   3450 						}
   3451 					}
   3452 				}
   3453 			}
   3454 		}
   3455 		if (command & PACKET3_CP_DMA_CMD_DAS) {
   3456 			/* dst address space is register */
   3457 			if (((info & 0x00300000) >> 20) == 0) {
   3458 				start_reg = ib[idx + 2];
   3459 				if (command & PACKET3_CP_DMA_CMD_DAIC) {
   3460 					reg = start_reg;
   3461 					if (!evergreen_vm_reg_valid(reg)) {
   3462 						DRM_ERROR("CP DMA Bad DST register\n");
   3463 						return -EINVAL;
   3464 					}
   3465 				} else {
   3466 					for (i = 0; i < (command & 0x1fffff); i++) {
   3467 						reg = start_reg + (4 * i);
   3468 						if (!evergreen_vm_reg_valid(reg)) {
   3469 							DRM_ERROR("CP DMA Bad DST register\n");
   3470 							return -EINVAL;
   3471 						}
   3472 					}
   3473 				}
   3474 			}
   3475 		}
   3476 		break;
   3477 	default:
   3478 		return -EINVAL;
   3479 	}
   3480 	return 0;
   3481 }
   3482 
   3483 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
   3484 {
   3485 	int ret = 0;
   3486 	u32 idx = 0;
   3487 	struct radeon_cs_packet pkt;
   3488 
   3489 	do {
   3490 		pkt.idx = idx;
   3491 		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
   3492 		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
   3493 		pkt.one_reg_wr = 0;
   3494 		switch (pkt.type) {
   3495 		case RADEON_PACKET_TYPE0:
   3496 			dev_err(rdev->dev, "Packet0 not allowed!\n");
   3497 			ret = -EINVAL;
   3498 			break;
   3499 		case RADEON_PACKET_TYPE2:
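         			/* type-2 packets are one-dword padding */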
   3500 			idx += 1;
   3501 			break;
   3502 		case RADEON_PACKET_TYPE3:
   3503 			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
   3504 			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
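         			/* skip the header dword plus count + 1 payload dwords */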
   3505 			idx += pkt.count + 2;
   3506 			break;
   3507 		default:
   3508 			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
   3509 			ret = -EINVAL;
   3510 			break;
   3511 		}
   3512 		if (ret)
   3513 			break;
   3514 	} while (idx < ib->length_dw);
   3515 
   3516 	return ret;
   3517 }
   3518 
   3519 /**
   3520  * evergreen_dma_ib_parse() - parse the DMA IB for VM
   3521  * @rdev: radeon_device pointer
   3522  * @ib:	radeon_ib pointer
   3523  *
    3524  * Parses the DMA IB from the VM CS ioctl and
    3525  * checks for errors. (Cayman-SI)
   3526  * Returns 0 for success and an error on failure.
   3527  **/
   3528 int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
   3529 {
   3530 	u32 idx = 0;
   3531 	u32 header, cmd, count, sub_cmd;
   3532 
   3533 	do {
   3534 		header = ib->ptr[idx];
   3535 		cmd = GET_DMA_CMD(header);
   3536 		count = GET_DMA_COUNT(header);
   3537 		sub_cmd = GET_DMA_SUB_CMD(header);
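         		/*
         		 * A single header dword encodes the packet: cmd selects the
         		 * packet family, sub_cmd the variant and count the transfer
         		 * size.  The fixed sizes skipped below must match the
         		 * relocation parser above, e.g. a DW-aligned L2L copy
         		 * (sub 0x00) is always 5 dwords.
         		 */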
   3538 
   3539 		switch (cmd) {
   3540 		case DMA_PACKET_WRITE:
   3541 			switch (sub_cmd) {
   3542 			/* tiled */
   3543 			case 8:
   3544 				idx += count + 7;
   3545 				break;
   3546 			/* linear */
   3547 			case 0:
   3548 				idx += count + 3;
   3549 				break;
   3550 			default:
   3551 				DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
   3552 				return -EINVAL;
   3553 			}
   3554 			break;
   3555 		case DMA_PACKET_COPY:
   3556 			switch (sub_cmd) {
   3557 			/* Copy L2L, DW aligned */
   3558 			case 0x00:
   3559 				idx += 5;
   3560 				break;
   3561 			/* Copy L2T/T2L */
   3562 			case 0x08:
   3563 				idx += 9;
   3564 				break;
   3565 			/* Copy L2L, byte aligned */
   3566 			case 0x40:
   3567 				idx += 5;
   3568 				break;
   3569 			/* Copy L2L, partial */
   3570 			case 0x41:
   3571 				idx += 9;
   3572 				break;
   3573 			/* Copy L2L, DW aligned, broadcast */
   3574 			case 0x44:
   3575 				idx += 7;
   3576 				break;
   3577 			/* Copy L2T Frame to Field */
   3578 			case 0x48:
   3579 				idx += 10;
   3580 				break;
   3581 			/* Copy L2T/T2L, partial */
   3582 			case 0x49:
   3583 				idx += 12;
   3584 				break;
   3585 			/* Copy L2T broadcast */
   3586 			case 0x4b:
   3587 				idx += 10;
   3588 				break;
   3589 			/* Copy L2T/T2L (tile units) */
   3590 			case 0x4c:
   3591 				idx += 9;
   3592 				break;
   3593 			/* Copy T2T, partial (tile units) */
   3594 			case 0x4d:
   3595 				idx += 13;
   3596 				break;
   3597 			/* Copy L2T broadcast (tile units) */
   3598 			case 0x4f:
   3599 				idx += 10;
   3600 				break;
   3601 			default:
   3602 				DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
   3603 				return -EINVAL;
   3604 			}
   3605 			break;
   3606 		case DMA_PACKET_CONSTANT_FILL:
   3607 			idx += 4;
   3608 			break;
   3609 		case DMA_PACKET_NOP:
   3610 			idx += 1;
   3611 			break;
   3612 		default:
   3613 			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
   3614 			return -EINVAL;
   3615 		}
   3616 	} while (idx < ib->length_dw);
   3617 
   3618 	return 0;
   3619 }
   3620