freedreno_screen.c revision 01e04c3f
/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */


#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"

#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "util/u_format_s3tc.h"
#include "util/u_screen.h"
#include "util/u_string.h"
#include "util/u_debug.h"

#include "util/os_time.h"

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/sysinfo.h>

#include "freedreno_screen.h"
#include "freedreno_resource.h"
#include "freedreno_fence.h"
#include "freedreno_query.h"
#include "freedreno_util.h"

#include "a2xx/fd2_screen.h"
#include "a3xx/fd3_screen.h"
#include "a4xx/fd4_screen.h"
#include "a5xx/fd5_screen.h"
#include "a6xx/fd6_screen.h"


#include "ir3/ir3_nir.h"

/* XXX this should go away */
#include "state_tracker/drm_driver.h"

static const struct debug_named_value debug_options[] = {
		{"msgs",      FD_DBG_MSGS,   "Print debug messages"},
		{"disasm",    FD_DBG_DISASM, "Dump TGSI and adreno shader disassembly"},
		{"dclear",    FD_DBG_DCLEAR, "Mark all state dirty after clear"},
		{"ddraw",     FD_DBG_DDRAW,  "Mark all state dirty after draw"},
		{"noscis",    FD_DBG_NOSCIS, "Disable scissor optimization"},
		{"direct",    FD_DBG_DIRECT, "Force inline (SS_DIRECT) state loads"},
		{"nobypass",  FD_DBG_NOBYPASS, "Disable GMEM bypass"},
		{"fraghalf",  FD_DBG_FRAGHALF, "Use half-precision in fragment shader"},
		{"nobin",     FD_DBG_NOBIN,  "Disable hw binning"},
		{"optmsgs",   FD_DBG_OPTMSGS,"Enable optimizer debug messages"},
		{"glsl120",   FD_DBG_GLSL120,"Temporary flag to force GLSL 1.20 (rather than 1.30) on a3xx+"},
		{"shaderdb",  FD_DBG_SHADERDB, "Enable shaderdb output"},
		{"flush",     FD_DBG_FLUSH,  "Force flush after every draw"},
		{"deqp",      FD_DBG_DEQP,   "Enable dEQP hacks"},
		{"inorder",   FD_DBG_INORDER,"Disable reordering for draws/blits"},
		{"bstat",     FD_DBG_BSTAT,  "Print batch stats at context destroy"},
		{"nogrow",    FD_DBG_NOGROW, "Disable \"growable\" cmdstream buffers, even if kernel supports it"},
		{"lrz",       FD_DBG_LRZ,    "Enable experimental LRZ support (a5xx+)"},
		{"noindirect",FD_DBG_NOINDR, "Disable hw indirect draws (emulate on CPU)"},
		{"noblit",    FD_DBG_NOBLIT, "Disable blitter (fallback to generic blit path)"},
		{"hiprio",    FD_DBG_HIPRIO, "Force high-priority context"},
		{"ttile",     FD_DBG_TTILE,  "Enable texture tiling (a5xx)"},
		{"perfcntrs", FD_DBG_PERFC,  "Expose performance counters"},
		{"softpin",   FD_DBG_SOFTPIN,"Enable softpin command submission (experimental)"},
		DEBUG_NAMED_VALUE_END
};

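/* Debug flags are parsed once from the FD_MESA_DEBUG environment
 * variable, using the option names defined above:
 */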
DEBUG_GET_ONCE_FLAGS_OPTION(fd_mesa_debug, "FD_MESA_DEBUG", debug_options, 0)

int fd_mesa_debug = 0;
bool fd_binning_enabled = true;
static bool glsl120 = false;

static const struct debug_named_value shader_debug_options[] = {
		{"vs", FD_DBG_SHADER_VS, "Print shader disasm for vertex shaders"},
		{"fs", FD_DBG_SHADER_FS, "Print shader disasm for fragment shaders"},
		{"cs", FD_DBG_SHADER_CS, "Print shader disasm for compute shaders"},
		DEBUG_NAMED_VALUE_END
};

DEBUG_GET_ONCE_FLAGS_OPTION(fd_shader_debug, "FD_SHADER_DEBUG", shader_debug_options, 0)

enum fd_shader_debug fd_shader_debug = 0;

static const char *
fd_screen_get_name(struct pipe_screen *pscreen)
{
	static char buffer[128];
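	/* e.g. device_id 330 -> "FD330" */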
	util_snprintf(buffer, sizeof(buffer), "FD%03d",
			fd_screen(pscreen)->device_id);
	return buffer;
}

static const char *
fd_screen_get_vendor(struct pipe_screen *pscreen)
{
	return "freedreno";
}

static const char *
fd_screen_get_device_vendor(struct pipe_screen *pscreen)
{
	return "Qualcomm";
}


static uint64_t
fd_screen_get_timestamp(struct pipe_screen *pscreen)
{
	struct fd_screen *screen = fd_screen(pscreen);

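	/* If the kernel exposes a GPU timestamp counter, scale it from counter
	 * ticks (at max_freq) to nanoseconds; otherwise fall back to CPU time
	 * plus the tracked CPU/GPU delta:
	 */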
	if (screen->has_timestamp) {
		uint64_t n;
		fd_pipe_get_param(screen->pipe, FD_TIMESTAMP, &n);
		debug_assert(screen->max_freq > 0);
		return n * 1000000000 / screen->max_freq;
	} else {
		int64_t cpu_time = os_time_get() * 1000;
		return cpu_time + screen->cpu_gpu_time_delta;
	}

}

static void
fd_screen_destroy(struct pipe_screen *pscreen)
{
	struct fd_screen *screen = fd_screen(pscreen);

	if (screen->pipe)
		fd_pipe_del(screen->pipe);

	if (screen->dev)
		fd_device_del(screen->dev);

	fd_bc_fini(&screen->batch_cache);

	slab_destroy_parent(&screen->transfer_pool);

	mtx_destroy(&screen->lock);

	ralloc_free(screen->compiler);

	free(screen->perfcntr_queries);
	free(screen);
}

/*
 * TODO: either move caps to a2xx/a3xx-specific code, or maybe have some
 * tables for things that differ, if the delta is not too much..
 */
static int
fd_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
{
	struct fd_screen *screen = fd_screen(pscreen);

	/* this is probably not totally correct.. but it's a start: */
	switch (param) {
	/* Supported features (boolean caps). */
	case PIPE_CAP_NPOT_TEXTURES:
	case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
	case PIPE_CAP_ANISOTROPIC_FILTER:
	case PIPE_CAP_POINT_SPRITE:
	case PIPE_CAP_BLEND_EQUATION_SEPARATE:
	case PIPE_CAP_TEXTURE_SWIZZLE:
	case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
	case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
	case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
	case PIPE_CAP_SEAMLESS_CUBE_MAP:
	case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
	case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
	case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
	case PIPE_CAP_STRING_MARKER:
	case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
	case PIPE_CAP_TEXTURE_BARRIER:
	case PIPE_CAP_INVALIDATE_BUFFER:
		return 1;

	case PIPE_CAP_VERTEXID_NOBASE:
		return is_a3xx(screen) || is_a4xx(screen);

	case PIPE_CAP_COMPUTE:
		return has_compute(screen);

	case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
	case PIPE_CAP_PCI_GROUP:
	case PIPE_CAP_PCI_BUS:
	case PIPE_CAP_PCI_DEVICE:
	case PIPE_CAP_PCI_FUNCTION:
	case PIPE_CAP_DEPTH_CLIP_DISABLE_SEPARATE:
		return 0;

	case PIPE_CAP_SM3:
	case PIPE_CAP_PRIMITIVE_RESTART:
	case PIPE_CAP_TGSI_INSTANCEID:
	case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
	case PIPE_CAP_INDEP_BLEND_ENABLE:
	case PIPE_CAP_INDEP_BLEND_FUNC:
	case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
	case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
	case PIPE_CAP_CONDITIONAL_RENDER:
	case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
	case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
	case PIPE_CAP_CLIP_HALFZ:
		return is_a3xx(screen) || is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen);

	case PIPE_CAP_FAKE_SW_MSAA:
		return !fd_screen_get_param(pscreen, PIPE_CAP_TEXTURE_MULTISAMPLE);

	case PIPE_CAP_TEXTURE_MULTISAMPLE:
		return is_a5xx(screen) || is_a6xx(screen);

	case PIPE_CAP_DEPTH_CLIP_DISABLE:
		return is_a3xx(screen) || is_a4xx(screen);

	case PIPE_CAP_POLYGON_OFFSET_CLAMP:
		return is_a5xx(screen) || is_a6xx(screen);

	case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
		if (is_a3xx(screen)) return 16;
		if (is_a4xx(screen)) return 32;
		if (is_a5xx(screen)) return 32;
		if (is_a6xx(screen)) return 32;
		return 0;
	case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
		/* We could possibly emulate more by pretending 2d/rect textures and
		 * splitting high bits of index into 2nd dimension..
		 */
		if (is_a3xx(screen)) return 8192;
		if (is_a4xx(screen)) return 16384;
		if (is_a5xx(screen)) return 16384;
		if (is_a6xx(screen)) return 16384;
		return 0;

	case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
	case PIPE_CAP_CUBE_MAP_ARRAY:
	case PIPE_CAP_SAMPLER_VIEW_TARGET:
	case PIPE_CAP_TEXTURE_QUERY_LOD:
		return is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen);

	case PIPE_CAP_START_INSTANCE:
		/* Note that a5xx can do this, it just can't (at least with
		 * current firmware) do draw_indirect with base_instance.
		 * Since draw_indirect is needed sooner (gles31 and gl40 vs
		 * gl42), hide base_instance on a5xx.  :-/
		 */
		return is_a4xx(screen);

	case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
		return 64;

	case PIPE_CAP_GLSL_FEATURE_LEVEL:
	case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
		if (glsl120)
			return 120;
		return is_ir3(screen) ? 140 : 120;

	case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
		if (is_a5xx(screen) || is_a6xx(screen))
			return 4;
		return 0;

	case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
		if (is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen))
			return 4;
		return 0;

	/* TODO if we need this, do it in nir/ir3 backend to avoid breaking precompile: */
	case PIPE_CAP_FORCE_PERSAMPLE_INTERP:
		return 0;

	case PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION:
		return 0;

	case PIPE_CAP_CONTEXT_PRIORITY_MASK:
		return screen->priority_mask;

	case PIPE_CAP_DRAW_INDIRECT:
		if (is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen))
			return 1;
		return 0;

	case PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT:
		if (is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen))
			return 1;
		return 0;

	case PIPE_CAP_LOAD_CONSTBUF:
		/* name is confusing, but this turns on std430 packing */
		if (is_ir3(screen))
			return 1;
		return 0;

	case PIPE_CAP_MAX_VIEWPORTS:
		return 1;

	case PIPE_CAP_SHAREABLE_SHADERS:
	case PIPE_CAP_GLSL_OPTIMIZE_CONSERVATIVELY:
	/* manage the variants for these ourselves, to avoid breaking precompile: */
	case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
	case PIPE_CAP_VERTEX_COLOR_CLAMPED:
		if (is_ir3(screen))
			return 1;
		return 0;

	/* Stream output. */
	case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
		if (is_ir3(screen))
			return PIPE_MAX_SO_BUFFERS;
		return 0;
	case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
	case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:
		if (is_ir3(screen))
			return 1;
		return 0;
	case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
	case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
		if (is_ir3(screen))
			return 16 * 4;   /* should only be shader out limit? */
		return 0;

	/* Texturing. */
	case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
	case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
		return MAX_MIP_LEVELS;
	case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
		return 11;

	case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
		return (is_a3xx(screen) || is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen)) ? 256 : 0;

	/* Render targets. */
	case PIPE_CAP_MAX_RENDER_TARGETS:
		return screen->max_rts;
	case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
		return is_a3xx(screen) ? 1 : 0;

	/* Queries. */
	case PIPE_CAP_OCCLUSION_QUERY:
		return is_a3xx(screen) || is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen);
	case PIPE_CAP_QUERY_TIMESTAMP:
	case PIPE_CAP_QUERY_TIME_ELAPSED:
		/* only a4xx and later; requires a new enough kernel so we know max_freq: */
		return (screen->max_freq > 0) && (is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen));

	case PIPE_CAP_VENDOR_ID:
		return 0x5143;
	case PIPE_CAP_DEVICE_ID:
		return 0xFFFFFFFF;
	case PIPE_CAP_ACCELERATED:
		return 1;
	case PIPE_CAP_VIDEO_MEMORY:
		DBG("FINISHME: The value returned is incorrect\n");
		return 10;
	case PIPE_CAP_UMA:
		return 1;
	case PIPE_CAP_NATIVE_FENCE_FD:
		return fd_device_version(screen->dev) >= FD_VERSION_FENCE_FD;
	default:
		return u_pipe_screen_get_param_defaults(pscreen, param);
	}
}

static float
fd_screen_get_paramf(struct pipe_screen *pscreen, enum pipe_capf param)
{
	switch (param) {
	case PIPE_CAPF_MAX_LINE_WIDTH:
	case PIPE_CAPF_MAX_LINE_WIDTH_AA:
		/* NOTE: actual value is 127.0f, but this is working around a deqp
		 * bug.. dEQP-GLES3.functional.rasterization.primitives.lines_wide
		 * uses too small of a render target size, and gets confused when
		 * the lines start going offscreen.
		 *
		 * See: https://code.google.com/p/android/issues/detail?id=206513
		 */
		if (fd_mesa_debug & FD_DBG_DEQP)
			return 48.0f;
		return 127.0f;
	case PIPE_CAPF_MAX_POINT_WIDTH:
	case PIPE_CAPF_MAX_POINT_WIDTH_AA:
		return 4092.0f;
	case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
		return 16.0f;
	case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
		return 15.0f;
	case PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE:
	case PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE:
	case PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY:
		return 0.0f;
	}
	debug_printf("unknown paramf %d\n", param);
	return 0;
}

static int
fd_screen_get_shader_param(struct pipe_screen *pscreen,
		enum pipe_shader_type shader,
		enum pipe_shader_cap param)
{
	struct fd_screen *screen = fd_screen(pscreen);

	switch(shader)
	{
	case PIPE_SHADER_FRAGMENT:
	case PIPE_SHADER_VERTEX:
		break;
	case PIPE_SHADER_COMPUTE:
		if (has_compute(screen))
			break;
		return 0;
	case PIPE_SHADER_GEOMETRY:
		/* maybe we could emulate.. */
		return 0;
	default:
		DBG("unknown shader type %d", shader);
		return 0;
	}

	/* this is probably not totally correct.. but it's a start: */
	switch (param) {
	case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
		return 16384;
	case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
		return 8; /* XXX */
	case PIPE_SHADER_CAP_MAX_INPUTS:
	case PIPE_SHADER_CAP_MAX_OUTPUTS:
		return 16;
	case PIPE_SHADER_CAP_MAX_TEMPS:
		return 64; /* Max native temporaries. */
	case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE:
		/* NOTE: the limit for a3xx seems to actually be 512, but
		 * split between VS and FS.  Use the lower limit of 256 to
		 * avoid getting into impossible situations:
		 */
		return ((is_a3xx(screen) || is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen)) ? 4096 : 64) * sizeof(float[4]);
	case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
		return is_ir3(screen) ? 16 : 1;
	case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
		return 1;
	case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
		/* Technically this should be the same as for TEMP/CONST, since
		 * everything is just normal registers.  This is just a temporary
		 * hack until load_input/store_output handle arrays in a similar
		 * way to load_var/store_var..
		 */
		return 0;
	case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
		/* a2xx compiler doesn't handle indirect: */
		return is_ir3(screen) ? 1 : 0;
	case PIPE_SHADER_CAP_SUBROUTINES:
	case PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED:
	case PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED:
	case PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED:
	case PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED:
	case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
	case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
	case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
	case PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS:
	case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
		return 0;
	case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
		return 1;
	case PIPE_SHADER_CAP_INTEGERS:
		if (glsl120)
			return 0;
		return is_ir3(screen) ? 1 : 0;
	case PIPE_SHADER_CAP_INT64_ATOMICS:
		return 0;
	case PIPE_SHADER_CAP_FP16:
		return 0;
	case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
	case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
		return 16;
	case PIPE_SHADER_CAP_PREFERRED_IR:
		if (is_ir3(screen))
			return PIPE_SHADER_IR_NIR;
		return PIPE_SHADER_IR_TGSI;
	case PIPE_SHADER_CAP_SUPPORTED_IRS:
		if (is_ir3(screen)) {
			return (1 << PIPE_SHADER_IR_NIR) | (1 << PIPE_SHADER_IR_TGSI);
		} else {
			return (1 << PIPE_SHADER_IR_TGSI);
		}
		return 0;
	case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
		return 32;
	case PIPE_SHADER_CAP_SCALAR_ISA:
		return is_ir3(screen) ? 1 : 0;
	case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
	case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
		if (is_a5xx(screen) || is_a6xx(screen)) {
			/* a5xx (and a4xx for that matter) has one state-block
			 * for compute-shader SSBO's and another that is shared
			 * by VS/HS/DS/GS/FS..  so to simplify things for now
			 * just advertise SSBOs for FS and CS.  We could possibly
			 * do what blob does, and partition the space for
			 * VS/HS/DS/GS/FS.  The blob advertises:
			 *
			 *   GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS: 4
			 *   GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS: 4
			 *   GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS: 4
			 *   GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS: 4
			 *   GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS: 4
			 *   GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS: 24
			 *   GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS: 24
			 *
			 * I think that way we could avoid having to patch shaders
			 * for actual SSBO indexes by using a static partitioning.
			 *
			 * Note same state block is used for images and buffers,
			 * but images also need texture state for read access
			 * (isam/isam.3d)
			 */
			switch(shader)
			{
			case PIPE_SHADER_FRAGMENT:
			case PIPE_SHADER_COMPUTE:
				return 24;
			default:
				return 0;
			}
		}
		return 0;
	}
	debug_printf("unknown shader param %d\n", param);
	return 0;
}

/* TODO depending on how much the limits differ for a3xx/a4xx, maybe move this
 * into per-generation backend?
 */
static int
fd_get_compute_param(struct pipe_screen *pscreen, enum pipe_shader_ir ir_type,
		enum pipe_compute_cap param, void *ret)
{
	struct fd_screen *screen = fd_screen(pscreen);
	const char * const ir = "ir3";

	if (!has_compute(screen))
		return 0;

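/* Copy the capability value into *ret (when the caller provided storage)
 * and report its size in bytes:
 */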
#define RET(x) do {                  \
   if (ret)                          \
      memcpy(ret, x, sizeof(x));     \
   return sizeof(x);                 \
} while (0)

	switch (param) {
	case PIPE_COMPUTE_CAP_ADDRESS_BITS:
// don't expose 64b pointer support yet, until ir3 supports 64b
// math, otherwise spir64 target is used and we get 64b pointer
// calculations that we can't do yet
//		if (is_a5xx(screen))
//			RET((uint32_t []){ 64 });
		RET((uint32_t []){ 32 });

	case PIPE_COMPUTE_CAP_IR_TARGET:
		if (ret)
			sprintf(ret, "%s", ir);
		return strlen(ir) * sizeof(char);

	case PIPE_COMPUTE_CAP_GRID_DIMENSION:
		RET((uint64_t []) { 3 });

	case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
		RET(((uint64_t []) { 65535, 65535, 65535 }));

	case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
		RET(((uint64_t []) { 1024, 1024, 64 }));

	case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
		RET((uint64_t []) { 1024 });

	case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
		RET((uint64_t []) { screen->ram_size });

	case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
		RET((uint64_t []) { 32768 });

	case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE:
	case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
		RET((uint64_t []) { 4096 });

	case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
		RET((uint64_t []) { screen->ram_size });

	case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
		RET((uint32_t []) { screen->max_freq / 1000000 });

	case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
		RET((uint32_t []) { 9999 });  // TODO

	case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
		RET((uint32_t []) { 1 });

	case PIPE_COMPUTE_CAP_SUBGROUP_SIZE:
		RET((uint32_t []) { 32 });  // TODO

	case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
		RET((uint64_t []) { 1024 }); // TODO
	}

	return 0;
}

static const void *
fd_get_compiler_options(struct pipe_screen *pscreen,
		enum pipe_shader_ir ir, unsigned shader)
{
	struct fd_screen *screen = fd_screen(pscreen);

	if (is_ir3(screen))
		return ir3_get_compiler_options(screen->compiler);

	return NULL;
}

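/* Export a BO as a global (flink) name, a GEM handle, or a dma-buf fd,
 * depending on the requested winsys handle type:
 */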
boolean
fd_screen_bo_get_handle(struct pipe_screen *pscreen,
		struct fd_bo *bo,
		unsigned stride,
		struct winsys_handle *whandle)
{
	whandle->stride = stride;

	if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
		return fd_bo_get_name(bo, &whandle->handle) == 0;
	} else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
		whandle->handle = fd_bo_handle(bo);
		return TRUE;
	} else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
		whandle->handle = fd_bo_dmabuf(bo);
		return TRUE;
	} else {
		return FALSE;
	}
}

struct fd_bo *
fd_screen_bo_from_handle(struct pipe_screen *pscreen,
		struct winsys_handle *whandle)
{
	struct fd_screen *screen = fd_screen(pscreen);
	struct fd_bo *bo;

	if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
		bo = fd_bo_from_name(screen->dev, whandle->handle);
	} else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
		bo = fd_bo_from_handle(screen->dev, whandle->handle, 0);
	} else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
		bo = fd_bo_from_dmabuf(screen->dev, whandle->handle);
	} else {
		DBG("Attempt to import unsupported handle type %d", whandle->type);
		return NULL;
	}

	if (!bo) {
		DBG("ref name 0x%08x failed", whandle->handle);
		return NULL;
	}

	return bo;
}

struct pipe_screen *
fd_screen_create(struct fd_device *dev)
{
	struct fd_screen *screen = CALLOC_STRUCT(fd_screen);
	struct pipe_screen *pscreen;
	uint64_t val;

	fd_mesa_debug = debug_get_option_fd_mesa_debug();
	fd_shader_debug = debug_get_option_fd_shader_debug();

	if (fd_mesa_debug & FD_DBG_NOBIN)
		fd_binning_enabled = false;

	glsl120 = !!(fd_mesa_debug & FD_DBG_GLSL120);

	if (!screen)
		return NULL;

	pscreen = &screen->base;

	screen->dev = dev;
	screen->refcnt = 1;

	// maybe this should be in context?
	screen->pipe = fd_pipe_new(screen->dev, FD_PIPE_3D);
	if (!screen->pipe) {
		DBG("could not create 3d pipe");
		goto fail;
	}

	if (fd_pipe_get_param(screen->pipe, FD_GMEM_SIZE, &val)) {
		DBG("could not get GMEM size");
		goto fail;
	}
	screen->gmemsize_bytes = val;

	if (fd_pipe_get_param(screen->pipe, FD_DEVICE_ID, &val)) {
		DBG("could not get device-id");
		goto fail;
	}
	screen->device_id = val;

	if (fd_pipe_get_param(screen->pipe, FD_MAX_FREQ, &val)) {
		DBG("could not get gpu freq");
		/* this limits what performance related queries are
		 * supported but is not fatal
		 */
		screen->max_freq = 0;
	} else {
		screen->max_freq = val;
		if (fd_pipe_get_param(screen->pipe, FD_TIMESTAMP, &val) == 0)
			screen->has_timestamp = true;
	}

	if (fd_pipe_get_param(screen->pipe, FD_GPU_ID, &val)) {
		DBG("could not get gpu-id");
		goto fail;
	}
	screen->gpu_id = val;

	if (fd_pipe_get_param(screen->pipe, FD_CHIP_ID, &val)) {
		DBG("could not get chip-id");
		/* older kernels may not have this property: */
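		/* Synthesize one from the gpu-id instead, packing core/major/minor
		 * into the upper bytes and assuming a patch level of zero:
		 */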
		unsigned core  = screen->gpu_id / 100;
		unsigned major = (screen->gpu_id % 100) / 10;
		unsigned minor = screen->gpu_id % 10;
		unsigned patch = 0;  /* assume the worst */
		val = (patch & 0xff) | ((minor & 0xff) << 8) |
			((major & 0xff) << 16) | ((core & 0xff) << 24);
	}
	screen->chip_id = val;

	if (fd_pipe_get_param(screen->pipe, FD_NR_RINGS, &val)) {
		DBG("could not get # of rings");
		screen->priority_mask = 0;
	} else {
		/* # of rings equates to number of unique priority values: */
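		/* e.g. 3 rings -> priorities 0..2 -> priority_mask 0b111 */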
		screen->priority_mask = (1 << val) - 1;
	}

	struct sysinfo si;
	sysinfo(&si);
	screen->ram_size = si.totalram;

	DBG("Pipe Info:");
	DBG(" GPU-id:          %d", screen->gpu_id);
	DBG(" Chip-id:         0x%08x", screen->chip_id);
	DBG(" GMEM size:       0x%08x", screen->gmemsize_bytes);

	/* explicitly checking for GPU revisions that are known to work.  This
	 * may be overly conservative for a3xx, where spoofing the gpu_id with
	 * the blob driver seems to generate identical cmdstream dumps.  But
	 * on a2xx, there seem to be small differences between the GPU revs
	 * so it is probably better to actually test first on real hardware
	 * before enabling:
	 *
	 * If you have a different adreno version, feel free to add it to one
	 * of the cases below and see what happens.  And if it works, please
	 * send a patch ;-)
	 */
	switch (screen->gpu_id) {
	case 205:
	case 220:
		fd2_screen_init(pscreen);
		break;
	case 305:
	case 307:
	case 320:
	case 330:
		fd3_screen_init(pscreen);
		break;
	case 420:
	case 430:
		fd4_screen_init(pscreen);
		break;
	case 530:
		fd5_screen_init(pscreen);
		break;
	case 630:
		fd6_screen_init(pscreen);
		break;
	default:
		debug_printf("unsupported GPU: a%03d\n", screen->gpu_id);
		goto fail;
	}

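	/* GMEM tile alignment and number of VSC pipes differ per generation: */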
	if (screen->gpu_id >= 600) {
		screen->gmem_alignw = 32;
		screen->gmem_alignh = 32;
		screen->num_vsc_pipes = 32;
	} else if (screen->gpu_id >= 500) {
		screen->gmem_alignw = 64;
		screen->gmem_alignh = 32;
		screen->num_vsc_pipes = 16;
	} else {
		screen->gmem_alignw = 32;
		screen->gmem_alignh = 32;
		screen->num_vsc_pipes = 8;
	}

	/* NOTE: don't enable reordering on a2xx, since completely untested.
	 * Also, don't enable if we have too old of a kernel to support
	 * growable cmdstream buffers, since memory requirement for cmdstream
	 * buffers would be too much otherwise.
	 */
	if ((screen->gpu_id >= 300) && (fd_device_version(dev) >= FD_VERSION_UNLIMITED_CMDS))
		screen->reorder = !(fd_mesa_debug & FD_DBG_INORDER);

	fd_bc_init(&screen->batch_cache);

	(void) mtx_init(&screen->lock, mtx_plain);

	pscreen->destroy = fd_screen_destroy;
	pscreen->get_param = fd_screen_get_param;
	pscreen->get_paramf = fd_screen_get_paramf;
	pscreen->get_shader_param = fd_screen_get_shader_param;
	pscreen->get_compute_param = fd_get_compute_param;
	pscreen->get_compiler_options = fd_get_compiler_options;

	fd_resource_screen_init(pscreen);
	fd_query_screen_init(pscreen);

	pscreen->get_name = fd_screen_get_name;
	pscreen->get_vendor = fd_screen_get_vendor;
	pscreen->get_device_vendor = fd_screen_get_device_vendor;

	pscreen->get_timestamp = fd_screen_get_timestamp;

	pscreen->fence_reference = fd_fence_ref;
	pscreen->fence_finish = fd_fence_finish;
	pscreen->fence_get_fd = fd_fence_get_fd;

	slab_create_parent(&screen->transfer_pool, sizeof(struct fd_transfer), 16);

	return pscreen;

fail:
	fd_screen_destroy(pscreen);
	return NULL;
}