/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef IR3_CONTEXT_H_
#define IR3_CONTEXT_H_

#include "ir3_compiler.h"
#include "ir3_nir.h"
#include "ir3.h"

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
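
/* Illustrative usage (IR3_REG_HALF is the half-precision register flag
 * from ir3.h; the surrounding code is hypothetical):
 *
 *    instr->regs[0]->flags |= COND(half, IR3_REG_HALF);
 */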

#define DBG(fmt, ...) \
		do { debug_printf("%s:%d: " fmt "\n", \
				__FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
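
/* Illustrative usage; prefixes the message with the enclosing function
 * name and line number:
 *
 *    DBG("unhandled intrinsic: %u", intr->intrinsic);
 */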

/**
 * The context for compilation of a single shader.
 */
struct ir3_context {
	struct ir3_compiler *compiler;
	const struct ir3_context_funcs *funcs;

	struct nir_shader *s;

	struct nir_instr *cur_instr;  /* current instruction, just for debug */

	struct ir3 *ir;
	struct ir3_shader_variant *so;

	struct ir3_block *block;      /* the current block */
	struct ir3_block *in_block;   /* block created for shader inputs */

	nir_function_impl *impl;

	/* For fragment shaders, varyings are not actual shader inputs;
	 * instead the hw passes an ij coord which is used with
	 * bary.f.
	 *
	 * But NIR doesn't know that; it still declares varyings as
	 * inputs.  So we do all the input tracking normally and fix
	 * things up after compile_instructions().
	 */
	struct ir3_instruction *ij_pixel, *ij_sample, *ij_centroid, *ij_size;
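
	/* For example, a varying at inloc 0 ends up interpolated from the
	 * ij coord rather than read directly, roughly:
	 *
	 *    bary.f r0.x, 0, r0.z    ; r0.z holding the ij coord
	 *
	 * (illustrative disassembly; actual registers depend on RA)
	 */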

	/* for fragment shaders, for gl_FrontFacing and gl_FragCoord: */
	struct ir3_instruction *frag_face, *frag_coord;

	/* For vertex shaders, keep track of the system value sources: */
	struct ir3_instruction *vertex_id, *basevertex, *instance_id;

	/* For fragment shaders: */
	struct ir3_instruction *samp_id, *samp_mask_in;

	/* Compute shader inputs: */
	struct ir3_instruction *local_invocation_id, *work_group_id;

	/* mapping from nir_register to defining instruction: */
	struct hash_table *def_ht;

	unsigned num_arrays;

	/* Tracking for max level of flowcontrol (branchstack) needed
	 * by a5xx+:
	 */
	unsigned stack, max_stack;

	/* A common pattern for indirect addressing is to request the
	 * same address register multiple times.  To avoid generating
	 * duplicate instruction sequences (which our backend does not
	 * try to clean up, since that should be done at the NIR stage)
	 * we cache the address value generated for a given src value.
	 *
	 * Note that we have to cache these per alignment, since the
	 * same src used for an array of vec1 cannot also be used for
	 * an array of vec4.
	 */
	struct hash_table *addr_ht[4];
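
	/* For example, indexing into a vec4 array needs the address
	 * pre-scaled by 4, so ir3_get_addr(ctx, src, 4) and
	 * ir3_get_addr(ctx, src, 1) generate (and cache) different
	 * address instructions for the same src value.
	 */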

	/* Last dst array; for an indirect dst we need to insert a
	 * var-store.
	 */
	struct ir3_instruction **last_dst;
	unsigned last_dst_n;

	/* maps nir_block to ir3_block, mostly for the purposes of
	 * figuring out the block's successors:
	 */
	struct hash_table *block_ht;

	/* on a4xx, bitmask of samplers which need astc+srgb workaround: */
	unsigned astc_srgb;

	unsigned samples;             /* bitmask of x,y sample shifts */

	unsigned max_texture_index;

	/* set if we encounter something we can't handle yet, so we
	 * can bail cleanly and fall back to the TGSI compiler f/e:
	 */
	bool error;
};

struct ir3_context_funcs {
	void (*emit_intrinsic_load_ssbo)(struct ir3_context *ctx,
			nir_intrinsic_instr *intr, struct ir3_instruction **dst);
	void (*emit_intrinsic_store_ssbo)(struct ir3_context *ctx,
			nir_intrinsic_instr *intr);
	struct ir3_instruction * (*emit_intrinsic_atomic_ssbo)(struct ir3_context *ctx,
			nir_intrinsic_instr *intr);
	void (*emit_intrinsic_store_image)(struct ir3_context *ctx,
			nir_intrinsic_instr *intr);
	struct ir3_instruction * (*emit_intrinsic_atomic_image)(struct ir3_context *ctx,
			nir_intrinsic_instr *intr);
};

extern const struct ir3_context_funcs ir3_a4xx_funcs;
extern const struct ir3_context_funcs ir3_a6xx_funcs;
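
/* SSBO/image intrinsics are encoded differently per generation, so the
 * NIR frontend dispatches them through ctx->funcs, e.g. (illustrative):
 *
 *    ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst);
 */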

struct ir3_context * ir3_context_init(struct ir3_compiler *compiler,
		struct ir3_shader_variant *so);
void ir3_context_free(struct ir3_context *ctx);

/* gpu pointer size in units of 32-bit registers/slots */
static inline
unsigned ir3_pointer_size(struct ir3_context *ctx)
{
	return (ctx->compiler->gpu_id >= 500) ? 2 : 1;
}

struct ir3_instruction ** ir3_get_dst_ssa(struct ir3_context *ctx,
		nir_ssa_def *dst, unsigned n);
struct ir3_instruction ** ir3_get_dst(struct ir3_context *ctx,
		nir_dest *dst, unsigned n);
struct ir3_instruction * const * ir3_get_src(struct ir3_context *ctx,
		nir_src *src);
void ir3_put_dst(struct ir3_context *ctx, nir_dest *dst);
struct ir3_instruction * ir3_create_collect(struct ir3_context *ctx,
		struct ir3_instruction *const *arr, unsigned arrsz);
void ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
		struct ir3_instruction *src, unsigned base, unsigned n);
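
/* Typical usage when emitting an instruction (an illustrative sketch,
 * not a specific intrinsic handler):
 *
 *    struct ir3_instruction **dst = ir3_get_dst(ctx, &intr->dest, 1);
 *    struct ir3_instruction * const *src = ir3_get_src(ctx, &intr->src[0]);
 *    dst[0] = ...;   // emit ir3 instruction(s) consuming src[]
 *    ir3_put_dst(ctx, &intr->dest);
 */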

NORETURN void ir3_context_error(struct ir3_context *ctx, const char *format, ...);

#define compile_assert(ctx, cond) do { \
		if (!(cond)) ir3_context_error((ctx), "failed assert: " #cond "\n"); \
	} while (0)
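
/* Illustrative usage:
 *
 *    compile_assert(ctx, intr->num_components <= 4);
 *
 * which routes the failure through ir3_context_error() with a readable
 * message instead of crashing later.
 */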

struct ir3_instruction * ir3_get_addr(struct ir3_context *ctx,
		struct ir3_instruction *src, int align);
struct ir3_instruction * ir3_get_predicate(struct ir3_context *ctx,
		struct ir3_instruction *src);

void ir3_declare_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_array * ir3_get_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_instruction *ir3_create_array_load(struct ir3_context *ctx,
		struct ir3_array *arr, int n, struct ir3_instruction *address);
void ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *src, struct ir3_instruction *address);
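
/* Sketch of an indirect load from a nir_register array (illustrative;
 * 'indirect' stands for whatever nir_src supplies the index):
 *
 *    struct ir3_array *arr = ir3_get_array(ctx, reg);
 *    struct ir3_instruction *addr =
 *            ir3_get_addr(ctx, ir3_get_src(ctx, &indirect)[0], 1);
 *    dst[0] = ir3_create_array_load(ctx, arr, 0, addr);
 */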

static inline type_t utype_for_size(unsigned bit_size)
{
	switch (bit_size) {
	case 32: return TYPE_U32;
	case 16: return TYPE_U16;
	case  8: return TYPE_U8;
	default: unreachable("bad bitsize"); return ~0;
	}
}

static inline type_t utype_src(nir_src src)
{ return utype_for_size(nir_src_bit_size(src)); }

static inline type_t utype_dst(nir_dest dst)
{ return utype_for_size(nir_dest_bit_size(dst)); }

#endif /* IR3_CONTEXT_H_ */