/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef IR3_CONTEXT_H_
#define IR3_CONTEXT_H_

#include "ir3_compiler.h"
#include "ir3_nir.h"
#include "ir3.h"
/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
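/* e.g. a minimal usage sketch; "clamp_result" and IR3_INSTR_SAT are just
 * illustrative names here, not taken from any particular caller:
 *
 *   instr->flags |= COND(clamp_result, IR3_INSTR_SAT);
 */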

#define DBG(fmt, ...) \
		do { debug_printf("%s:%d: "fmt "\n", \
				__FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)

/**
 * The context for compilation of a single shader.
 */
struct ir3_context {
	struct ir3_compiler *compiler;
	const struct ir3_context_funcs *funcs;

	struct nir_shader *s;

	struct nir_instr *cur_instr;  /* current instruction, just for debug */

	struct ir3 *ir;
	struct ir3_shader_variant *so;

	struct ir3_block *block;      /* the current block */
	struct ir3_block *in_block;   /* block created for shader inputs */

	nir_function_impl *impl;

	/* For fragment shaders, varyings are not actual shader inputs;
	 * instead the hw passes an ij coord which is used with bary.f.
	 *
	 * But NIR doesn't know that; it still declares varyings as
	 * inputs.  So we do all the input tracking normally and fix
	 * things up after compile_instructions().
	 */
	struct ir3_instruction *ij_pixel, *ij_sample, *ij_centroid, *ij_size;

	/* For fragment shaders, gl_FrontFacing and gl_FragCoord: */
	struct ir3_instruction *frag_face, *frag_coord;

	/* For vertex shaders, keep track of the system value sources: */
	struct ir3_instruction *vertex_id, *basevertex, *instance_id;

	/* For fragment shaders: */
	struct ir3_instruction *samp_id, *samp_mask_in;

	/* Compute shader inputs: */
	struct ir3_instruction *local_invocation_id, *work_group_id;

	/* mapping from nir_register to defining instruction: */
	struct hash_table *def_ht;

	unsigned num_arrays;

	/* Tracking for the max level of flow control (branchstack) needed
	 * on a5xx+:
	 */
	unsigned stack, max_stack;

	/* A common pattern for indirect addressing is to request the
	 * same address register multiple times.  To avoid generating
	 * duplicate instruction sequences (which our backend does not
	 * try to clean up, since that should be done at the NIR stage)
	 * we cache the address value generated for a given src value.
	 *
	 * Note that we have to cache these per alignment, since the same
	 * src used for an array of vec1 cannot also be used for an
	 * array of vec4.
	 */
	struct hash_table *addr_ht[4];

	/* Last dst array; for indirect writes we need to insert a var-store.
	 */
	struct ir3_instruction **last_dst;
	unsigned last_dst_n;

	/* maps nir_block to ir3_block, mostly for the purposes of
	 * figuring out the block's successors
	 */
	struct hash_table *block_ht;

	/* on a4xx, bitmask of samplers which need astc+srgb workaround: */
	unsigned astc_srgb;

	unsigned samples;             /* bitmask of x,y sample shifts */

	unsigned max_texture_index;

	/* set if we encounter something we can't handle yet, so we
	 * can bail cleanly and fall back to the TGSI compiler front-end
	 */
	bool error;
};

struct ir3_context_funcs {
	void (*emit_intrinsic_load_ssbo)(struct ir3_context *ctx, nir_intrinsic_instr *intr,
			struct ir3_instruction **dst);
	void (*emit_intrinsic_store_ssbo)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
	struct ir3_instruction * (*emit_intrinsic_atomic_ssbo)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
	void (*emit_intrinsic_store_image)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
	struct ir3_instruction * (*emit_intrinsic_atomic_image)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
};

extern const struct ir3_context_funcs ir3_a4xx_funcs;
extern const struct ir3_context_funcs ir3_a6xx_funcs;
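
/* The hooks above are reached through the per-generation table installed in
 * ctx->funcs, along the lines of (sketch; "intr" being the intrinsic at hand):
 *
 *   ctx->funcs->emit_intrinsic_store_ssbo(ctx, intr);
 */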

struct ir3_context * ir3_context_init(struct ir3_compiler *compiler,
		struct ir3_shader_variant *so);
void ir3_context_free(struct ir3_context *ctx);

/* GPU pointer size in units of 32-bit registers/slots */
static inline
unsigned ir3_pointer_size(struct ir3_context *ctx)
{
	return (ctx->compiler->gpu_id >= 500) ? 2 : 1;
}
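
/* i.e. a5xx and later use 64-bit GPU pointers (two 32-bit slots), earlier
 * generations use 32-bit pointers (one slot).
 */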

struct ir3_instruction ** ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n);
struct ir3_instruction ** ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n);
struct ir3_instruction * const * ir3_get_src(struct ir3_context *ctx, nir_src *src);
void ir3_put_dst(struct ir3_context *ctx, nir_dest *dst);
struct ir3_instruction * ir3_create_collect(struct ir3_context *ctx,
		struct ir3_instruction *const *arr, unsigned arrsz);
void ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
		struct ir3_instruction *src, unsigned base, unsigned n);
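
/* A minimal sketch of packing two scalar values into a vec2 "collect" and
 * splitting it back into components ("x" and "y" are just placeholders):
 *
 *   struct ir3_instruction *comps[2] = { x, y };
 *   struct ir3_instruction *vec = ir3_create_collect(ctx, comps, 2);
 *   struct ir3_instruction *split[2];
 *   ir3_split_dest(ctx->block, split, vec, 0, 2);
 */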

NORETURN void ir3_context_error(struct ir3_context *ctx, const char *format, ...);

#define compile_assert(ctx, cond) do { \
		if (!(cond)) ir3_context_error((ctx), "failed assert: "#cond"\n"); \
	} while (0)
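
/* e.g. (hypothetical condition):
 *
 *   compile_assert(ctx, ncomp <= 4);
 */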

struct ir3_instruction * ir3_get_addr(struct ir3_context *ctx,
		struct ir3_instruction *src, int align);
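/* "align" is the component count of the array elements being indexed (see
 * the addr_ht comment above), so indexing a vec4 array would look roughly
 * like ("idx" being the placeholder index src):
 *
 *   struct ir3_instruction *addr = ir3_get_addr(ctx, idx, 4);
 */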
struct ir3_instruction * ir3_get_predicate(struct ir3_context *ctx,
		struct ir3_instruction *src);

void ir3_declare_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_array * ir3_get_array(struct ir3_context *ctx, nir_register *reg);
struct ir3_instruction *ir3_create_array_load(struct ir3_context *ctx,
		struct ir3_array *arr, int n, struct ir3_instruction *address);
void ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *src, struct ir3_instruction *address);

static inline type_t utype_for_size(unsigned bit_size)
{
	switch (bit_size) {
	case 32: return TYPE_U32;
	case 16: return TYPE_U16;
	case  8: return TYPE_U8;
	default: unreachable("bad bitsize"); return ~0;
	}
}

static inline type_t utype_src(nir_src src)
{ return utype_for_size(nir_src_bit_size(src)); }

static inline type_t utype_dst(nir_dest dst)
{ return utype_for_size(nir_dest_bit_size(dst)); }

#endif /* IR3_CONTEXT_H_ */