/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24848b8605Smrg
25848b8605Smrg#include <inttypes.h>
26848b8605Smrg#include "util/u_format.h"
27b8e80941Smrg#include "util/crc32.h"
28b8e80941Smrg#include "util/u_math.h"
29848b8605Smrg#include "util/u_memory.h"
30b8e80941Smrg#include "util/ralloc.h"
31b8e80941Smrg#include "util/hash_table.h"
32848b8605Smrg#include "tgsi/tgsi_dump.h"
33b8e80941Smrg#include "tgsi/tgsi_parse.h"
34b8e80941Smrg#include "compiler/nir/nir.h"
35b8e80941Smrg#include "compiler/nir/nir_builder.h"
36b8e80941Smrg#include "compiler/nir_types.h"
37b8e80941Smrg#include "nir/tgsi_to_nir.h"
38848b8605Smrg#include "vc4_context.h"
39848b8605Smrg#include "vc4_qpu.h"
40848b8605Smrg#include "vc4_qir.h"
41848b8605Smrg
42b8e80941Smrgstatic struct qreg
43b8e80941Smrgntq_get_src(struct vc4_compile *c, nir_src src, int i);
44b8e80941Smrgstatic void
45b8e80941Smrgntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
46848b8605Smrg
/* glsl_type -> number of attribute slots it occupies (the "bindless"
 * parameter is part of the NIR type_size callback signature and unused
 * here).
 */
static int
type_size(const struct glsl_type *type, bool bindless)
{
   int slots = glsl_count_attribute_slots(type, false);
   return slots;
}
52848b8605Smrg
53b8e80941Smrgstatic void
54b8e80941Smrgresize_qreg_array(struct vc4_compile *c,
55b8e80941Smrg                  struct qreg **regs,
56b8e80941Smrg                  uint32_t *size,
57b8e80941Smrg                  uint32_t decl_size)
58b8e80941Smrg{
59b8e80941Smrg        if (*size >= decl_size)
60b8e80941Smrg                return;
61848b8605Smrg
62b8e80941Smrg        uint32_t old_size = *size;
63b8e80941Smrg        *size = MAX2(*size * 2, decl_size);
64b8e80941Smrg        *regs = reralloc(c, *regs, struct qreg, *size);
65b8e80941Smrg        if (!*regs) {
66b8e80941Smrg                fprintf(stderr, "Malloc failure\n");
67b8e80941Smrg                abort();
68b8e80941Smrg        }
69b8e80941Smrg
70b8e80941Smrg        for (uint32_t i = old_size; i < *size; i++)
71b8e80941Smrg                (*regs)[i] = c->undef;
72b8e80941Smrg}
73b8e80941Smrg
74b8e80941Smrgstatic void
75b8e80941Smrgntq_emit_thrsw(struct vc4_compile *c)
76b8e80941Smrg{
77b8e80941Smrg        if (!c->fs_threaded)
78b8e80941Smrg                return;
79b8e80941Smrg
80b8e80941Smrg        /* Always thread switch after each texture operation for now.
81b8e80941Smrg         *
82b8e80941Smrg         * We could do better by batching a bunch of texture fetches up and
83b8e80941Smrg         * then doing one thread switch and collecting all their results
84b8e80941Smrg         * afterward.
85b8e80941Smrg         */
86b8e80941Smrg        qir_emit_nondef(c, qir_inst(QOP_THRSW, c->undef,
87b8e80941Smrg                                    c->undef, c->undef));
88b8e80941Smrg        c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
89b8e80941Smrg}
90848b8605Smrg
91848b8605Smrgstatic struct qreg
92b8e80941Smrgindirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
93848b8605Smrg{
94b8e80941Smrg        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
95b8e80941Smrg
96b8e80941Smrg        /* Clamp to [0, array size).  Note that MIN/MAX are signed. */
97b8e80941Smrg        uint32_t range = nir_intrinsic_range(intr);
98b8e80941Smrg        indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
99b8e80941Smrg        indirect_offset = qir_MIN_NOIMM(c, indirect_offset,
100b8e80941Smrg                                        qir_uniform_ui(c, range - 4));
101b8e80941Smrg
102b8e80941Smrg        qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
103b8e80941Smrg                     indirect_offset,
104b8e80941Smrg                     qir_uniform(c, QUNIFORM_UBO0_ADDR,
105b8e80941Smrg                                 nir_intrinsic_base(intr)));
106848b8605Smrg
107b8e80941Smrg        c->num_texture_samples++;
108848b8605Smrg
109b8e80941Smrg        ntq_emit_thrsw(c);
110b8e80941Smrg
111b8e80941Smrg        return qir_TEX_RESULT(c);
112848b8605Smrg}
113848b8605Smrg
114848b8605Smrgstatic struct qreg
115b8e80941Smrgvc4_ubo_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
116848b8605Smrg{
117b8e80941Smrg        int buffer_index = nir_src_as_uint(intr->src[0]);
118b8e80941Smrg        assert(buffer_index == 1);
119b8e80941Smrg        assert(c->stage == QSTAGE_FRAG);
120848b8605Smrg
121b8e80941Smrg        struct qreg offset = ntq_get_src(c, intr->src[1], 0);
122b8e80941Smrg
123b8e80941Smrg        /* Clamp to [0, array size).  Note that MIN/MAX are signed. */
124b8e80941Smrg        offset = qir_MAX(c, offset, qir_uniform_ui(c, 0));
125b8e80941Smrg        offset = qir_MIN_NOIMM(c, offset,
126b8e80941Smrg                               qir_uniform_ui(c, c->fs_key->ubo_1_size - 4));
127b8e80941Smrg
128b8e80941Smrg        qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
129b8e80941Smrg                     offset,
130b8e80941Smrg                     qir_uniform(c, QUNIFORM_UBO1_ADDR, 0));
131848b8605Smrg
132b8e80941Smrg        c->num_texture_samples++;
133848b8605Smrg
134b8e80941Smrg        ntq_emit_thrsw(c);
135b8e80941Smrg
136b8e80941Smrg        return qir_TEX_RESULT(c);
137848b8605Smrg}
138848b8605Smrg
139b8e80941Smrgnir_ssa_def *
140b8e80941Smrgvc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
141848b8605Smrg{
142b8e80941Smrg        switch (swiz) {
143b8e80941Smrg        default:
144b8e80941Smrg        case PIPE_SWIZZLE_NONE:
145b8e80941Smrg                fprintf(stderr, "warning: unknown swizzle\n");
146b8e80941Smrg                /* FALLTHROUGH */
147b8e80941Smrg        case PIPE_SWIZZLE_0:
148b8e80941Smrg                return nir_imm_float(b, 0.0);
149b8e80941Smrg        case PIPE_SWIZZLE_1:
150b8e80941Smrg                return nir_imm_float(b, 1.0);
151b8e80941Smrg        case PIPE_SWIZZLE_X:
152b8e80941Smrg        case PIPE_SWIZZLE_Y:
153b8e80941Smrg        case PIPE_SWIZZLE_Z:
154b8e80941Smrg        case PIPE_SWIZZLE_W:
155b8e80941Smrg                return srcs[swiz];
156b8e80941Smrg        }
157848b8605Smrg}
158848b8605Smrg
159b8e80941Smrgstatic struct qreg *
160b8e80941Smrgntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
161848b8605Smrg{
162b8e80941Smrg        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
163b8e80941Smrg                                          def->num_components);
164b8e80941Smrg        _mesa_hash_table_insert(c->def_ht, def, qregs);
165b8e80941Smrg        return qregs;
166848b8605Smrg}
167848b8605Smrg
/**
 * This function is responsible for getting QIR results into the associated
 * storage for a NIR instruction.
 *
 * If it's a NIR SSA def, then we just set the associated hash table entry to
 * the new result.
 *
 * If it's a NIR reg, then we need to update the existing qreg assigned to the
 * NIR destination with the incoming value.  To do that without introducing
 * new MOVs, we require that the incoming qreg either be a uniform, or be
 * SSA-defined by the previous QIR instruction in the block and rewritable by
 * this function.  That lets us sneak ahead and insert the SF flag beforehand
 * (knowing that the previous instruction doesn't depend on flags) and rewrite
 * its destination to be the NIR reg's destination
 */
static void
ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan,
               struct qreg result)
{
        /* Grab the instruction that produced "result" (if any), so the
         * non-SSA path below can rewrite its destination in place.
         */
        struct qinst *last_inst = NULL;
        if (!list_empty(&c->cur_block->instructions))
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

        /* Enforce the contract described above: result is a uniform, or the
         * SSA temp defined by the immediately preceding instruction.
         */
        assert(result.file == QFILE_UNIF ||
               (result.file == QFILE_TEMP &&
                last_inst && last_inst == c->defs[result.index]));

        if (dest->is_ssa) {
                assert(chan < dest->ssa.num_components);

                struct qreg *qregs;
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, &dest->ssa);

                /* The def's storage may already exist if another channel was
                 * stored first; otherwise allocate it now.
                 */
                if (entry)
                        qregs = entry->data;
                else
                        qregs = ntq_init_ssa_def(c, &dest->ssa);

                qregs[chan] = result;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                struct qreg *qregs = entry->data;

                /* Insert a MOV if the source wasn't an SSA def in the
                 * previous instruction.
                 */
                if (result.file == QFILE_UNIF) {
                        result = qir_MOV(c, result);
                        last_inst = c->defs[result.index];
                }

                /* We know they're both temps, so just rewrite index. */
                c->defs[last_inst->dst.index] = NULL;
                last_inst->dst.index = qregs[chan].index;

                /* If we're in control flow, then make this update of the reg
                 * conditional on the execution mask.
                 */
                if (c->execute.file != QFILE_NULL) {
                        last_inst->dst.index = qregs[chan].index;

                        /* Set the flags to the current exec mask.  To insert
                         * the SF, we temporarily remove our SSA instruction.
                         */
                        list_del(&last_inst->link);
                        qir_SF(c, c->execute);
                        list_addtail(&last_inst->link,
                                     &c->cur_block->instructions);

                        /* Channels whose exec-mask value is zero are active;
                         * predicate the write on the Z flag being set.
                         */
                        last_inst->cond = QPU_COND_ZS;
                        last_inst->cond_is_exec_mask = true;
                }
        }
}
247848b8605Smrg
248b8e80941Smrgstatic struct qreg *
249b8e80941Smrgntq_get_dest(struct vc4_compile *c, nir_dest *dest)
250b8e80941Smrg{
251b8e80941Smrg        if (dest->is_ssa) {
252b8e80941Smrg                struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
253b8e80941Smrg                for (int i = 0; i < dest->ssa.num_components; i++)
254b8e80941Smrg                        qregs[i] = c->undef;
255b8e80941Smrg                return qregs;
256b8e80941Smrg        } else {
257b8e80941Smrg                nir_register *reg = dest->reg.reg;
258b8e80941Smrg                assert(dest->reg.base_offset == 0);
259b8e80941Smrg                assert(reg->num_array_elems == 0);
260b8e80941Smrg                struct hash_entry *entry =
261b8e80941Smrg                        _mesa_hash_table_search(c->def_ht, reg);
262b8e80941Smrg                return entry->data;
263b8e80941Smrg        }
264b8e80941Smrg}
265848b8605Smrg
266b8e80941Smrgstatic struct qreg
267b8e80941Smrgntq_get_src(struct vc4_compile *c, nir_src src, int i)
268b8e80941Smrg{
269b8e80941Smrg        struct hash_entry *entry;
270b8e80941Smrg        if (src.is_ssa) {
271b8e80941Smrg                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
272b8e80941Smrg                assert(i < src.ssa->num_components);
273b8e80941Smrg        } else {
274b8e80941Smrg                nir_register *reg = src.reg.reg;
275b8e80941Smrg                entry = _mesa_hash_table_search(c->def_ht, reg);
276b8e80941Smrg                assert(reg->num_array_elems == 0);
277b8e80941Smrg                assert(src.reg.base_offset == 0);
278b8e80941Smrg                assert(i < reg->num_components);
279b8e80941Smrg        }
280848b8605Smrg
281b8e80941Smrg        struct qreg *qregs = entry->data;
282b8e80941Smrg        return qregs[i];
283b8e80941Smrg}
284b8e80941Smrg
285b8e80941Smrgstatic struct qreg
286b8e80941Smrgntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
287b8e80941Smrg                unsigned src)
288848b8605Smrg{
289b8e80941Smrg        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
290b8e80941Smrg        unsigned chan = ffs(instr->dest.write_mask) - 1;
291b8e80941Smrg        struct qreg r = ntq_get_src(c, instr->src[src].src,
292b8e80941Smrg                                    instr->src[src].swizzle[chan]);
293848b8605Smrg
294b8e80941Smrg        assert(!instr->src[src].abs);
295b8e80941Smrg        assert(!instr->src[src].negate);
296848b8605Smrg
297b8e80941Smrg        return r;
298848b8605Smrg};
299848b8605Smrg
300b8e80941Smrgstatic inline struct qreg
301b8e80941Smrgqir_SAT(struct vc4_compile *c, struct qreg val)
302848b8605Smrg{
303b8e80941Smrg        return qir_FMAX(c,
304b8e80941Smrg                        qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
305b8e80941Smrg                        qir_uniform_f(c, 0.0));
306848b8605Smrg}
307848b8605Smrg
308848b8605Smrgstatic struct qreg
309b8e80941Smrgntq_rcp(struct vc4_compile *c, struct qreg x)
310848b8605Smrg{
311b8e80941Smrg        struct qreg r = qir_RCP(c, x);
312b8e80941Smrg
313b8e80941Smrg        /* Apply a Newton-Raphson step to improve the accuracy. */
314b8e80941Smrg        r = qir_FMUL(c, r, qir_FSUB(c,
315b8e80941Smrg                                    qir_uniform_f(c, 2.0),
316b8e80941Smrg                                    qir_FMUL(c, x, r)));
317b8e80941Smrg
318b8e80941Smrg        return r;
319848b8605Smrg}
320848b8605Smrg
321848b8605Smrgstatic struct qreg
322b8e80941Smrgntq_rsq(struct vc4_compile *c, struct qreg x)
323848b8605Smrg{
324b8e80941Smrg        struct qreg r = qir_RSQ(c, x);
325b8e80941Smrg
326b8e80941Smrg        /* Apply a Newton-Raphson step to improve the accuracy. */
327b8e80941Smrg        r = qir_FMUL(c, r, qir_FSUB(c,
328b8e80941Smrg                                    qir_uniform_f(c, 1.5),
329b8e80941Smrg                                    qir_FMUL(c,
330b8e80941Smrg                                             qir_uniform_f(c, 0.5),
331b8e80941Smrg                                             qir_FMUL(c, x,
332b8e80941Smrg                                                      qir_FMUL(c, r, r)))));
333b8e80941Smrg
334b8e80941Smrg        return r;
335848b8605Smrg}
336848b8605Smrg
337848b8605Smrgstatic struct qreg
338b8e80941Smrgntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
339b8e80941Smrg{
340b8e80941Smrg        struct qreg src0_hi = qir_SHR(c, src0,
341b8e80941Smrg                                      qir_uniform_ui(c, 24));
342b8e80941Smrg        struct qreg src1_hi = qir_SHR(c, src1,
343b8e80941Smrg                                      qir_uniform_ui(c, 24));
344b8e80941Smrg
345b8e80941Smrg        struct qreg hilo = qir_MUL24(c, src0_hi, src1);
346b8e80941Smrg        struct qreg lohi = qir_MUL24(c, src0, src1_hi);
347b8e80941Smrg        struct qreg lolo = qir_MUL24(c, src0, src1);
348b8e80941Smrg
349b8e80941Smrg        return qir_ADD(c, lolo, qir_SHL(c,
350b8e80941Smrg                                        qir_ADD(c, hilo, lohi),
351b8e80941Smrg                                        qir_uniform_ui(c, 24)));
352848b8605Smrg}
353848b8605Smrg
354848b8605Smrgstatic struct qreg
355b8e80941Smrgntq_scale_depth_texture(struct vc4_compile *c, struct qreg src)
356b8e80941Smrg{
357b8e80941Smrg        struct qreg depthf = qir_ITOF(c, qir_SHR(c, src,
358b8e80941Smrg                                                 qir_uniform_ui(c, 8)));
359b8e80941Smrg        return qir_FMUL(c, depthf, qir_uniform_f(c, 1.0f/0xffffff));
360b8e80941Smrg}
361848b8605Smrg
/**
 * Emits a lowered TXF_MS from an MSAA texture.
 *
 * The addressing math has been lowered in NIR, and now we just need to read
 * it like a UBO.
 */
static void
ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
{
        /* The MSAA surface is addressed as 32x32-pixel tiles with
         * VC4_MAX_SAMPLES 32-bit samples per pixel.
         */
        uint32_t tile_width = 32;
        uint32_t tile_height = 32;
        uint32_t tile_size = (tile_height * tile_width *
                              VC4_MAX_SAMPLES * sizeof(uint32_t));

        /* Total buffer size, with dimensions aligned up to whole tiles. */
        unsigned unit = instr->texture_index;
        uint32_t w = align(c->key->tex[unit].msaa_width, tile_width);
        uint32_t w_tiles = w / tile_width;
        uint32_t h = align(c->key->tex[unit].msaa_height, tile_height);
        uint32_t h_tiles = h / tile_height;
        uint32_t size = w_tiles * h_tiles * tile_size;

        /* The single "coord" source is the already-lowered byte offset. */
        struct qreg addr;
        assert(instr->num_srcs == 1);
        assert(instr->src[0].src_type == nir_tex_src_coord);
        addr = ntq_get_src(c, instr->src[0].src, 0);

        /* Perform the clamping required by kernel validation. */
        addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
        addr = qir_MIN_NOIMM(c, addr, qir_uniform_ui(c, size - 4));

        /* Writing TEX_S_DIRECT with base + offset kicks off the fetch. */
        qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
                     addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));

        /* In threaded FS mode, switch threads while the fetch is in flight. */
        ntq_emit_thrsw(c);

        struct qreg tex = qir_TEX_RESULT(c);
        c->num_texture_samples++;

        enum pipe_format format = c->key->tex[unit].format;
        if (util_format_is_depth_or_stencil(format)) {
                /* Replicate the normalized depth value to all 4 channels. */
                struct qreg scaled = ntq_scale_depth_texture(c, tex);
                for (int i = 0; i < 4; i++)
                        ntq_store_dest(c, &instr->dest, i, qir_MOV(c, scaled));
        } else {
                /* Color: unpack each byte of the 8888 result to a float. */
                for (int i = 0; i < 4; i++)
                        ntq_store_dest(c, &instr->dest, i,
                                       qir_UNPACK_8_F(c, tex, i));
        }
}
411848b8605Smrg
/* Translates a NIR texture instruction into QPU TMU writes (coordinates
 * paired with config uniforms) and unpacks the result, including in-shader
 * shadow comparison for depth formats.
 */
static void
ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
{
        /* s/t/r are coordinates, lod is a bias or explicit level, compare is
         * the shadow reference.  Each is only initialized when the matching
         * NIR source is present.
         */
        struct qreg s, t, r, lod, compare;
        bool is_txb = false, is_txl = false;
        unsigned unit = instr->texture_index;

        /* MSAA texel fetch takes a completely different, UBO-style path. */
        if (instr->op == nir_texop_txf) {
                ntq_emit_txf(c, instr);
                return;
        }

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        s = ntq_get_src(c, instr->src[i].src, 0);
                        /* 1D samples use a fixed t in the middle of the
                         * (single-row) texture.
                         */
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
                                t = qir_uniform_f(c, 0.5);
                        else
                                t = ntq_get_src(c, instr->src[i].src, 1);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
                                r = ntq_get_src(c, instr->src[i].src, 2);
                        break;
                case nir_tex_src_bias:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txb = true;
                        break;
                case nir_tex_src_lod:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txl = true;
                        break;
                case nir_tex_src_comparator:
                        compare = ntq_get_src(c, instr->src[i].src, 0);
                        break;
                default:
                        unreachable("unknown texture source");
                }
        }

        if (c->stage != QSTAGE_FRAG && !is_txl) {
                /* From the GLSL 1.20 spec:
                 *
                 *     "If it is mip-mapped and running on the vertex shader,
                 *      then the base texture is used."
                 */
                is_txl = true;
                lod = qir_uniform_ui(c, 0);
        }

        /* Override the LOD with the key-specified first level if requested;
         * this turns any bias into an explicit-LOD lookup.
         */
        if (c->key->tex[unit].force_first_level) {
                lod = qir_uniform(c, QUNIFORM_TEXTURE_FIRST_LEVEL, unit);
                is_txl = true;
                is_txb = false;
        }

        /* Config uniforms handed out in order alongside each TMU coordinate
         * write below (indexed by next_texture_u).
         */
        struct qreg texture_u[] = {
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
        };
        uint32_t next_texture_u = 0;

        /* There is no native support for GL texture rectangle coordinates, so
         * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
         * 1]).
         */
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
                s = qir_FMUL(c, s,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
                t = qir_FMUL(c, t,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
        }

        /* The P2 config word replaces the constant slot when cube mapping or
         * an explicit LOD needs it.
         */
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
                texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
                                           unit | (is_txl << 16));
        }

        struct qinst *tmu;
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), r);
                tmu->src[qir_get_tex_uniform_src(tmu)] =
                        texture_u[next_texture_u++];
        } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                /* Non-cube lookups use the R write to supply the border
                 * color when a clamping wrap mode may sample the border.
                 */
                tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0),
                                   qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR,
                                               unit));
                tmu->src[qir_get_tex_uniform_src(tmu)] =
                        texture_u[next_texture_u++];
        }

        /* GL_CLAMP semantics are produced by saturating the coordinate in
         * the shader.
         */
        if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
                s = qir_SAT(c, s);
        }

        if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                t = qir_SAT(c, t);
        }

        tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_T, 0), t);
        tmu->src[qir_get_tex_uniform_src(tmu)] =
                texture_u[next_texture_u++];

        if (is_txl || is_txb) {
                tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_B, 0), lod);
                tmu->src[qir_get_tex_uniform_src(tmu)] =
                        texture_u[next_texture_u++];
        }

        /* The S coordinate write is last: it is what fires off the lookup. */
        tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_S, 0), s);
        tmu->src[qir_get_tex_uniform_src(tmu)] = texture_u[next_texture_u++];

        c->num_texture_samples++;

        /* In threaded FS mode, switch threads while the TMU request is in
         * flight.
         */
        ntq_emit_thrsw(c);

        struct qreg tex = qir_TEX_RESULT(c);

        enum pipe_format format = c->key->tex[unit].format;

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        if (util_format_is_depth_or_stencil(format)) {
                /* Depth: normalize the raw value, then apply the shadow
                 * comparison (if enabled) in the shader via SF + SEL.
                 */
                struct qreg normalized = ntq_scale_depth_texture(c, tex);
                struct qreg depth_output;

                struct qreg u0 = qir_uniform_f(c, 0.0f);
                struct qreg u1 = qir_uniform_f(c, 1.0f);
                if (c->key->tex[unit].compare_mode) {
                        /* From the GL_ARB_shadow spec:
                         *
                         *     "Let Dt (D subscript t) be the depth texture
                         *      value, in the range [0, 1].  Let R be the
                         *      interpolated texture coordinate clamped to the
                         *      range [0, 1]."
                         */
                        compare = qir_SAT(c, compare);

                        switch (c->key->tex[unit].compare_func) {
                        case PIPE_FUNC_NEVER:
                                depth_output = qir_uniform_f(c, 0.0f);
                                break;
                        case PIPE_FUNC_ALWAYS:
                                depth_output = u1;
                                break;
                        case PIPE_FUNC_EQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZS, u1, u0);
                                break;
                        case PIPE_FUNC_NOTEQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZC, u1, u0);
                                break;
                        case PIPE_FUNC_GREATER:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        case PIPE_FUNC_GEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LESS:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        }
                } else {
                        depth_output = normalized;
                }

                /* Depth results are broadcast to all four channels. */
                for (int i = 0; i < 4; i++)
                        dest[i] = depth_output;
        } else {
                /* Color: unpack each byte of the 8888 result to a float. */
                for (int i = 0; i < 4; i++)
                        dest[i] = qir_UNPACK_8_F(c, tex, i);
        }
}
596848b8605Smrg
/**
 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
 * to zero).
 */
static struct qreg
ntq_ffract(struct vc4_compile *c, struct qreg src)
{
        /* Round-toward-zero value: equals floor(src) only for src >= 0. */
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
        struct qreg diff = qir_FSUB(c, src, trunc);
        qir_SF(c, diff);

        /* diff < 0 means src was negative with a fractional part, so trunc
         * was one above floor; the N-flag-predicated FADD adds 1 back.
         */
        qir_FADD_dest(c, diff,
                      diff, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;

        /* MOV so the caller receives a def of the immediately preceding
         * instruction (as ntq_store_dest's assert expects).
         */
        return qir_MOV(c, diff);
}
613848b8605Smrg
/**
 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_ffloor(struct vc4_compile *c, struct qreg src)
{
        /* Round-toward-zero value: equals floor(src) only for src >= 0. */
        struct qreg result = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was < 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, src, result));

        /* N-flag-predicated correction: subtract 1 when truncation rounded
         * a negative value upward.
         */
        struct qinst *sub = qir_FSUB_dest(c, result,
                                          result, qir_uniform_f(c, 1.0));
        sub->cond = QPU_COND_NS;

        /* MOV so the caller receives a def of the immediately preceding
         * instruction (as ntq_store_dest's assert expects).
         */
        return qir_MOV(c, result);
}
634848b8605Smrg
635b8e80941Smrg/**
636b8e80941Smrg * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
637b8e80941Smrg * zero).
638b8e80941Smrg */
639848b8605Smrgstatic struct qreg
640b8e80941Smrgntq_fceil(struct vc4_compile *c, struct qreg src)
641848b8605Smrg{
642b8e80941Smrg        struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
643848b8605Smrg
644b8e80941Smrg        /* This will be < 0 if we truncated and the truncation was of a value
645b8e80941Smrg         * that was > 0 in the first place.
646b8e80941Smrg         */
647b8e80941Smrg        qir_SF(c, qir_FSUB(c, result, src));
648b8e80941Smrg
649b8e80941Smrg        qir_FADD_dest(c, result,
650b8e80941Smrg                      result, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
651b8e80941Smrg
652b8e80941Smrg        return qir_MOV(c, result);
653848b8605Smrg}
654848b8605Smrg
/**
 * Reduces the sin/cos input to a fraction of a rotation in [-0.5, 0.5],
 * in units of 2*pi, so the Taylor series stays accurate.
 *
 * The two conditional fixups below depend on the flags set immediately
 * before them; the statement order here must not change.
 */
static struct qreg
ntq_shrink_sincos_input_range(struct vc4_compile *c, struct qreg x)
{
        /* Since we're using a Taylor approximation, we want to have a small
         * number of coefficients and take advantage of sin/cos repeating
         * every 2pi.  We keep our x as close to 0 as we can, since the series
         * will be less accurate as |x| increases.  (Also, be careful of
         * shifting the input x value to be tricky with sin/cos relations,
         * because getting accurate values for x==0 is very important for SDL
         * rendering)
         */
        struct qreg scaled_x =
                qir_FMUL(c, x,
                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
        /* Note: FTOI truncates toward 0. */
        struct qreg x_frac = qir_FSUB(c, scaled_x,
                                      qir_ITOF(c, qir_FTOI(c, scaled_x)));
        /* Map [0.5, 1] to [-0.5, 0] */
        qir_SF(c, qir_FSUB(c, x_frac, qir_uniform_f(c, 0.5)));
        qir_FSUB_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NC;
        /* Map [-1, -0.5] to [0, 0.5] */
        qir_SF(c, qir_FADD(c, x_frac, qir_uniform_f(c, 0.5)));
        qir_FADD_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;

        return x_frac;
}
681848b8605Smrg
682848b8605Smrgstatic struct qreg
683b8e80941Smrgntq_fsin(struct vc4_compile *c, struct qreg src)
684848b8605Smrg{
685848b8605Smrg        float coeff[] = {
686848b8605Smrg                2.0 * M_PI,
687848b8605Smrg                -pow(2.0 * M_PI, 3) / (3 * 2 * 1),
688848b8605Smrg                pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
689848b8605Smrg                -pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
690b8e80941Smrg                pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
691848b8605Smrg        };
692848b8605Smrg
693b8e80941Smrg        struct qreg x = ntq_shrink_sincos_input_range(c, src);
694848b8605Smrg        struct qreg x2 = qir_FMUL(c, x, x);
695b8e80941Smrg        struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
696848b8605Smrg        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
697848b8605Smrg                x = qir_FMUL(c, x, x2);
698848b8605Smrg                sum = qir_FADD(c,
699848b8605Smrg                               sum,
700848b8605Smrg                               qir_FMUL(c,
701848b8605Smrg                                        x,
702b8e80941Smrg                                        qir_uniform_f(c, coeff[i])));
703848b8605Smrg        }
704848b8605Smrg        return sum;
705848b8605Smrg}
706848b8605Smrg
707848b8605Smrgstatic struct qreg
708b8e80941Smrgntq_fcos(struct vc4_compile *c, struct qreg src)
709848b8605Smrg{
710848b8605Smrg        float coeff[] = {
711848b8605Smrg                1.0f,
712848b8605Smrg                -pow(2.0 * M_PI, 2) / (2 * 1),
713848b8605Smrg                pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
714848b8605Smrg                -pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
715b8e80941Smrg                pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
716b8e80941Smrg                -pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
717848b8605Smrg        };
718848b8605Smrg
719b8e80941Smrg        struct qreg x_frac = ntq_shrink_sincos_input_range(c, src);
720b8e80941Smrg        struct qreg sum = qir_uniform_f(c, coeff[0]);
721848b8605Smrg        struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
722848b8605Smrg        struct qreg x = x2; /* Current x^2, x^4, or x^6 */
723848b8605Smrg        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
724848b8605Smrg                if (i != 1)
725848b8605Smrg                        x = qir_FMUL(c, x, x2);
726848b8605Smrg
727b8e80941Smrg                sum = qir_FADD(c, qir_FMUL(c,
728848b8605Smrg                                           x,
729b8e80941Smrg                                           qir_uniform_f(c, coeff[i])),
730b8e80941Smrg                               sum);
731848b8605Smrg        }
732848b8605Smrg        return sum;
733848b8605Smrg}
734848b8605Smrg
/* Returns -1.0, 0.0, or 1.0 according to the sign of src. */
static struct qreg
ntq_fsign(struct vc4_compile *c, struct qreg src)
{
        struct qreg t = qir_get_temp(c);

        /* Set flags once on the input, then build the result with a chain
         * of conditional moves: 0.0 unconditionally, 1.0 when nonzero
         * (Z clear), and -1.0 when negative (N set).  Write order matters:
         * the -1.0 move must come last so it overrides 1.0 for negative
         * inputs.
         */
        qir_SF(c, src);
        qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
        qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
        qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
        return qir_MOV(c, t);
}
746848b8605Smrg
747b8e80941Smrgstatic void
748b8e80941Smrgemit_vertex_input(struct vc4_compile *c, int attr)
749b8e80941Smrg{
750b8e80941Smrg        enum pipe_format format = c->vs_key->attr_formats[attr];
751b8e80941Smrg        uint32_t attr_size = util_format_get_blocksize(format);
752848b8605Smrg
753b8e80941Smrg        c->vattr_sizes[attr] = align(attr_size, 4);
754b8e80941Smrg        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
755b8e80941Smrg                c->inputs[attr * 4 + i] =
756b8e80941Smrg                        qir_MOV(c, qir_reg(QFILE_VPM, attr * 4 + i));
757b8e80941Smrg                c->num_inputs++;
758848b8605Smrg        }
759848b8605Smrg}
760848b8605Smrg
761848b8605Smrgstatic void
762b8e80941Smrgemit_fragcoord_input(struct vc4_compile *c, int attr)
763848b8605Smrg{
764b8e80941Smrg        c->inputs[attr * 4 + 0] = qir_ITOF(c, qir_reg(QFILE_FRAG_X, 0));
765b8e80941Smrg        c->inputs[attr * 4 + 1] = qir_ITOF(c, qir_reg(QFILE_FRAG_Y, 0));
766b8e80941Smrg        c->inputs[attr * 4 + 2] =
767848b8605Smrg                qir_FMUL(c,
768b8e80941Smrg                         qir_ITOF(c, qir_FRAG_Z(c)),
769b8e80941Smrg                         qir_uniform_f(c, 1.0 / 0xffffff));
770b8e80941Smrg        c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
771848b8605Smrg}
772848b8605Smrg
/**
 * Allocates one scalar varying component sourced from slot/swizzle and
 * emits the QIR to interpolate it.
 */
static struct qreg
emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
                      uint8_t swizzle)
{
        uint32_t i = c->num_input_slots++;
        struct qreg vary = {
                QFILE_VARY,
                i
        };

        /* Grow the slot-tracking array by doubling, starting at 4. */
        if (c->num_input_slots >= c->input_slots_array_size) {
                c->input_slots_array_size =
                        MAX2(4, c->input_slots_array_size * 2);

                c->input_slots = reralloc(c, c->input_slots,
                                          struct vc4_varying_slot,
                                          c->input_slots_array_size);
        }

        /* Record where this varying component came from (presumably used
         * later to pair it with vertex shader outputs).
         */
        c->input_slots[i].slot = slot;
        c->input_slots[i].swizzle = swizzle;

        /* Multiply the raw varying by W and add the C coefficient to
         * complete the interpolation.
         */
        return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
}
797848b8605Smrg
798848b8605Smrgstatic void
799b8e80941Smrgemit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
800848b8605Smrg{
801848b8605Smrg        for (int i = 0; i < 4; i++) {
802b8e80941Smrg                c->inputs[attr * 4 + i] =
803b8e80941Smrg                        emit_fragment_varying(c, slot, i);
804848b8605Smrg                c->num_inputs++;
805848b8605Smrg        }
806848b8605Smrg}
807848b8605Smrg
/**
 * Grows the output arrays to cover decl_offset and records which varying
 * slot/swizzle the output component at that offset corresponds to.
 */
static void
add_output(struct vc4_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        /* Keep the slot metadata array the same size as the qreg array. */
        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct vc4_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset].slot = slot;
        c->output_slots[decl_offset].swizzle = swizzle;
}
828848b8605Smrg
829b8e80941Smrgstatic bool
830b8e80941Smrgntq_src_is_only_ssa_def_user(nir_src *src)
831b8e80941Smrg{
832b8e80941Smrg        if (!src->is_ssa)
833b8e80941Smrg                return false;
834848b8605Smrg
835b8e80941Smrg        if (!list_empty(&src->ssa->if_uses))
836b8e80941Smrg                return false;
837848b8605Smrg
838b8e80941Smrg        return (src->ssa->uses.next == &src->use_link &&
839b8e80941Smrg                src->ssa->uses.next->next == &src->ssa->uses);
840b8e80941Smrg}
841848b8605Smrg
/**
 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
 * bit set.
 *
 * However, as an optimization, it tries to find the instructions generating
 * the sources to be packed and just emit the pack flag there, if possible.
 */
static void
ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
{
        struct qreg result = qir_get_temp(c);
        struct nir_alu_instr *vec4 = NULL;

        /* If packing from a vec4 op (as expected), identify it so that we can
         * peek back at what generated its sources.
         */
        if (instr->src[0].src.is_ssa &&
            instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
            nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
            nir_op_vec4) {
                vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        }

        /* If the pack is replicating the same channel 4 times, use the 8888
         * pack flag.  This is common for blending using the alpha
         * channel.
         */
        if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
                struct qreg rep = ntq_get_src(c,
                                              instr->src[0].src,
                                              instr->src[0].swizzle[0]);
                ntq_store_dest(c, &instr->dest.dest, 0, qir_PACK_8888_F(c, rep));
                return;
        }

        for (int i = 0; i < 4; i++) {
                int swiz = instr->src[0].swizzle[i];
                struct qreg src;
                /* When packing through a vec4, fetch each channel's source
                 * directly from the vec4's operands.
                 */
                if (vec4) {
                        src = ntq_get_src(c, vec4->src[swiz].src,
                                          vec4->src[swiz].swizzle[0]);
                } else {
                        src = ntq_get_src(c, instr->src[0].src, swiz);
                }

                /* If this channel comes from a MUL-unit instruction that is
                 * only used by this pack and whose dest has no pack mode yet,
                 * rewrite that instruction to write its byte of the result
                 * directly, saving the separate pack MOV below.
                 */
                if (vec4 &&
                    ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
                    src.file == QFILE_TEMP &&
                    c->defs[src.index] &&
                    qir_is_mul(c->defs[src.index]) &&
                    !c->defs[src.index]->dst.pack) {
                        struct qinst *rewrite = c->defs[src.index];
                        /* The rewritten instruction no longer defines its
                         * original SSA temp.
                         */
                        c->defs[src.index] = NULL;
                        rewrite->dst = result;
                        rewrite->dst.pack = QPU_PACK_MUL_8A + i;
                        continue;
                }

                qir_PACK_8_F(c, result, src, i);
        }

        ntq_store_dest(c, &instr->dest.dest, 0, qir_MOV(c, result));
}
907848b8605Smrg
908b8e80941Smrg/** Handles sign-extended bitfield extracts for 16 bits. */
909848b8605Smrgstatic struct qreg
910b8e80941Smrgntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
911b8e80941Smrg              struct qreg bits)
912b8e80941Smrg{
913b8e80941Smrg        assert(bits.file == QFILE_UNIF &&
914b8e80941Smrg               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
915b8e80941Smrg               c->uniform_data[bits.index] == 16);
916848b8605Smrg
917b8e80941Smrg        assert(offset.file == QFILE_UNIF &&
918b8e80941Smrg               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
919b8e80941Smrg        int offset_bit = c->uniform_data[offset.index];
920b8e80941Smrg        assert(offset_bit % 16 == 0);
921b8e80941Smrg
922b8e80941Smrg        return qir_UNPACK_16_I(c, base, offset_bit / 16);
923848b8605Smrg}
924848b8605Smrg
925b8e80941Smrg/** Handles unsigned bitfield extracts for 8 bits. */
926848b8605Smrgstatic struct qreg
927b8e80941Smrgntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
928b8e80941Smrg              struct qreg bits)
929b8e80941Smrg{
930b8e80941Smrg        assert(bits.file == QFILE_UNIF &&
931b8e80941Smrg               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
932b8e80941Smrg               c->uniform_data[bits.index] == 8);
933848b8605Smrg
934b8e80941Smrg        assert(offset.file == QFILE_UNIF &&
935b8e80941Smrg               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
936b8e80941Smrg        int offset_bit = c->uniform_data[offset.index];
937b8e80941Smrg        assert(offset_bit % 8 == 0);
938848b8605Smrg
939b8e80941Smrg        return qir_UNPACK_8_I(c, base, offset_bit / 8);
940848b8605Smrg}
941848b8605Smrg
/**
 * If compare_instr is a valid comparison instruction, emits the
 * compare_instr's comparison and returns the sel_instr's return value based
 * on the compare_instr's result.
 *
 * Returns false (emitting nothing) when compare_instr is not a comparison
 * op this backend can fold.
 */
static bool
ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
                    nir_alu_instr *compare_instr,
                    nir_alu_instr *sel_instr)
{
        enum qpu_cond cond;

        /* Map the comparison op to the QPU condition that is true after
         * setting flags on (src0 - src1).
         */
        switch (compare_instr->op) {
        case nir_op_feq32:
        case nir_op_ieq32:
        case nir_op_seq:
                cond = QPU_COND_ZS;
                break;
        case nir_op_fne32:
        case nir_op_ine32:
        case nir_op_sne:
                cond = QPU_COND_ZC;
                break;
        case nir_op_fge32:
        case nir_op_ige32:
        case nir_op_uge32:
        case nir_op_sge:
                cond = QPU_COND_NC;
                break;
        case nir_op_flt32:
        case nir_op_ilt32:
        case nir_op_slt:
                cond = QPU_COND_NS;
                break;
        default:
                /* Not a foldable comparison; caller must fall back. */
                return false;
        }

        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);

        /* Pick float or integer subtract for the flag-setting compare based
         * on the comparison's input type.
         */
        unsigned unsized_type =
                nir_alu_type_get_base_type(nir_op_infos[compare_instr->op].input_types[0]);
        if (unsized_type == nir_type_float)
                qir_SF(c, qir_FSUB(c, src0, src1));
        else
                qir_SF(c, qir_SUB(c, src0, src1));

        /* Shape the result for the consuming instruction: s* ops produce
         * 1.0/0.0 floats, b32csel selects between its two value sources,
         * and everything else produces ~0/0 booleans.
         */
        switch (sel_instr->op) {
        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
                *dest = qir_SEL(c, cond,
                                qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
                break;

        case nir_op_b32csel:
                *dest = qir_SEL(c, cond,
                                ntq_get_alu_src(c, sel_instr, 1),
                                ntq_get_alu_src(c, sel_instr, 2));
                break;

        default:
                *dest = qir_SEL(c, cond,
                                qir_uniform_ui(c, ~0), qir_uniform_ui(c, 0));
                break;
        }

        /* Make the temporary for nir_store_dest(). */
        *dest = qir_MOV(c, *dest);

        return true;
}
1016b8e80941Smrg
1017b8e80941Smrg/**
1018b8e80941Smrg * Attempts to fold a comparison generating a boolean result into the
1019b8e80941Smrg * condition code for selecting between two values, instead of comparing the
1020b8e80941Smrg * boolean result against 0 to generate the condition code.
1021b8e80941Smrg */
1022b8e80941Smrgstatic struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
1023b8e80941Smrg                                  struct qreg *src)
1024b8e80941Smrg{
1025b8e80941Smrg        if (!instr->src[0].src.is_ssa)
1026b8e80941Smrg                goto out;
1027b8e80941Smrg        if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
1028b8e80941Smrg                goto out;
1029b8e80941Smrg        nir_alu_instr *compare =
1030b8e80941Smrg                nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
1031b8e80941Smrg        if (!compare)
1032b8e80941Smrg                goto out;
1033b8e80941Smrg
1034b8e80941Smrg        struct qreg dest;
1035b8e80941Smrg        if (ntq_emit_comparison(c, &dest, compare, instr))
1036b8e80941Smrg                return dest;
1037b8e80941Smrg
1038b8e80941Smrgout:
1039b8e80941Smrg        qir_SF(c, src[0]);
1040b8e80941Smrg        return qir_MOV(c, qir_SEL(c, QPU_COND_NS, src[1], src[2]));
1041b8e80941Smrg}
1042b8e80941Smrg
1043b8e80941Smrgstatic struct qreg
1044b8e80941Smrgntq_fddx(struct vc4_compile *c, struct qreg src)
1045b8e80941Smrg{
1046b8e80941Smrg        /* Make sure that we have a bare temp to use for MUL rotation, so it
1047b8e80941Smrg         * can be allocated to an accumulator.
1048b8e80941Smrg         */
1049b8e80941Smrg        if (src.pack || src.file != QFILE_TEMP)
1050b8e80941Smrg                src = qir_MOV(c, src);
1051b8e80941Smrg
1052b8e80941Smrg        struct qreg from_left = qir_ROT_MUL(c, src, 1);
1053b8e80941Smrg        struct qreg from_right = qir_ROT_MUL(c, src, 15);
1054b8e80941Smrg
1055b8e80941Smrg        /* Distinguish left/right pixels of the quad. */
1056b8e80941Smrg        qir_SF(c, qir_AND(c, qir_reg(QFILE_QPU_ELEMENT, 0),
1057b8e80941Smrg                          qir_uniform_ui(c, 1)));
1058b8e80941Smrg
1059b8e80941Smrg        return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
1060b8e80941Smrg                                  qir_FSUB(c, from_right, src),
1061b8e80941Smrg                                  qir_FSUB(c, src, from_left)));
1062b8e80941Smrg}
1063b8e80941Smrg
1064b8e80941Smrgstatic struct qreg
1065b8e80941Smrgntq_fddy(struct vc4_compile *c, struct qreg src)
1066b8e80941Smrg{
1067b8e80941Smrg        if (src.pack || src.file != QFILE_TEMP)
1068b8e80941Smrg                src = qir_MOV(c, src);
1069b8e80941Smrg
1070b8e80941Smrg        struct qreg from_bottom = qir_ROT_MUL(c, src, 2);
1071b8e80941Smrg        struct qreg from_top = qir_ROT_MUL(c, src, 14);
1072b8e80941Smrg
1073b8e80941Smrg        /* Distinguish top/bottom pixels of the quad. */
1074b8e80941Smrg        qir_SF(c, qir_AND(c,
1075b8e80941Smrg                          qir_reg(QFILE_QPU_ELEMENT, 0),
1076b8e80941Smrg                          qir_uniform_ui(c, 2)));
1077b8e80941Smrg
1078b8e80941Smrg        return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
1079b8e80941Smrg                                  qir_FSUB(c, from_top, src),
1080b8e80941Smrg                                  qir_FSUB(c, src, from_bottom)));
1081b8e80941Smrg}
1082b8e80941Smrg
1083b8e80941Smrgstatic void
1084b8e80941Smrgntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
1085b8e80941Smrg{
1086b8e80941Smrg        /* This should always be lowered to ALU operations for VC4. */
1087b8e80941Smrg        assert(!instr->dest.saturate);
1088b8e80941Smrg
1089b8e80941Smrg        /* Vectors are special in that they have non-scalarized writemasks,
1090b8e80941Smrg         * and just take the first swizzle channel for each argument in order
1091b8e80941Smrg         * into each writemask channel.
1092b8e80941Smrg         */
1093b8e80941Smrg        if (instr->op == nir_op_vec2 ||
1094b8e80941Smrg            instr->op == nir_op_vec3 ||
1095b8e80941Smrg            instr->op == nir_op_vec4) {
1096b8e80941Smrg                struct qreg srcs[4];
1097b8e80941Smrg                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
1098b8e80941Smrg                        srcs[i] = ntq_get_src(c, instr->src[i].src,
1099b8e80941Smrg                                              instr->src[i].swizzle[0]);
1100b8e80941Smrg                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
1101b8e80941Smrg                        ntq_store_dest(c, &instr->dest.dest, i,
1102b8e80941Smrg                                       qir_MOV(c, srcs[i]));
1103b8e80941Smrg                return;
1104b8e80941Smrg        }
1105b8e80941Smrg
1106b8e80941Smrg        if (instr->op == nir_op_pack_unorm_4x8) {
1107b8e80941Smrg                ntq_emit_pack_unorm_4x8(c, instr);
1108b8e80941Smrg                return;
1109b8e80941Smrg        }
1110b8e80941Smrg
1111b8e80941Smrg        if (instr->op == nir_op_unpack_unorm_4x8) {
1112b8e80941Smrg                struct qreg src = ntq_get_src(c, instr->src[0].src,
1113b8e80941Smrg                                              instr->src[0].swizzle[0]);
1114b8e80941Smrg                for (int i = 0; i < 4; i++) {
1115b8e80941Smrg                        if (instr->dest.write_mask & (1 << i))
1116b8e80941Smrg                                ntq_store_dest(c, &instr->dest.dest, i,
1117b8e80941Smrg                                               qir_UNPACK_8_F(c, src, i));
1118b8e80941Smrg                }
1119b8e80941Smrg                return;
1120b8e80941Smrg        }
1121b8e80941Smrg
1122b8e80941Smrg        /* General case: We can just grab the one used channel per src. */
1123b8e80941Smrg        struct qreg src[nir_op_infos[instr->op].num_inputs];
1124b8e80941Smrg        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
1125b8e80941Smrg                src[i] = ntq_get_alu_src(c, instr, i);
1126b8e80941Smrg        }
1127b8e80941Smrg
1128b8e80941Smrg        struct qreg result;
1129b8e80941Smrg
1130b8e80941Smrg        switch (instr->op) {
1131b8e80941Smrg        case nir_op_fmov:
1132b8e80941Smrg        case nir_op_imov:
1133b8e80941Smrg                result = qir_MOV(c, src[0]);
1134b8e80941Smrg                break;
1135b8e80941Smrg        case nir_op_fmul:
1136b8e80941Smrg                result = qir_FMUL(c, src[0], src[1]);
1137b8e80941Smrg                break;
1138b8e80941Smrg        case nir_op_fadd:
1139b8e80941Smrg                result = qir_FADD(c, src[0], src[1]);
1140b8e80941Smrg                break;
1141b8e80941Smrg        case nir_op_fsub:
1142b8e80941Smrg                result = qir_FSUB(c, src[0], src[1]);
1143b8e80941Smrg                break;
1144b8e80941Smrg        case nir_op_fmin:
1145b8e80941Smrg                result = qir_FMIN(c, src[0], src[1]);
1146b8e80941Smrg                break;
1147b8e80941Smrg        case nir_op_fmax:
1148b8e80941Smrg                result = qir_FMAX(c, src[0], src[1]);
1149b8e80941Smrg                break;
1150b8e80941Smrg
1151b8e80941Smrg        case nir_op_f2i32:
1152b8e80941Smrg        case nir_op_f2u32:
1153b8e80941Smrg                result = qir_FTOI(c, src[0]);
1154b8e80941Smrg                break;
1155b8e80941Smrg        case nir_op_i2f32:
1156b8e80941Smrg        case nir_op_u2f32:
1157b8e80941Smrg                result = qir_ITOF(c, src[0]);
1158b8e80941Smrg                break;
1159b8e80941Smrg        case nir_op_b2f32:
1160b8e80941Smrg                result = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
1161b8e80941Smrg                break;
1162b8e80941Smrg        case nir_op_b2i32:
1163b8e80941Smrg                result = qir_AND(c, src[0], qir_uniform_ui(c, 1));
1164b8e80941Smrg                break;
1165b8e80941Smrg        case nir_op_i2b32:
1166b8e80941Smrg        case nir_op_f2b32:
1167b8e80941Smrg                qir_SF(c, src[0]);
1168b8e80941Smrg                result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC,
1169b8e80941Smrg                                            qir_uniform_ui(c, ~0),
1170b8e80941Smrg                                            qir_uniform_ui(c, 0)));
1171b8e80941Smrg                break;
1172b8e80941Smrg
1173b8e80941Smrg        case nir_op_iadd:
1174b8e80941Smrg                result = qir_ADD(c, src[0], src[1]);
1175b8e80941Smrg                break;
1176b8e80941Smrg        case nir_op_ushr:
1177b8e80941Smrg                result = qir_SHR(c, src[0], src[1]);
1178b8e80941Smrg                break;
1179b8e80941Smrg        case nir_op_isub:
1180b8e80941Smrg                result = qir_SUB(c, src[0], src[1]);
1181b8e80941Smrg                break;
1182b8e80941Smrg        case nir_op_ishr:
1183b8e80941Smrg                result = qir_ASR(c, src[0], src[1]);
1184b8e80941Smrg                break;
1185b8e80941Smrg        case nir_op_ishl:
1186b8e80941Smrg                result = qir_SHL(c, src[0], src[1]);
1187b8e80941Smrg                break;
1188b8e80941Smrg        case nir_op_imin:
1189b8e80941Smrg                result = qir_MIN(c, src[0], src[1]);
1190b8e80941Smrg                break;
1191b8e80941Smrg        case nir_op_imax:
1192b8e80941Smrg                result = qir_MAX(c, src[0], src[1]);
1193b8e80941Smrg                break;
1194b8e80941Smrg        case nir_op_iand:
1195b8e80941Smrg                result = qir_AND(c, src[0], src[1]);
1196b8e80941Smrg                break;
1197b8e80941Smrg        case nir_op_ior:
1198b8e80941Smrg                result = qir_OR(c, src[0], src[1]);
1199b8e80941Smrg                break;
1200b8e80941Smrg        case nir_op_ixor:
1201b8e80941Smrg                result = qir_XOR(c, src[0], src[1]);
1202b8e80941Smrg                break;
1203b8e80941Smrg        case nir_op_inot:
1204b8e80941Smrg                result = qir_NOT(c, src[0]);
1205b8e80941Smrg                break;
1206b8e80941Smrg
1207b8e80941Smrg        case nir_op_imul:
1208b8e80941Smrg                result = ntq_umul(c, src[0], src[1]);
1209b8e80941Smrg                break;
1210b8e80941Smrg
1211b8e80941Smrg        case nir_op_seq:
1212b8e80941Smrg        case nir_op_sne:
1213b8e80941Smrg        case nir_op_sge:
1214b8e80941Smrg        case nir_op_slt:
1215b8e80941Smrg        case nir_op_feq32:
1216b8e80941Smrg        case nir_op_fne32:
1217b8e80941Smrg        case nir_op_fge32:
1218b8e80941Smrg        case nir_op_flt32:
1219b8e80941Smrg        case nir_op_ieq32:
1220b8e80941Smrg        case nir_op_ine32:
1221b8e80941Smrg        case nir_op_ige32:
1222b8e80941Smrg        case nir_op_uge32:
1223b8e80941Smrg        case nir_op_ilt32:
1224b8e80941Smrg                if (!ntq_emit_comparison(c, &result, instr, instr)) {
1225b8e80941Smrg                        fprintf(stderr, "Bad comparison instruction\n");
1226b8e80941Smrg                }
1227b8e80941Smrg                break;
1228b8e80941Smrg
1229b8e80941Smrg        case nir_op_b32csel:
1230b8e80941Smrg                result = ntq_emit_bcsel(c, instr, src);
1231b8e80941Smrg                break;
1232b8e80941Smrg        case nir_op_fcsel:
1233b8e80941Smrg                qir_SF(c, src[0]);
1234b8e80941Smrg                result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC, src[1], src[2]));
1235b8e80941Smrg                break;
1236b8e80941Smrg
1237b8e80941Smrg        case nir_op_frcp:
1238b8e80941Smrg                result = ntq_rcp(c, src[0]);
1239b8e80941Smrg                break;
1240b8e80941Smrg        case nir_op_frsq:
1241b8e80941Smrg                result = ntq_rsq(c, src[0]);
1242b8e80941Smrg                break;
1243b8e80941Smrg        case nir_op_fexp2:
1244b8e80941Smrg                result = qir_EXP2(c, src[0]);
1245b8e80941Smrg                break;
1246b8e80941Smrg        case nir_op_flog2:
1247b8e80941Smrg                result = qir_LOG2(c, src[0]);
1248b8e80941Smrg                break;
1249b8e80941Smrg
1250b8e80941Smrg        case nir_op_ftrunc:
1251b8e80941Smrg                result = qir_ITOF(c, qir_FTOI(c, src[0]));
1252b8e80941Smrg                break;
1253b8e80941Smrg        case nir_op_fceil:
1254b8e80941Smrg                result = ntq_fceil(c, src[0]);
1255b8e80941Smrg                break;
1256b8e80941Smrg        case nir_op_ffract:
1257b8e80941Smrg                result = ntq_ffract(c, src[0]);
1258b8e80941Smrg                break;
1259b8e80941Smrg        case nir_op_ffloor:
1260b8e80941Smrg                result = ntq_ffloor(c, src[0]);
1261b8e80941Smrg                break;
1262b8e80941Smrg
1263b8e80941Smrg        case nir_op_fsin:
1264b8e80941Smrg                result = ntq_fsin(c, src[0]);
1265b8e80941Smrg                break;
1266b8e80941Smrg        case nir_op_fcos:
1267b8e80941Smrg                result = ntq_fcos(c, src[0]);
1268b8e80941Smrg                break;
1269b8e80941Smrg
1270b8e80941Smrg        case nir_op_fsign:
1271b8e80941Smrg                result = ntq_fsign(c, src[0]);
1272b8e80941Smrg                break;
1273b8e80941Smrg
1274b8e80941Smrg        case nir_op_fabs:
1275b8e80941Smrg                result = qir_FMAXABS(c, src[0], src[0]);
1276b8e80941Smrg                break;
1277b8e80941Smrg        case nir_op_iabs:
1278b8e80941Smrg                result = qir_MAX(c, src[0],
1279b8e80941Smrg                                qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
1280b8e80941Smrg                break;
1281b8e80941Smrg
1282b8e80941Smrg        case nir_op_ibitfield_extract:
1283b8e80941Smrg                result = ntq_emit_ibfe(c, src[0], src[1], src[2]);
1284b8e80941Smrg                break;
1285b8e80941Smrg
1286b8e80941Smrg        case nir_op_ubitfield_extract:
1287b8e80941Smrg                result = ntq_emit_ubfe(c, src[0], src[1], src[2]);
1288b8e80941Smrg                break;
1289b8e80941Smrg
1290b8e80941Smrg        case nir_op_usadd_4x8:
1291b8e80941Smrg                result = qir_V8ADDS(c, src[0], src[1]);
1292b8e80941Smrg                break;
1293b8e80941Smrg
1294b8e80941Smrg        case nir_op_ussub_4x8:
1295b8e80941Smrg                result = qir_V8SUBS(c, src[0], src[1]);
1296b8e80941Smrg                break;
1297b8e80941Smrg
1298b8e80941Smrg        case nir_op_umin_4x8:
1299b8e80941Smrg                result = qir_V8MIN(c, src[0], src[1]);
1300b8e80941Smrg                break;
1301b8e80941Smrg
1302b8e80941Smrg        case nir_op_umax_4x8:
1303b8e80941Smrg                result = qir_V8MAX(c, src[0], src[1]);
1304b8e80941Smrg                break;
1305b8e80941Smrg
1306b8e80941Smrg        case nir_op_umul_unorm_4x8:
1307b8e80941Smrg                result = qir_V8MULD(c, src[0], src[1]);
1308b8e80941Smrg                break;
1309b8e80941Smrg
1310b8e80941Smrg        case nir_op_fddx:
1311b8e80941Smrg        case nir_op_fddx_coarse:
1312b8e80941Smrg        case nir_op_fddx_fine:
1313b8e80941Smrg                result = ntq_fddx(c, src[0]);
1314b8e80941Smrg                break;
1315b8e80941Smrg
1316b8e80941Smrg        case nir_op_fddy:
1317b8e80941Smrg        case nir_op_fddy_coarse:
1318b8e80941Smrg        case nir_op_fddy_fine:
1319b8e80941Smrg                result = ntq_fddy(c, src[0]);
1320b8e80941Smrg                break;
1321b8e80941Smrg
1322b8e80941Smrg        default:
1323b8e80941Smrg                fprintf(stderr, "unknown NIR ALU inst: ");
1324b8e80941Smrg                nir_print_instr(&instr->instr, stderr);
1325b8e80941Smrg                fprintf(stderr, "\n");
1326b8e80941Smrg                abort();
1327b8e80941Smrg        }
1328b8e80941Smrg
1329b8e80941Smrg        /* We have a scalar result, so the instruction should only have a
1330b8e80941Smrg         * single channel written to.
1331b8e80941Smrg         */
1332b8e80941Smrg        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
1333b8e80941Smrg        ntq_store_dest(c, &instr->dest.dest,
1334b8e80941Smrg                       ffs(instr->dest.write_mask) - 1, result);
1335b8e80941Smrg}
1336b8e80941Smrg
/**
 * Emits the end-of-fragment-shader writes to the TLB: stencil setup,
 * sample mask, Z, and finally color, in the order the code below emits
 * them.
 */
static void
emit_frag_end(struct vc4_compile *c)
{
        /* Color to be written: the shader's color output if it has one,
         * otherwise a zero constant.
         */
        struct qreg color;
        if (c->output_color_index != -1) {
                color = c->outputs[c->output_color_index];
        } else {
                color = qir_uniform_ui(c, 0);
        }

        /* If the shader discards, set flags from c->discard so that the Z
         * and color writes below only execute for channels where the
         * discard flag is zero (QPU_COND_ZS = "Z flag set").
         */
        uint32_t discard_cond = QPU_COND_ALWAYS;
        if (c->s->info.fs.uses_discard) {
                qir_SF(c, c->discard);
                discard_cond = QPU_COND_ZS;
        }

        /* Stencil setup: one base config word, plus extra words when
         * two-sided stencil or full writemasks are in use.
         */
        if (c->fs_key->stencil_enabled) {
                qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                             qir_uniform(c, QUNIFORM_STENCIL, 0));
                if (c->fs_key->stencil_twoside) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                                     qir_uniform(c, QUNIFORM_STENCIL, 1));
                }
                if (c->fs_key->stencil_full_writemasks) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                                     qir_uniform(c, QUNIFORM_STENCIL, 2));
                }
        }

        /* Shader-written sample mask, if any. */
        if (c->output_sample_mask_index != -1) {
                qir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }

        /* Z write: either the shader's depth output scaled to 24-bit
         * fixed point, or the interpolated fragment Z.
         */
        if (c->fs_key->depth_enabled) {
                if (c->output_position_index != -1) {
                        qir_FTOI_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
                                      qir_FMUL(c,
                                               c->outputs[c->output_position_index],
                                               qir_uniform_f(c, 0xffffff)))->cond = discard_cond;
                } else {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
                                     qir_FRAG_Z(c))->cond = discard_cond;
                }
        }

        /* Color write: one combined write, or one write per sample when
         * outputting per-sample MSAA colors.
         */
        if (!c->msaa_per_sample_output) {
                qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE, 0),
                             color)->cond = discard_cond;
        } else {
                for (int i = 0; i < VC4_MAX_SAMPLES; i++) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE_MS, 0),
                                     c->sample_colors[i])->cond = discard_cond;
                }
        }
}
1392b8e80941Smrg
1393b8e80941Smrgstatic void
1394b8e80941Smrgemit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
1395b8e80941Smrg{
1396b8e80941Smrg        struct qreg packed = qir_get_temp(c);
1397b8e80941Smrg
1398b8e80941Smrg        for (int i = 0; i < 2; i++) {
1399b8e80941Smrg                struct qreg scale =
1400b8e80941Smrg                        qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);
1401b8e80941Smrg
1402b8e80941Smrg                struct qreg packed_chan = packed;
1403b8e80941Smrg                packed_chan.pack = QPU_PACK_A_16A + i;
1404b8e80941Smrg
1405b8e80941Smrg                qir_FTOI_dest(c, packed_chan,
1406b8e80941Smrg                              qir_FMUL(c,
1407b8e80941Smrg                                       qir_FMUL(c,
1408b8e80941Smrg                                                c->outputs[c->output_position_index + i],
1409b8e80941Smrg                                                scale),
1410b8e80941Smrg                                       rcp_w));
1411b8e80941Smrg        }
1412b8e80941Smrg
1413b8e80941Smrg        qir_VPM_WRITE(c, packed);
1414b8e80941Smrg}
1415b8e80941Smrg
1416b8e80941Smrgstatic void
1417b8e80941Smrgemit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
1418b8e80941Smrg{
1419b8e80941Smrg        struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
1420b8e80941Smrg        struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);
1421b8e80941Smrg
1422b8e80941Smrg        qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
1423b8e80941Smrg                                                          c->outputs[c->output_position_index + 2],
1424b8e80941Smrg                                                          zscale),
1425b8e80941Smrg                                              rcp_w),
1426b8e80941Smrg                                  zoffset));
1427b8e80941Smrg}
1428b8e80941Smrg
1429b8e80941Smrgstatic void
1430b8e80941Smrgemit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
1431b8e80941Smrg{
1432b8e80941Smrg        qir_VPM_WRITE(c, rcp_w);
1433b8e80941Smrg}
1434b8e80941Smrg
1435b8e80941Smrgstatic void
1436b8e80941Smrgemit_point_size_write(struct vc4_compile *c)
1437b8e80941Smrg{
1438b8e80941Smrg        struct qreg point_size;
1439b8e80941Smrg
1440b8e80941Smrg        if (c->output_point_size_index != -1)
1441b8e80941Smrg                point_size = c->outputs[c->output_point_size_index];
1442b8e80941Smrg        else
1443b8e80941Smrg                point_size = qir_uniform_f(c, 1.0);
1444b8e80941Smrg
1445b8e80941Smrg        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
1446b8e80941Smrg         * BCM21553).
1447b8e80941Smrg         */
1448b8e80941Smrg        point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));
1449b8e80941Smrg
1450b8e80941Smrg        qir_VPM_WRITE(c, point_size);
1451b8e80941Smrg}
1452b8e80941Smrg
1453b8e80941Smrg/**
1454b8e80941Smrg * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
1455b8e80941Smrg *
1456b8e80941Smrg * The simulator insists that there be at least one vertex attribute, so
1457b8e80941Smrg * vc4_draw.c will emit one if it wouldn't have otherwise.  The simulator also
1458b8e80941Smrg * insists that all vertex attributes loaded get read by the VS/CS, so we have
1459b8e80941Smrg * to consume it here.
1460b8e80941Smrg */
1461b8e80941Smrgstatic void
1462b8e80941Smrgemit_stub_vpm_read(struct vc4_compile *c)
1463b8e80941Smrg{
1464b8e80941Smrg        if (c->num_inputs)
1465b8e80941Smrg                return;
1466b8e80941Smrg
1467b8e80941Smrg        c->vattr_sizes[0] = 4;
1468b8e80941Smrg        (void)qir_MOV(c, qir_reg(QFILE_VPM, 0));
1469b8e80941Smrg        c->num_inputs++;
1470b8e80941Smrg}
1471b8e80941Smrg
1472b8e80941Smrgstatic void
1473b8e80941Smrgemit_vert_end(struct vc4_compile *c,
1474b8e80941Smrg              struct vc4_varying_slot *fs_inputs,
1475b8e80941Smrg              uint32_t num_fs_inputs)
1476b8e80941Smrg{
1477b8e80941Smrg        struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
1478b8e80941Smrg
1479b8e80941Smrg        emit_stub_vpm_read(c);
1480b8e80941Smrg
1481b8e80941Smrg        emit_scaled_viewport_write(c, rcp_w);
1482b8e80941Smrg        emit_zs_write(c, rcp_w);
1483b8e80941Smrg        emit_rcp_wc_write(c, rcp_w);
1484b8e80941Smrg        if (c->vs_key->per_vertex_point_size)
1485b8e80941Smrg                emit_point_size_write(c);
1486b8e80941Smrg
1487b8e80941Smrg        for (int i = 0; i < num_fs_inputs; i++) {
1488b8e80941Smrg                struct vc4_varying_slot *input = &fs_inputs[i];
1489b8e80941Smrg                int j;
1490b8e80941Smrg
1491b8e80941Smrg                for (j = 0; j < c->num_outputs; j++) {
1492b8e80941Smrg                        struct vc4_varying_slot *output =
1493b8e80941Smrg                                &c->output_slots[j];
1494b8e80941Smrg
1495b8e80941Smrg                        if (input->slot == output->slot &&
1496b8e80941Smrg                            input->swizzle == output->swizzle) {
1497b8e80941Smrg                                qir_VPM_WRITE(c, c->outputs[j]);
1498b8e80941Smrg                                break;
1499b8e80941Smrg                        }
1500b8e80941Smrg                }
1501b8e80941Smrg                /* Emit padding if we didn't find a declared VS output for
1502b8e80941Smrg                 * this FS input.
1503b8e80941Smrg                 */
1504b8e80941Smrg                if (j == c->num_outputs)
1505b8e80941Smrg                        qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
1506b8e80941Smrg        }
1507b8e80941Smrg}
1508b8e80941Smrg
1509b8e80941Smrgstatic void
1510b8e80941Smrgemit_coord_end(struct vc4_compile *c)
1511b8e80941Smrg{
1512b8e80941Smrg        struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
1513b8e80941Smrg
1514b8e80941Smrg        emit_stub_vpm_read(c);
1515b8e80941Smrg
1516b8e80941Smrg        for (int i = 0; i < 4; i++)
1517b8e80941Smrg                qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);
1518b8e80941Smrg
1519b8e80941Smrg        emit_scaled_viewport_write(c, rcp_w);
1520b8e80941Smrg        emit_zs_write(c, rcp_w);
1521b8e80941Smrg        emit_rcp_wc_write(c, rcp_w);
1522b8e80941Smrg        if (c->vs_key->per_vertex_point_size)
1523b8e80941Smrg                emit_point_size_write(c);
1524b8e80941Smrg}
1525b8e80941Smrg
/**
 * Runs the NIR optimization loop over the shader until no pass reports
 * further progress.
 */
static void
vc4_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                /* vars-to-SSA runs unconditionally (NIR_PASS_V); the rest
                 * accumulate into the progress flag that drives the loop.
                 */
                NIR_PASS_V(s, nir_lower_vars_to_ssa);
                NIR_PASS(progress, s, nir_lower_alu_to_scalar);
                NIR_PASS(progress, s, nir_lower_phis_to_scalar);
                NIR_PASS(progress, s, nir_copy_prop);
                NIR_PASS(progress, s, nir_opt_remove_phis);
                NIR_PASS(progress, s, nir_opt_dce);
                NIR_PASS(progress, s, nir_opt_dead_cf);
                NIR_PASS(progress, s, nir_opt_cse);
                NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
                NIR_PASS(progress, s, nir_opt_algebraic);
                NIR_PASS(progress, s, nir_opt_constant_folding);
                NIR_PASS(progress, s, nir_opt_undef);
                NIR_PASS(progress, s, nir_opt_loop_unroll,
                         nir_var_shader_in |
                         nir_var_shader_out |
                         nir_var_function_temp);
        } while (progress);
}
1552b8e80941Smrg
1553b8e80941Smrgstatic int
1554b8e80941Smrgdriver_location_compare(const void *in_a, const void *in_b)
1555b8e80941Smrg{
1556b8e80941Smrg        const nir_variable *const *a = in_a;
1557b8e80941Smrg        const nir_variable *const *b = in_b;
1558b8e80941Smrg
1559b8e80941Smrg        return (*a)->data.driver_location - (*b)->data.driver_location;
1560b8e80941Smrg}
1561b8e80941Smrg
1562b8e80941Smrgstatic void
1563b8e80941Smrgntq_setup_inputs(struct vc4_compile *c)
1564b8e80941Smrg{
1565b8e80941Smrg        unsigned num_entries = 0;
1566b8e80941Smrg        nir_foreach_variable(var, &c->s->inputs)
1567b8e80941Smrg                num_entries++;
1568b8e80941Smrg
1569b8e80941Smrg        nir_variable *vars[num_entries];
1570b8e80941Smrg
1571b8e80941Smrg        unsigned i = 0;
1572b8e80941Smrg        nir_foreach_variable(var, &c->s->inputs)
1573b8e80941Smrg                vars[i++] = var;
1574b8e80941Smrg
1575b8e80941Smrg        /* Sort the variables so that we emit the input setup in
1576b8e80941Smrg         * driver_location order.  This is required for VPM reads, whose data
1577b8e80941Smrg         * is fetched into the VPM in driver_location (TGSI register index)
1578b8e80941Smrg         * order.
1579b8e80941Smrg         */
1580b8e80941Smrg        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);
1581b8e80941Smrg
1582b8e80941Smrg        for (unsigned i = 0; i < num_entries; i++) {
1583b8e80941Smrg                nir_variable *var = vars[i];
1584b8e80941Smrg                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1585b8e80941Smrg                unsigned loc = var->data.driver_location;
1586b8e80941Smrg
1587b8e80941Smrg                assert(array_len == 1);
1588b8e80941Smrg                (void)array_len;
1589b8e80941Smrg                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1590b8e80941Smrg                                  (loc + 1) * 4);
1591b8e80941Smrg
1592b8e80941Smrg                if (c->stage == QSTAGE_FRAG) {
1593b8e80941Smrg                        if (var->data.location == VARYING_SLOT_POS) {
1594b8e80941Smrg                                emit_fragcoord_input(c, loc);
1595b8e80941Smrg                        } else if (var->data.location == VARYING_SLOT_PNTC ||
1596b8e80941Smrg                                   (var->data.location >= VARYING_SLOT_VAR0 &&
1597b8e80941Smrg                                    (c->fs_key->point_sprite_mask &
1598b8e80941Smrg                                     (1 << (var->data.location -
1599b8e80941Smrg                                            VARYING_SLOT_VAR0))))) {
1600b8e80941Smrg                                c->inputs[loc * 4 + 0] = c->point_x;
1601b8e80941Smrg                                c->inputs[loc * 4 + 1] = c->point_y;
1602b8e80941Smrg                        } else {
1603b8e80941Smrg                                emit_fragment_input(c, loc, var->data.location);
1604b8e80941Smrg                        }
1605b8e80941Smrg                } else {
1606b8e80941Smrg                        emit_vertex_input(c, loc);
1607b8e80941Smrg                }
1608b8e80941Smrg        }
1609b8e80941Smrg}
1610b8e80941Smrg
/**
 * Registers all four components of each shader output variable and
 * records the locations of the specially-handled outputs (color, depth or
 * position, sample mask, point size) on the compile context.
 */
static void
ntq_setup_outputs(struct vc4_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                /* Only non-array outputs are supported. */
                assert(array_len == 1);
                (void)array_len;

                for (int i = 0; i < 4; i++)
                        add_output(c, loc + i, var->data.location, i);

                if (c->stage == QSTAGE_FRAG) {
                        switch (var->data.location) {
                        case FRAG_RESULT_COLOR:
                        case FRAG_RESULT_DATA0:
                                c->output_color_index = loc;
                                break;
                        case FRAG_RESULT_DEPTH:
                                c->output_position_index = loc;
                                break;
                        case FRAG_RESULT_SAMPLE_MASK:
                                c->output_sample_mask_index = loc;
                                break;
                        }
                } else {
                        /* Vertex/coordinate shader outputs. */
                        switch (var->data.location) {
                        case VARYING_SLOT_POS:
                                c->output_position_index = loc;
                                break;
                        case VARYING_SLOT_PSIZ:
                                c->output_point_size_index = loc;
                                break;
                        }
                }
        }
}
1649b8e80941Smrg
1650b8e80941Smrg/**
1651b8e80941Smrg * Sets up the mapping from nir_register to struct qreg *.
1652b8e80941Smrg *
1653b8e80941Smrg * Each nir_register gets a struct qreg per 32-bit component being stored.
1654b8e80941Smrg */
1655b8e80941Smrgstatic void
1656b8e80941Smrgntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
1657b8e80941Smrg{
1658b8e80941Smrg        foreach_list_typed(nir_register, nir_reg, node, list) {
1659b8e80941Smrg                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
1660b8e80941Smrg                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
1661b8e80941Smrg                                                  array_len *
1662b8e80941Smrg                                                  nir_reg->num_components);
1663b8e80941Smrg
1664b8e80941Smrg                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
1665b8e80941Smrg
1666b8e80941Smrg                for (int i = 0; i < array_len * nir_reg->num_components; i++)
1667b8e80941Smrg                        qregs[i] = qir_get_temp(c);
1668b8e80941Smrg        }
1669b8e80941Smrg}
1670b8e80941Smrg
1671b8e80941Smrgstatic void
1672b8e80941Smrgntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
1673b8e80941Smrg{
1674b8e80941Smrg        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1675b8e80941Smrg        for (int i = 0; i < instr->def.num_components; i++)
1676b8e80941Smrg                qregs[i] = qir_uniform_ui(c, instr->value[i].u32);
1677b8e80941Smrg
1678b8e80941Smrg        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
1679b8e80941Smrg}
1680b8e80941Smrg
1681b8e80941Smrgstatic void
1682b8e80941Smrgntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
1683b8e80941Smrg{
1684b8e80941Smrg        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1685b8e80941Smrg
1686b8e80941Smrg        /* QIR needs there to be *some* value, so pick 0 (same as for
1687b8e80941Smrg         * ntq_setup_registers().
1688b8e80941Smrg         */
1689b8e80941Smrg        for (int i = 0; i < instr->def.num_components; i++)
1690b8e80941Smrg                qregs[i] = qir_uniform_ui(c, 0);
1691b8e80941Smrg}
1692b8e80941Smrg
1693b8e80941Smrgstatic void
1694b8e80941Smrgntq_emit_color_read(struct vc4_compile *c, nir_intrinsic_instr *instr)
1695b8e80941Smrg{
1696b8e80941Smrg        assert(nir_src_as_uint(instr->src[0]) == 0);
1697b8e80941Smrg
1698b8e80941Smrg        /* Reads of the per-sample color need to be done in
1699b8e80941Smrg         * order.
1700b8e80941Smrg         */
1701b8e80941Smrg        int sample_index = (nir_intrinsic_base(instr) -
1702b8e80941Smrg                            VC4_NIR_TLB_COLOR_READ_INPUT);
1703b8e80941Smrg        for (int i = 0; i <= sample_index; i++) {
1704b8e80941Smrg                if (c->color_reads[i].file == QFILE_NULL) {
1705b8e80941Smrg                        c->color_reads[i] =
1706b8e80941Smrg                                qir_TLB_COLOR_READ(c);
1707b8e80941Smrg                }
1708b8e80941Smrg        }
1709b8e80941Smrg        ntq_store_dest(c, &instr->dest, 0,
1710b8e80941Smrg                       qir_MOV(c, c->color_reads[sample_index]));
1711b8e80941Smrg}
1712b8e80941Smrg
1713b8e80941Smrgstatic void
1714b8e80941Smrgntq_emit_load_input(struct vc4_compile *c, nir_intrinsic_instr *instr)
1715b8e80941Smrg{
1716b8e80941Smrg        assert(instr->num_components == 1);
1717b8e80941Smrg        assert(nir_src_is_const(instr->src[0]) &&
1718b8e80941Smrg               "vc4 doesn't support indirect inputs");
1719b8e80941Smrg
1720b8e80941Smrg        if (c->stage == QSTAGE_FRAG &&
1721b8e80941Smrg            nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
1722b8e80941Smrg                ntq_emit_color_read(c, instr);
1723b8e80941Smrg                return;
1724b8e80941Smrg        }
1725b8e80941Smrg
1726b8e80941Smrg        uint32_t offset = nir_intrinsic_base(instr) +
1727b8e80941Smrg                          nir_src_as_uint(instr->src[0]);
1728b8e80941Smrg        int comp = nir_intrinsic_component(instr);
1729b8e80941Smrg        ntq_store_dest(c, &instr->dest, 0,
1730b8e80941Smrg                       qir_MOV(c, c->inputs[offset * 4 + comp]));
1731b8e80941Smrg}
1732b8e80941Smrg
/**
 * Emits QIR for a NIR intrinsic instruction.
 *
 * Note that unknown intrinsics are reported to stderr but do not abort
 * compilation (unlike unknown ALU ops, which do).
 */
static void
ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
        unsigned offset;

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
                if (nir_src_is_const(instr->src[0])) {
                        /* Direct access: base + src[0] is a byte offset
                         * into the uniform storage.
                         */
                        offset = nir_intrinsic_base(instr) +
                                 nir_src_as_uint(instr->src[0]);
                        assert(offset % 4 == 0);
                        /* We need dwords */
                        offset = offset / 4;
                        ntq_store_dest(c, &instr->dest, 0,
                                       qir_uniform(c, QUNIFORM_UNIFORM,
                                                   offset));
                } else {
                        ntq_store_dest(c, &instr->dest, 0,
                                       indirect_uniform_load(c, instr));
                }
                break;

        case nir_intrinsic_load_ubo:
                assert(instr->num_components == 1);
                ntq_store_dest(c, &instr->dest, 0, vc4_ubo_load(c, instr));
                break;

        case nir_intrinsic_load_user_clip_plane:
                /* One uniform per requested component of the selected
                 * clip plane.
                 */
                for (int i = 0; i < instr->num_components; i++) {
                        ntq_store_dest(c, &instr->dest, i,
                                       qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                                   nir_intrinsic_ucp_id(instr) *
                                                   4 + i));
                }
                break;

        case nir_intrinsic_load_blend_const_color_r_float:
        case nir_intrinsic_load_blend_const_color_g_float:
        case nir_intrinsic_load_blend_const_color_b_float:
        case nir_intrinsic_load_blend_const_color_a_float:
                /* Map r/g/b/a onto consecutive QUNIFORM_BLEND_CONST_COLOR_*
                 * values by the intrinsic's distance from the _r_ variant.
                 */
                ntq_store_dest(c, &instr->dest, 0,
                               qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_X +
                                           (instr->intrinsic -
                                            nir_intrinsic_load_blend_const_color_r_float),
                                           0));
                break;

        case nir_intrinsic_load_blend_const_color_rgba8888_unorm:
                ntq_store_dest(c, &instr->dest, 0,
                               qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_RGBA,
                                           0));
                break;

        case nir_intrinsic_load_blend_const_color_aaaa8888_unorm:
                ntq_store_dest(c, &instr->dest, 0,
                               qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_AAAA,
                                           0));
                break;

        case nir_intrinsic_load_alpha_ref_float:
                ntq_store_dest(c, &instr->dest, 0,
                               qir_uniform(c, QUNIFORM_ALPHA_REF, 0));
                break;

        case nir_intrinsic_load_sample_mask_in:
                ntq_store_dest(c, &instr->dest, 0,
                               qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
                break;

        case nir_intrinsic_load_front_face:
                /* The register contains 0 (front) or 1 (back), and we need to
                 * turn it into a NIR bool where true means front.
                 */
                ntq_store_dest(c, &instr->dest, 0,
                               qir_ADD(c,
                                       qir_uniform_ui(c, -1),
                                       qir_reg(QFILE_FRAG_REV_FLAG, 0)));
                break;

        case nir_intrinsic_load_input:
                ntq_emit_load_input(c, instr);
                break;

        case nir_intrinsic_store_output:
                assert(nir_src_is_const(instr->src[1]) &&
                       "vc4 doesn't support indirect outputs");
                offset = nir_intrinsic_base(instr) +
                         nir_src_as_uint(instr->src[1]);

                /* MSAA color outputs are the only case where we have an
                 * output that's not lowered to being a store of a single 32
                 * bit value.
                 */
                if (c->stage == QSTAGE_FRAG && instr->num_components == 4) {
                        assert(offset == c->output_color_index);
                        for (int i = 0; i < 4; i++) {
                                c->sample_colors[i] =
                                        qir_MOV(c, ntq_get_src(c, instr->src[0],
                                                               i));
                        }
                } else {
                        offset = offset * 4 + nir_intrinsic_component(instr);
                        assert(instr->num_components == 1);
                        c->outputs[offset] =
                                qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
                        c->num_outputs = MAX2(c->num_outputs, offset + 1);
                }
                break;

        case nir_intrinsic_discard:
                if (c->execute.file != QFILE_NULL) {
                        /* Under non-uniform control flow, only set the
                         * discard flag for channels this block executes
                         * (c->execute == 0 means active -- see
                         * ntq_activate_execute_for_block()).
                         */
                        qir_SF(c, c->execute);
                        qir_MOV_cond(c, QPU_COND_ZS, c->discard,
                                     qir_uniform_ui(c, ~0));
                } else {
                        qir_MOV_dest(c, c->discard, qir_uniform_ui(c, ~0));
                }
                break;

        case nir_intrinsic_discard_if: {
                /* true (~0) if we're discarding */
                struct qreg cond = ntq_get_src(c, instr->src[0], 0);

                if (c->execute.file != QFILE_NULL) {
                        /* execute == 0 means the channel is active.  Invert
                         * the condition so that we can use zero as "executing
                         * and discarding."
                         */
                        qir_SF(c, qir_AND(c, c->execute, qir_NOT(c, cond)));
                        qir_MOV_cond(c, QPU_COND_ZS, c->discard, cond);
                } else {
                        qir_OR_dest(c, c->discard, c->discard,
                                    ntq_get_src(c, instr->src[0], 0));
                }

                break;
        }

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}
1879b8e80941Smrg
1880b8e80941Smrg/* Clears (activates) the execute flags for any channels whose jump target
1881b8e80941Smrg * matches this block.
1882b8e80941Smrg */
1883b8e80941Smrgstatic void
1884b8e80941Smrgntq_activate_execute_for_block(struct vc4_compile *c)
1885b8e80941Smrg{
1886b8e80941Smrg        qir_SF(c, qir_SUB(c,
1887b8e80941Smrg                          c->execute,
1888b8e80941Smrg                          qir_uniform_ui(c, c->cur_block->index)));
1889b8e80941Smrg        qir_MOV_cond(c, QPU_COND_ZS, c->execute, qir_uniform_ui(c, 0));
1890b8e80941Smrg}
1891b8e80941Smrg
/* Emits a NIR if statement using the driver's emulated-control-flow scheme.
 *
 * Each channel's c->execute value is 0 while the channel is active and
 * otherwise holds the index of the block it is waiting to resume at (see
 * the "execute == 0 means the channel is active" convention used by the
 * discard_if handling above).  Real branches are only emitted when all
 * channels agree, via the QPU_COND_BRANCH_ALL_* conditions.
 */
static void
ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
{
        if (!c->vc4->screen->has_control_flow) {
                fprintf(stderr,
                        "IF statement support requires updated kernel.\n");
                return;
        }

        /* An else branch that is a single empty block can share the
         * after-block, saving a block and a branch.
         */
        nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
        bool empty_else_block =
                (nir_else_block == nir_if_last_else_block(if_stmt) &&
                 exec_list_is_empty(&nir_else_block->instr_list));

        struct qblock *then_block = qir_new_block(c);
        struct qblock *after_block = qir_new_block(c);
        struct qblock *else_block;
        if (empty_else_block)
                else_block = after_block;
        else
                else_block = qir_new_block(c);

        /* At the top level there is no execute register yet (all channels
         * active); allocate it as all-zeros and remember to drop it again
         * once we rejoin at after_block.
         */
        bool was_top_level = false;
        if (c->execute.file == QFILE_NULL) {
                c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
                was_top_level = true;
        }

        /* Set ZS for executing (execute == 0) and jumping (if->condition ==
         * 0) channels, and then update execute flags for those to point to
         * the ELSE block.
         */
        qir_SF(c, qir_OR(c,
                         c->execute,
                         ntq_get_src(c, if_stmt->condition, 0)));
        qir_MOV_cond(c, QPU_COND_ZS, c->execute,
                     qir_uniform_ui(c, else_block->index));

        /* Jump to ELSE if nothing is active for THEN, otherwise fall
         * through.
         */
        qir_SF(c, c->execute);
        qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZC);
        qir_link_blocks(c->cur_block, else_block);
        qir_link_blocks(c->cur_block, then_block);

        /* Process the THEN block. */
        qir_set_emit_block(c, then_block);
        ntq_emit_cf_list(c, &if_stmt->then_list);

        if (!empty_else_block) {
                /* Handle the end of the THEN block.  First, all currently
                 * active channels update their execute flags to point to
                 * ENDIF
                 */
                qir_SF(c, c->execute);
                qir_MOV_cond(c, QPU_COND_ZS, c->execute,
                             qir_uniform_ui(c, after_block->index));

                /* If everything points at ENDIF, then jump there immediately. */
                qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, after_block->index)));
                qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
                qir_link_blocks(c->cur_block, after_block);
                qir_link_blocks(c->cur_block, else_block);

                /* Reactivate the channels parked on this block, then emit
                 * the ELSE body.
                 */
                qir_set_emit_block(c, else_block);
                ntq_activate_execute_for_block(c);
                ntq_emit_cf_list(c, &if_stmt->else_list);
        }

        qir_link_blocks(c->cur_block, after_block);

        qir_set_emit_block(c, after_block);
        if (was_top_level) {
                /* All channels are active again; drop the execute register. */
                c->execute = c->undef;
                c->last_top_block = c->cur_block;
        } else {
                ntq_activate_execute_for_block(c);
        }
}
1972b8e80941Smrg
1973b8e80941Smrgstatic void
1974b8e80941Smrgntq_emit_jump(struct vc4_compile *c, nir_jump_instr *jump)
1975b8e80941Smrg{
1976b8e80941Smrg        struct qblock *jump_block;
1977b8e80941Smrg        switch (jump->type) {
1978b8e80941Smrg        case nir_jump_break:
1979b8e80941Smrg                jump_block = c->loop_break_block;
1980b8e80941Smrg                break;
1981b8e80941Smrg        case nir_jump_continue:
1982b8e80941Smrg                jump_block = c->loop_cont_block;
1983b8e80941Smrg                break;
1984b8e80941Smrg        default:
1985b8e80941Smrg                unreachable("Unsupported jump type\n");
1986b8e80941Smrg        }
1987b8e80941Smrg
1988b8e80941Smrg        qir_SF(c, c->execute);
1989b8e80941Smrg        qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1990b8e80941Smrg                     qir_uniform_ui(c, jump_block->index));
1991b8e80941Smrg
1992b8e80941Smrg        /* Jump to the destination block if everyone has taken the jump. */
1993b8e80941Smrg        qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, jump_block->index)));
1994b8e80941Smrg        qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
1995b8e80941Smrg        struct qblock *new_block = qir_new_block(c);
1996b8e80941Smrg        qir_link_blocks(c->cur_block, jump_block);
1997b8e80941Smrg        qir_link_blocks(c->cur_block, new_block);
1998b8e80941Smrg        qir_set_emit_block(c, new_block);
1999b8e80941Smrg}
2000b8e80941Smrg
2001b8e80941Smrgstatic void
2002b8e80941Smrgntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
2003b8e80941Smrg{
2004b8e80941Smrg        switch (instr->type) {
2005b8e80941Smrg        case nir_instr_type_alu:
2006b8e80941Smrg                ntq_emit_alu(c, nir_instr_as_alu(instr));
2007b8e80941Smrg                break;
2008b8e80941Smrg
2009b8e80941Smrg        case nir_instr_type_intrinsic:
2010b8e80941Smrg                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
2011b8e80941Smrg                break;
2012b8e80941Smrg
2013b8e80941Smrg        case nir_instr_type_load_const:
2014b8e80941Smrg                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
2015b8e80941Smrg                break;
2016b8e80941Smrg
2017b8e80941Smrg        case nir_instr_type_ssa_undef:
2018b8e80941Smrg                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
2019b8e80941Smrg                break;
2020b8e80941Smrg
2021b8e80941Smrg        case nir_instr_type_tex:
2022b8e80941Smrg                ntq_emit_tex(c, nir_instr_as_tex(instr));
2023b8e80941Smrg                break;
2024b8e80941Smrg
2025b8e80941Smrg        case nir_instr_type_jump:
2026b8e80941Smrg                ntq_emit_jump(c, nir_instr_as_jump(instr));
2027b8e80941Smrg                break;
2028b8e80941Smrg
2029b8e80941Smrg        default:
2030b8e80941Smrg                fprintf(stderr, "Unknown NIR instr type: ");
2031b8e80941Smrg                nir_print_instr(instr, stderr);
2032b8e80941Smrg                fprintf(stderr, "\n");
2033b8e80941Smrg                abort();
2034848b8605Smrg        }
2035848b8605Smrg}
2036848b8605Smrg
2037848b8605Smrgstatic void
2038b8e80941Smrgntq_emit_block(struct vc4_compile *c, nir_block *block)
2039848b8605Smrg{
2040b8e80941Smrg        nir_foreach_instr(instr, block) {
2041b8e80941Smrg                ntq_emit_instr(c, instr);
2042b8e80941Smrg        }
2043b8e80941Smrg}
2044848b8605Smrg
2045b8e80941Smrgstatic void ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
2046848b8605Smrg
/* Emits a NIR loop using the emulated-control-flow scheme: channels park on
 * the continue block while any of them still wants another iteration, and
 * the loop back-edge is a real branch taken only when some channel is
 * continuing.
 */
static void
ntq_emit_loop(struct vc4_compile *c, nir_loop *loop)
{
        if (!c->vc4->screen->has_control_flow) {
                fprintf(stderr,
                        "loop support requires updated kernel.\n");
                ntq_emit_cf_list(c, &loop->body);
                return;
        }

        /* At the top level, allocate the execute register (all zeros == all
         * channels active) and remember to drop it when the loop is done.
         */
        bool was_top_level = false;
        if (c->execute.file == QFILE_NULL) {
                c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
                was_top_level = true;
        }

        /* Save the enclosing loop's blocks so nested loops restore them. */
        struct qblock *save_loop_cont_block = c->loop_cont_block;
        struct qblock *save_loop_break_block = c->loop_break_block;

        c->loop_cont_block = qir_new_block(c);
        c->loop_break_block = qir_new_block(c);

        qir_link_blocks(c->cur_block, c->loop_cont_block);
        qir_set_emit_block(c, c->loop_cont_block);
        ntq_activate_execute_for_block(c);

        ntq_emit_cf_list(c, &loop->body);

        /* If anything had explicitly continued, or is here at the end of the
         * loop, then we need to loop again.  SF updates are masked by the
         * instruction's condition, so we can do the OR of the two conditions
         * within SF.
         */
        qir_SF(c, c->execute);
        struct qinst *cont_check =
                qir_SUB_dest(c,
                             c->undef,
                             c->execute,
                             qir_uniform_ui(c, c->loop_cont_block->index));
        cont_check->cond = QPU_COND_ZC;
        cont_check->sf = true;

        /* Take the back-edge if any channel is active or continuing. */
        qir_BRANCH(c, QPU_COND_BRANCH_ANY_ZS);
        qir_link_blocks(c->cur_block, c->loop_cont_block);
        qir_link_blocks(c->cur_block, c->loop_break_block);

        qir_set_emit_block(c, c->loop_break_block);
        if (was_top_level) {
                /* All channels active again; drop the execute register. */
                c->execute = c->undef;
                c->last_top_block = c->cur_block;
        } else {
                ntq_activate_execute_for_block(c);
        }

        c->loop_break_block = save_loop_break_block;
        c->loop_cont_block = save_loop_cont_block;
}
2104848b8605Smrg
static void
ntq_emit_function(struct vc4_compile *c, nir_function_impl *func)
{
        /* This backend only compiles a single "main"; any other function
         * node reaching us is unexpected, so fail loudly.
         */
        fprintf(stderr, "FUNCTIONS not handled.\n");
        abort();
}
2111848b8605Smrg
2112848b8605Smrgstatic void
2113b8e80941Smrgntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
2114848b8605Smrg{
2115b8e80941Smrg        foreach_list_typed(nir_cf_node, node, node, list) {
2116b8e80941Smrg                switch (node->type) {
2117b8e80941Smrg                case nir_cf_node_block:
2118b8e80941Smrg                        ntq_emit_block(c, nir_cf_node_as_block(node));
2119b8e80941Smrg                        break;
2120b8e80941Smrg
2121b8e80941Smrg                case nir_cf_node_if:
2122b8e80941Smrg                        ntq_emit_if(c, nir_cf_node_as_if(node));
2123b8e80941Smrg                        break;
2124848b8605Smrg
2125b8e80941Smrg                case nir_cf_node_loop:
2126b8e80941Smrg                        ntq_emit_loop(c, nir_cf_node_as_loop(node));
2127b8e80941Smrg                        break;
2128848b8605Smrg
2129b8e80941Smrg                case nir_cf_node_function:
2130b8e80941Smrg                        ntq_emit_function(c, nir_cf_node_as_function(node));
2131b8e80941Smrg                        break;
2132b8e80941Smrg
2133b8e80941Smrg                default:
2134b8e80941Smrg                        fprintf(stderr, "Unknown NIR node type\n");
2135b8e80941Smrg                        abort();
2136b8e80941Smrg                }
2137b8e80941Smrg        }
2138848b8605Smrg}
2139848b8605Smrg
static void
ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        /* Allocate QIR storage for the impl's NIR registers before any
         * instruction in the body can reference them.
         */
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}
2146848b8605Smrg
2147848b8605Smrgstatic void
2148b8e80941Smrgnir_to_qir(struct vc4_compile *c)
2149848b8605Smrg{
2150b8e80941Smrg        if (c->stage == QSTAGE_FRAG && c->s->info.fs.uses_discard)
2151b8e80941Smrg                c->discard = qir_MOV(c, qir_uniform_ui(c, 0));
2152848b8605Smrg
2153b8e80941Smrg        ntq_setup_inputs(c);
2154b8e80941Smrg        ntq_setup_outputs(c);
2155848b8605Smrg
2156b8e80941Smrg        /* Find the main function and emit the body. */
2157b8e80941Smrg        nir_foreach_function(function, c->s) {
2158b8e80941Smrg                assert(strcmp(function->name, "main") == 0);
2159b8e80941Smrg                assert(function->impl);
2160b8e80941Smrg                ntq_emit_impl(c, function->impl);
2161848b8605Smrg        }
2162848b8605Smrg}
2163848b8605Smrg
/* NIR compiler options handed to state trackers for this backend: each
 * .lower_* flag requests that NIR rewrite that operation before the code
 * reaches the QIR translation.
 */
static const nir_shader_compiler_options nir_options = {
        .lower_all_io_to_temps = true,
        .lower_extract_byte = true,
        .lower_extract_word = true,
        .lower_fdiv = true,
        .lower_ffma = true,
        .lower_flrp32 = true,
        .lower_fpow = true,
        .lower_fsat = true,
        .lower_fsqrt = true,
        .lower_ldexp = true,
        .lower_negate = true,
        .native_integers = true,
        .max_unroll_iterations = 32,
};
2179848b8605Smrg
/* pipe_screen hook: every shader stage and IR kind gets the same NIR
 * compiler options (the ir/shader parameters are intentionally ignored).
 */
const void *
vc4_screen_get_compiler_options(struct pipe_screen *pscreen,
                                enum pipe_shader_ir ir,
                                enum pipe_shader_type shader)
{
        return &nir_options;
}
2187848b8605Smrg
2188b8e80941Smrgstatic int
2189b8e80941Smrgcount_nir_instrs(nir_shader *nir)
2190b8e80941Smrg{
2191b8e80941Smrg        int count = 0;
2192b8e80941Smrg        nir_foreach_function(function, nir) {
2193b8e80941Smrg                if (!function->impl)
2194b8e80941Smrg                        continue;
2195b8e80941Smrg                nir_foreach_block(block, function->impl) {
2196b8e80941Smrg                        nir_foreach_instr(instr, block)
2197b8e80941Smrg                                count++;
2198b8e80941Smrg                }
2199b8e80941Smrg        }
2200b8e80941Smrg        return count;
2201848b8605Smrg}
2202848b8605Smrg
/* Compiles one variant of a shader for the given stage and state key.
 *
 * Clones the uncompiled NIR, runs the key-dependent and backend lowering
 * passes (in an order the inline comments document as mandatory),
 * translates to QIR, optimizes, and generates QPU code.  Returns the
 * vc4_compile holding the results; on a threaded-FS failure, returns with
 * c->failed set so the caller can retry single-threaded.
 */
static struct vc4_compile *
vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
               struct vc4_key *key, bool fs_threaded)
{
        struct vc4_compile *c = qir_compile_init();

        c->vc4 = vc4;
        c->stage = stage;
        c->shader_state = &key->shader_state->base;
        c->program_id = key->shader_state->program_id;
        /* Atomic: variants of the same shader may compile concurrently. */
        c->variant_id =
                p_atomic_inc_return(&key->shader_state->compiled_variant_count);
        c->fs_threaded = fs_threaded;

        c->key = key;
        switch (stage) {
        case QSTAGE_FRAG:
                c->fs_key = (struct vc4_fs_key *)key;
                /* Point/line rendering gets extra implicit varyings
                 * (~0 slot: not sourced from a VS output).
                 */
                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, ~0, 0);
                        c->point_y = emit_fragment_varying(c, ~0, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, ~0, 0);
                }
                break;
        case QSTAGE_VERT:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        case QSTAGE_COORD:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        }

        /* Clone so key-specific lowering doesn't mutate the shared NIR. */
        c->s = nir_shader_clone(c, key->shader_state->base.ir.nir);

        if (stage == QSTAGE_FRAG) {
                if (c->fs_key->alpha_test_func != COMPARE_FUNC_ALWAYS) {
                        NIR_PASS_V(c->s, nir_lower_alpha_test,
                                   c->fs_key->alpha_test_func,
                                   c->fs_key->sample_alpha_to_one &&
                                   c->fs_key->msaa);
                }
                NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
        }

        struct nir_lower_tex_options tex_options = {
                /* We would need to implement txs, but we don't want the
                 * int/float conversions
                 */
                .lower_rect = false,

                .lower_txp = ~0,

                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and ARB_texture_swizzle-style swizzle.
         * The format swizzling applies before sRGB decode, and
         * ARB_texture_swizzle is the last thing before returning the sample.
         */
        for (int i = 0; i < ARRAY_SIZE(key->tex); i++) {
                enum pipe_format format = c->key->tex[i].format;

                if (!format)
                        continue;

                const uint8_t *format_swizzle = vc4_get_format_swizzle(format);

                for (int j = 0; j < 4; j++) {
                        uint8_t arb_swiz = c->key->tex[i].swizzle[j];

                        /* 0-3 select a component, so compose with the
                         * format swizzle; larger values are ZERO/ONE and
                         * pass through.
                         */
                        if (arb_swiz <= 3) {
                                tex_options.swizzles[i][j] =
                                        format_swizzle[arb_swiz];
                        } else {
                                tex_options.swizzles[i][j] = arb_swiz;
                        }
                }

                if (util_format_is_srgb(format))
                        tex_options.lower_srgb |= (1 << i);
        }

        NIR_PASS_V(c->s, nir_lower_tex, &tex_options);

        if (c->fs_key && c->fs_key->light_twoside)
                NIR_PASS_V(c->s, nir_lower_two_sided_color);

        if (c->vs_key && c->vs_key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (c->key->ucp_enables) {
                if (stage == QSTAGE_FRAG) {
                        NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
                } else {
                        NIR_PASS_V(c->s, nir_lower_clip_vs,
                                   c->key->ucp_enables, false);
                        NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                                   nir_var_shader_out);
                }
        }

        /* FS input scalarizing must happen after nir_lower_two_sided_color,
         * which only handles a vec4 at a time.  Similarly, VS output
         * scalarizing must happen after nir_lower_clip_vs.
         */
        if (c->stage == QSTAGE_FRAG)
                NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
        else
                NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);

        NIR_PASS_V(c->s, vc4_nir_lower_io, c);
        NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, nir_lower_idiv);

        vc4_optimize_nir(c->s);

        NIR_PASS_V(c->s, nir_lower_bool_to_int32);

        /* QIR is non-SSA; leave SSA form before translation. */
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        count_nir_instrs(c->s));
        }

        if (vc4_debug & VC4_DEBUG_NIR) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_qir(c);

        switch (stage) {
        case QSTAGE_FRAG:
                /* FS threading requires that the thread execute
                 * QPU_SIG_LAST_THREAD_SWITCH exactly once before terminating
                 * (with no other THRSW afterwards, obviously).  If we didn't
                 * fetch a texture at a top level block, this wouldn't be
                 * true.
                 */
                if (c->fs_threaded && !c->last_thrsw_at_top_level) {
                        c->failed = true;
                        return c;
                }

                emit_frag_end(c);
                break;
        case QSTAGE_VERT:
                emit_vert_end(c,
                              c->vs_key->fs_inputs->input_slots,
                              c->vs_key->fs_inputs->num_inputs);
                break;
        case QSTAGE_COORD:
                emit_coord_end(c);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
                fprintf(stderr, "\n");
        }

        qir_optimize(c);
        qir_lower_uniforms(c);

        qir_schedule_instructions(c);
        qir_emit_uniform_stream_resets(c);

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
                fprintf(stderr, "\n");
        }

        qir_reorder_uniforms(c);
        vc4_generate_code(vc4, c);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->qpu_inst_count);
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->num_uniforms);
        }

        /* The cloned NIR is no longer needed once QPU code exists. */
        ralloc_free(c->s);

        return c;
}
2406848b8605Smrg
/* pipe_context CSO hook: creates the uncompiled shader state.
 *
 * Accepts either NIR (ownership transferred to us) or TGSI (translated to
 * NIR here), then runs the key-independent lowering/optimization passes
 * once so per-variant compiles in vc4_shader_ntq() start from a cleaned-up
 * shader.  Returns NULL on allocation failure.
 */
static void *
vc4_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
        if (!so)
                return NULL;

        so->program_id = vc4->next_uncompiled_program_id++;

        nir_shader *s;

        if (cso->type == PIPE_SHADER_IR_NIR) {
                /* The backend takes ownership of the NIR shader on state
                 * creation.
                 */
                s = cso->ir.nir;
        } else {
                assert(cso->type == PIPE_SHADER_IR_TGSI);

                if (vc4_debug & VC4_DEBUG_TGSI) {
                        fprintf(stderr, "prog %d TGSI:\n",
                                so->program_id);
                        tgsi_dump(cso->tokens, 0);
                        fprintf(stderr, "\n");
                }
                s = tgsi_to_nir(cso->tokens, pctx->screen);
        }

        NIR_PASS_V(s, nir_lower_io, nir_var_all, type_size,
                   (nir_lower_io_options)0);

        NIR_PASS_V(s, nir_lower_regs_to_ssa);
        NIR_PASS_V(s, nir_normalize_cubemap_coords);

        NIR_PASS_V(s, nir_lower_load_const_to_scalar);

        vc4_optimize_nir(s);

        NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp);

        /* Garbage collect dead instructions */
        nir_sweep(s);

        so->base.type = PIPE_SHADER_IR_NIR;
        so->base.ir.nir = s;

        if (vc4_debug & VC4_DEBUG_NIR) {
                fprintf(stderr, "%s prog %d NIR:\n",
                        gl_shader_stage_name(s->info.stage),
                        so->program_id);
                nir_print_shader(s, stderr);
                fprintf(stderr, "\n");
        }

        return so;
}
2465848b8605Smrg
2466848b8605Smrgstatic void
2467848b8605Smrgcopy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
2468b8e80941Smrg                             struct vc4_compile *c)
2469848b8605Smrg{
2470b8e80941Smrg        int count = c->num_uniforms;
2471b8e80941Smrg        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;
2472848b8605Smrg
2473848b8605Smrg        uinfo->count = count;
2474b8e80941Smrg        uinfo->data = ralloc_array(shader, uint32_t, count);
2475b8e80941Smrg        memcpy(uinfo->data, c->uniform_data,
2476848b8605Smrg               count * sizeof(*uinfo->data));
2477b8e80941Smrg        uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
2478b8e80941Smrg        memcpy(uinfo->contents, c->uniform_contents,
2479848b8605Smrg               count * sizeof(*uinfo->contents));
2480b8e80941Smrg        uinfo->num_texture_samples = c->num_texture_samples;
2481b8e80941Smrg
2482b8e80941Smrg        vc4_set_shader_uniform_dirty_flags(shader);
2483848b8605Smrg}
2484848b8605Smrg
/*
 * Builds the compiled FS's list of live input slots and interns it in
 * vc4->fs_inputs_set.
 *
 * Interning gives every distinct input layout a single canonical
 * pointer, so shader->fs_inputs can be compared by pointer (see
 * vc4_update_compiled_fs/vs): a recompiled or rebound FS whose inputs
 * didn't change won't force a VS recompile.
 */
static void
vc4_setup_compiled_fs_inputs(struct vc4_context *vc4, struct vc4_compile *c,
                             struct vc4_compiled_shader *shader)
{
        struct vc4_fs_inputs inputs;

        memset(&inputs, 0, sizeof(inputs));
        /* Worst-case allocation: one entry per compile-time input slot.
         * Initially owned by the shader; may be re-stolen below.
         */
        inputs.input_slots = ralloc_array(shader,
                                          struct vc4_varying_slot,
                                          c->num_input_slots);

        bool input_live[c->num_input_slots];

        /* Scan the QIR for VARY-file sources to find which input slots
         * the program actually reads.
         */
        memset(input_live, 0, sizeof(input_live));
        qir_for_each_inst_inorder(inst, c) {
                for (int i = 0; i < qir_get_nsrc(inst); i++) {
                        if (inst->src[i].file == QFILE_VARY)
                                input_live[inst->src[i].index] = true;
                }
        }

        /* Compact the live slots into inputs.input_slots. */
        for (int i = 0; i < c->num_input_slots; i++) {
                struct vc4_varying_slot *slot = &c->input_slots[i];

                if (!input_live[i])
                        continue;

                /* Skip non-VS-output inputs. */
                if (slot->slot == (uint8_t)~0)
                        continue;

                if (slot->slot == VARYING_SLOT_COL0 ||
                    slot->slot == VARYING_SLOT_COL1 ||
                    slot->slot == VARYING_SLOT_BFC0 ||
                    slot->slot == VARYING_SLOT_BFC1) {
                        /* Track color inputs so flat-shade state changes can
                         * be detected (see VC4_DIRTY_FLAT_SHADE_FLAGS).
                         */
                        shader->color_inputs |= (1 << inputs.num_inputs);
                }

                inputs.input_slots[inputs.num_inputs] = *slot;
                inputs.num_inputs++;
        }
        shader->num_inputs = inputs.num_inputs;

        /* Add our set of inputs to the set of all inputs seen.  This way, we
         * can have a single pointer that identifies an FS inputs set,
         * allowing VS to avoid recompiling when the FS is recompiled (or a
         * new one is bound using separate shader objects) but the inputs
         * don't change.
         */
        struct set_entry *entry = _mesa_set_search(vc4->fs_inputs_set, &inputs);
        if (entry) {
                /* Layout already interned: share the canonical copy and drop
                 * our temporary slot array.
                 */
                shader->fs_inputs = entry->key;
                ralloc_free(inputs.input_slots);
        } else {
                struct vc4_fs_inputs *alloc_inputs;

                /* New layout: make a heap copy owned by the set, and move
                 * ownership of the slot array from the shader to it.
                 */
                alloc_inputs = rzalloc(vc4->fs_inputs_set, struct vc4_fs_inputs);
                memcpy(alloc_inputs, &inputs, sizeof(inputs));
                ralloc_steal(alloc_inputs, inputs.input_slots);
                _mesa_set_add(vc4->fs_inputs_set, alloc_inputs);

                shader->fs_inputs = alloc_inputs;
        }
}
2549848b8605Smrg
2550b8e80941Smrgstatic struct vc4_compiled_shader *
2551b8e80941Smrgvc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
2552b8e80941Smrg                        struct vc4_key *key)
2553848b8605Smrg{
2554b8e80941Smrg        struct hash_table *ht;
2555b8e80941Smrg        uint32_t key_size;
2556b8e80941Smrg        bool try_threading;
2557b8e80941Smrg
2558b8e80941Smrg        if (stage == QSTAGE_FRAG) {
2559b8e80941Smrg                ht = vc4->fs_cache;
2560b8e80941Smrg                key_size = sizeof(struct vc4_fs_key);
2561b8e80941Smrg                try_threading = vc4->screen->has_threaded_fs;
2562b8e80941Smrg        } else {
2563b8e80941Smrg                ht = vc4->vs_cache;
2564b8e80941Smrg                key_size = sizeof(struct vc4_vs_key);
2565b8e80941Smrg                try_threading = false;
2566b8e80941Smrg        }
2567848b8605Smrg
2568b8e80941Smrg        struct vc4_compiled_shader *shader;
2569b8e80941Smrg        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
2570b8e80941Smrg        if (entry)
2571b8e80941Smrg                return entry->data;
2572b8e80941Smrg
2573b8e80941Smrg        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key, try_threading);
2574b8e80941Smrg        /* If the FS failed to compile threaded, fall back to single threaded. */
2575b8e80941Smrg        if (try_threading && c->failed) {
2576b8e80941Smrg                qir_compile_destroy(c);
2577b8e80941Smrg                c = vc4_shader_ntq(vc4, stage, key, false);
2578b8e80941Smrg        }
2579b8e80941Smrg
2580b8e80941Smrg        shader = rzalloc(NULL, struct vc4_compiled_shader);
2581b8e80941Smrg
2582b8e80941Smrg        shader->program_id = vc4->next_compiled_program_id++;
2583b8e80941Smrg        if (stage == QSTAGE_FRAG) {
2584b8e80941Smrg                vc4_setup_compiled_fs_inputs(vc4, c, shader);
2585b8e80941Smrg
2586b8e80941Smrg                /* Note: the temporary clone in c->s has been freed. */
2587b8e80941Smrg                nir_shader *orig_shader = key->shader_state->base.ir.nir;
2588b8e80941Smrg                if (orig_shader->info.outputs_written & (1 << FRAG_RESULT_DEPTH))
2589b8e80941Smrg                        shader->disable_early_z = true;
2590b8e80941Smrg        } else {
2591b8e80941Smrg                shader->num_inputs = c->num_inputs;
2592b8e80941Smrg
2593b8e80941Smrg                shader->vattr_offsets[0] = 0;
2594b8e80941Smrg                for (int i = 0; i < 8; i++) {
2595b8e80941Smrg                        shader->vattr_offsets[i + 1] =
2596b8e80941Smrg                                shader->vattr_offsets[i] + c->vattr_sizes[i];
2597b8e80941Smrg
2598b8e80941Smrg                        if (c->vattr_sizes[i])
2599b8e80941Smrg                                shader->vattrs_live |= (1 << i);
2600b8e80941Smrg                }
2601b8e80941Smrg        }
2602b8e80941Smrg
2603b8e80941Smrg        shader->failed = c->failed;
2604b8e80941Smrg        if (c->failed) {
2605b8e80941Smrg                shader->failed = true;
2606b8e80941Smrg        } else {
2607b8e80941Smrg                copy_uniform_state_to_shader(shader, c);
2608b8e80941Smrg                shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
2609b8e80941Smrg                                                 c->qpu_inst_count *
2610b8e80941Smrg                                                 sizeof(uint64_t));
2611b8e80941Smrg        }
2612b8e80941Smrg
2613b8e80941Smrg        shader->fs_threaded = c->fs_threaded;
2614b8e80941Smrg
2615b8e80941Smrg        if ((vc4_debug & VC4_DEBUG_SHADERDB) && stage == QSTAGE_FRAG) {
2616b8e80941Smrg                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d FS threads\n",
2617b8e80941Smrg                        qir_get_stage_name(c->stage),
2618b8e80941Smrg                        c->program_id, c->variant_id,
2619b8e80941Smrg                        1 + shader->fs_threaded);
2620b8e80941Smrg        }
2621848b8605Smrg
2622b8e80941Smrg        qir_compile_destroy(c);
2623848b8605Smrg
2624b8e80941Smrg        struct vc4_key *dup_key;
2625b8e80941Smrg        dup_key = rzalloc_size(shader, key_size); /* TODO: don't use rzalloc */
2626b8e80941Smrg        memcpy(dup_key, key, key_size);
2627b8e80941Smrg        _mesa_hash_table_insert(ht, dup_key, shader);
2628848b8605Smrg
2629b8e80941Smrg        return shader;
2630848b8605Smrg}
2631848b8605Smrg
2632848b8605Smrgstatic void
2633b8e80941Smrgvc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
2634b8e80941Smrg                     struct vc4_texture_stateobj *texstate)
2635848b8605Smrg{
2636848b8605Smrg        for (int i = 0; i < texstate->num_textures; i++) {
2637848b8605Smrg                struct pipe_sampler_view *sampler = texstate->textures[i];
2638b8e80941Smrg                struct vc4_sampler_view *vc4_sampler = vc4_sampler_view(sampler);
2639b8e80941Smrg                struct pipe_sampler_state *sampler_state =
2640b8e80941Smrg                        texstate->samplers[i];
2641b8e80941Smrg
2642b8e80941Smrg                if (!sampler)
2643b8e80941Smrg                        continue;
2644b8e80941Smrg
2645b8e80941Smrg                key->tex[i].format = sampler->format;
2646b8e80941Smrg                key->tex[i].swizzle[0] = sampler->swizzle_r;
2647b8e80941Smrg                key->tex[i].swizzle[1] = sampler->swizzle_g;
2648b8e80941Smrg                key->tex[i].swizzle[2] = sampler->swizzle_b;
2649b8e80941Smrg                key->tex[i].swizzle[3] = sampler->swizzle_a;
2650b8e80941Smrg
2651b8e80941Smrg                if (sampler->texture->nr_samples > 1) {
2652b8e80941Smrg                        key->tex[i].msaa_width = sampler->texture->width0;
2653b8e80941Smrg                        key->tex[i].msaa_height = sampler->texture->height0;
2654b8e80941Smrg                } else if (sampler){
2655b8e80941Smrg                        key->tex[i].compare_mode = sampler_state->compare_mode;
2656b8e80941Smrg                        key->tex[i].compare_func = sampler_state->compare_func;
2657b8e80941Smrg                        key->tex[i].wrap_s = sampler_state->wrap_s;
2658b8e80941Smrg                        key->tex[i].wrap_t = sampler_state->wrap_t;
2659b8e80941Smrg                        key->tex[i].force_first_level =
2660b8e80941Smrg                                vc4_sampler->force_first_level;
2661848b8605Smrg                }
2662848b8605Smrg        }
2663b8e80941Smrg
2664b8e80941Smrg        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
2665848b8605Smrg}
2666848b8605Smrg
/*
 * Recompiles (or re-looks-up) the fragment shader variant if any state
 * that feeds the FS compile key is dirty.
 *
 * Builds the full vc4_fs_key from current context state, fetches the
 * matching compiled shader from the cache, and raises the appropriate
 * dirty flags if the bound compiled FS actually changed.
 */
static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_job *job = vc4->job;
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        /* Early out if nothing that affects the FS key changed. */
        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_SAMPLE_MASK |
                            VC4_DIRTY_FRAGTEX |
                            VC4_DIRTY_UNCOMPILED_FS |
                            VC4_DIRTY_UBO_1_SIZE))) {
                return;
        }

        /* Zero the key first: the cache compares it with memcmp. */
        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                /* COPY acts as "logic op disabled". */
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (job->msaa) {
                key->msaa = vc4->rasterizer->base.multisample;
                key->sample_coverage = (vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
                key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
                key->sample_alpha_to_one = vc4->blend->alpha_to_one;
        }

        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled)
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        else
                key->alpha_test_func = COMPARE_FUNC_ALWAYS;

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->ubo_1_size = vc4->constbuf[PIPE_SHADER_FRAGMENT].cb[1].buffer_size;
        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;

        /* Flat-shade flags depend on which FS inputs are colors, so only
         * dirty them when the color input set changed.
         */
        if (vc4->rasterizer->base.flatshade &&
            (!old_fs || vc4->prog.fs->color_inputs != old_fs->color_inputs)) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }

        /* fs_inputs is an interned pointer, so pointer inequality means the
         * input layout really changed (and the VS may need a recompile).
         */
        if (!old_fs || vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
                vc4->dirty |= VC4_DIRTY_FS_INPUTS;
}
2744848b8605Smrg
/*
 * Recompiles (or re-looks-up) the vertex and coordinate shader variants
 * if any state feeding the VS compile key is dirty.
 *
 * The same key is reused for both lookups: first as-is for the VS, then
 * mutated (is_coord = true, fs_inputs = NULL) for the coord shader, so
 * the two lookups must stay in this order.
 */
static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        /* Early out if nothing that affects the VS key changed. */
        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_FS_INPUTS))) {
                return;
        }

        /* Zero the key first: the cache compares it with memcmp. */
        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->fs_inputs = vc4->prog.fs->fs_inputs;
        key->clamp_color = vc4->rasterizer->base.clamp_vertex_color;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        struct vc4_compiled_shader *vs =
                vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        if (vs != vc4->prog.vs) {
                vc4->prog.vs = vs;
                vc4->dirty |= VC4_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;
        /* Coord shaders don't care what the FS inputs are. */
        key->fs_inputs = NULL;
        struct vc4_compiled_shader *cs =
                vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
        if (cs != vc4->prog.cs) {
                vc4->prog.cs = cs;
                vc4->dirty |= VC4_DIRTY_COMPILED_CS;
        }
}
2790848b8605Smrg
2791b8e80941Smrgbool
2792848b8605Smrgvc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
2793848b8605Smrg{
2794848b8605Smrg        vc4_update_compiled_fs(vc4, prim_mode);
2795b8e80941Smrg        vc4_update_compiled_vs(vc4, prim_mode);
2796b8e80941Smrg
2797b8e80941Smrg        return !(vc4->prog.cs->failed ||
2798b8e80941Smrg                 vc4->prog.vs->failed ||
2799b8e80941Smrg                 vc4->prog.fs->failed);
2800848b8605Smrg}
2801848b8605Smrg
2802b8e80941Smrgstatic uint32_t
2803b8e80941Smrgfs_cache_hash(const void *key)
2804848b8605Smrg{
2805b8e80941Smrg        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
2806848b8605Smrg}
2807848b8605Smrg
2808b8e80941Smrgstatic uint32_t
2809b8e80941Smrgvs_cache_hash(const void *key)
2810848b8605Smrg{
2811b8e80941Smrg        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
2812848b8605Smrg}
2813848b8605Smrg
2814b8e80941Smrgstatic bool
2815b8e80941Smrgfs_cache_compare(const void *key1, const void *key2)
2816848b8605Smrg{
2817b8e80941Smrg        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
2818848b8605Smrg}
2819848b8605Smrg
2820b8e80941Smrgstatic bool
2821b8e80941Smrgvs_cache_compare(const void *key1, const void *key2)
2822848b8605Smrg{
2823b8e80941Smrg        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
2824848b8605Smrg}
2825848b8605Smrg
2826b8e80941Smrgstatic uint32_t
2827b8e80941Smrgfs_inputs_hash(const void *key)
2828848b8605Smrg{
2829b8e80941Smrg        const struct vc4_fs_inputs *inputs = key;
2830848b8605Smrg
2831b8e80941Smrg        return _mesa_hash_data(inputs->input_slots,
2832b8e80941Smrg                               sizeof(*inputs->input_slots) *
2833b8e80941Smrg                               inputs->num_inputs);
2834848b8605Smrg}
2835848b8605Smrg
2836b8e80941Smrgstatic bool
2837b8e80941Smrgfs_inputs_compare(const void *key1, const void *key2)
2838848b8605Smrg{
2839b8e80941Smrg        const struct vc4_fs_inputs *inputs1 = key1;
2840b8e80941Smrg        const struct vc4_fs_inputs *inputs2 = key2;
2841b8e80941Smrg
2842b8e80941Smrg        return (inputs1->num_inputs == inputs2->num_inputs &&
2843b8e80941Smrg                memcmp(inputs1->input_slots,
2844b8e80941Smrg                       inputs2->input_slots,
2845b8e80941Smrg                       sizeof(*inputs1->input_slots) *
2846b8e80941Smrg                       inputs1->num_inputs) == 0);
2847848b8605Smrg}
2848848b8605Smrg
2849848b8605Smrgstatic void
2850b8e80941Smrgdelete_from_cache_if_matches(struct hash_table *ht,
2851b8e80941Smrg                             struct vc4_compiled_shader **last_compile,
2852b8e80941Smrg                             struct hash_entry *entry,
2853b8e80941Smrg                             struct vc4_uncompiled_shader *so)
2854848b8605Smrg{
2855b8e80941Smrg        const struct vc4_key *key = entry->key;
2856848b8605Smrg
2857b8e80941Smrg        if (key->shader_state == so) {
2858b8e80941Smrg                struct vc4_compiled_shader *shader = entry->data;
2859b8e80941Smrg                _mesa_hash_table_remove(ht, entry);
2860b8e80941Smrg                vc4_bo_unreference(&shader->bo);
2861848b8605Smrg
2862b8e80941Smrg                if (shader == *last_compile)
2863b8e80941Smrg                        *last_compile = NULL;
2864848b8605Smrg
2865b8e80941Smrg                ralloc_free(shader);
2866848b8605Smrg        }
2867848b8605Smrg}
2868848b8605Smrg
/*
 * pipe_context delete_fs_state/delete_vs_state hook: frees an
 * uncompiled shader and every compiled variant derived from it.
 *
 * Both caches are walked because the same CSO create/delete functions
 * serve both stages.  hash_table_foreach tolerates removal of the
 * current entry (done inside delete_from_cache_if_matches).
 */
static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        hash_table_foreach(vc4->fs_cache, entry) {
                delete_from_cache_if_matches(vc4->fs_cache, &vc4->prog.fs,
                                             entry, so);
        }
        hash_table_foreach(vc4->vs_cache, entry) {
                delete_from_cache_if_matches(vc4->vs_cache, &vc4->prog.vs,
                                             entry, so);
        }

        /* The NIR is ralloc'ed; the CSO itself was plain-malloc'ed by
         * vc4_shader_state_create.
         */
        ralloc_free(so->base.ir.nir);
        free(so);
}
2887848b8605Smrg
2888848b8605Smrgstatic void
2889848b8605Smrgvc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
2890848b8605Smrg{
2891848b8605Smrg        struct vc4_context *vc4 = vc4_context(pctx);
2892848b8605Smrg        vc4->prog.bind_fs = hwcso;
2893b8e80941Smrg        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
2894848b8605Smrg}
2895848b8605Smrg
2896848b8605Smrgstatic void
2897848b8605Smrgvc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
2898848b8605Smrg{
2899848b8605Smrg        struct vc4_context *vc4 = vc4_context(pctx);
2900848b8605Smrg        vc4->prog.bind_vs = hwcso;
2901b8e80941Smrg        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
2902848b8605Smrg}
2903848b8605Smrg
2904848b8605Smrgvoid
2905848b8605Smrgvc4_program_init(struct pipe_context *pctx)
2906848b8605Smrg{
2907848b8605Smrg        struct vc4_context *vc4 = vc4_context(pctx);
2908848b8605Smrg
2909848b8605Smrg        pctx->create_vs_state = vc4_shader_state_create;
2910848b8605Smrg        pctx->delete_vs_state = vc4_shader_state_delete;
2911848b8605Smrg
2912848b8605Smrg        pctx->create_fs_state = vc4_shader_state_create;
2913848b8605Smrg        pctx->delete_fs_state = vc4_shader_state_delete;
2914848b8605Smrg
2915848b8605Smrg        pctx->bind_fs_state = vc4_fp_state_bind;
2916848b8605Smrg        pctx->bind_vs_state = vc4_vp_state_bind;
2917848b8605Smrg
2918b8e80941Smrg        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
2919b8e80941Smrg                                                fs_cache_compare);
2920b8e80941Smrg        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
2921b8e80941Smrg                                                vs_cache_compare);
2922b8e80941Smrg        vc4->fs_inputs_set = _mesa_set_create(pctx, fs_inputs_hash,
2923b8e80941Smrg                                              fs_inputs_compare);
2924b8e80941Smrg}
2925b8e80941Smrg
2926b8e80941Smrgvoid
2927b8e80941Smrgvc4_program_fini(struct pipe_context *pctx)
2928b8e80941Smrg{
2929b8e80941Smrg        struct vc4_context *vc4 = vc4_context(pctx);
2930b8e80941Smrg
2931b8e80941Smrg        hash_table_foreach(vc4->fs_cache, entry) {
2932b8e80941Smrg                struct vc4_compiled_shader *shader = entry->data;
2933b8e80941Smrg                vc4_bo_unreference(&shader->bo);
2934b8e80941Smrg                ralloc_free(shader);
2935b8e80941Smrg                _mesa_hash_table_remove(vc4->fs_cache, entry);
2936b8e80941Smrg        }
2937b8e80941Smrg
2938b8e80941Smrg        hash_table_foreach(vc4->vs_cache, entry) {
2939b8e80941Smrg                struct vc4_compiled_shader *shader = entry->data;
2940b8e80941Smrg                vc4_bo_unreference(&shader->bo);
2941b8e80941Smrg                ralloc_free(shader);
2942b8e80941Smrg                _mesa_hash_table_remove(vc4->vs_cache, entry);
2943b8e80941Smrg        }
2944848b8605Smrg}
2945