/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vc4_qir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_format.h"

/**
 * Walks the NIR generated by TGSI-to-NIR or GLSL-to-NIR to lower its io
 * intrinsics into something amenable to the VC4 architecture.
 *
 * Currently, it splits VS inputs and uniforms into scalars, drops any
 * non-position outputs in coordinate shaders, and fixes up the addressing on
 * indirect uniform loads.  FS input and VS output scalarization is handled by
 * nir_lower_io_to_scalar().
 */

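/**
 * Replaces a multi-component intrinsic's result with the vector built from
 * the given per-component SSA defs.
 */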
static void
replace_intrinsic_with_vec(nir_builder *b, nir_intrinsic_instr *intr,
                           nir_ssa_def **comps)
{
        /* Batch things back together into a vector.  This will get split by
         * the later ALU scalarization pass.
         */
        nir_ssa_def *vec = nir_vec(b, comps, intr->num_components);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vector.
         */
        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
        nir_instr_remove(&intr->instr);
}

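/** Returns the 8-bit field as a zero-extended 32-bit value. */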
static nir_ssa_def *
vc4_nir_unpack_8i(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_ubitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 8 * chan),
                                     nir_imm_int(b, 8));
}

/** Returns the 16-bit field as a sign-extended 32-bit value. */
static nir_ssa_def *
vc4_nir_unpack_16i(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_ibitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 16 * chan),
                                     nir_imm_int(b, 16));
}

/** Returns the 16-bit field as an unsigned 32-bit value. */
static nir_ssa_def *
vc4_nir_unpack_16u(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        if (chan == 0) {
                return nir_iand(b, src, nir_imm_int(b, 0xffff));
        } else {
                return nir_ushr(b, src, nir_imm_int(b, 16));
        }
}

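/** Returns the 8-bit channel unpacked as a unorm float in [0.0, 1.0]. */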
static nir_ssa_def *
vc4_nir_unpack_8f(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_channel(b, nir_unpack_unorm_4x8(b, src), chan);
}

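/**
 * Converts one channel of a vertex attribute from its raw VPM dword reads to
 * the 32-bit value the shader expects, according to the format's channel
 * description.  Returns NULL for unsupported channel size/type combinations.
 */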
static nir_ssa_def *
vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                              nir_builder *b,
                              nir_ssa_def **vpm_reads,
                              uint8_t swiz,
                              const struct util_format_description *desc)
{
        const struct util_format_channel_description *chan =
                &desc->channel[swiz];
        nir_ssa_def *temp;

        if (swiz > PIPE_SWIZZLE_W) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                if (chan->normalized) {
                        return nir_fmul(b,
                                        nir_i2f32(b, vpm_reads[swiz]),
                                        nir_imm_float(b,
                                                      1.0 / 0x7fffffff));
                } else {
                        return nir_i2f32(b, vpm_reads[swiz]);
                }
        } else if (chan->size == 8 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_ssa_def *vpm = vpm_reads[0];
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
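                        /* Flip the sign bits with an XOR so each signed byte
                         * becomes a biased (value + 128) unsigned byte; the
                         * bias is undone by the scaling below.
                         */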
                        temp = nir_ixor(b, vpm, nir_imm_int(b, 0x80808080));
                        if (chan->normalized) {
                                return nir_fsub(b, nir_fmul(b,
                                                            vc4_nir_unpack_8f(b, temp, swiz),
                                                            nir_imm_float(b, 2.0)),
                                                nir_imm_float(b, 1.0));
                        } else {
                                return nir_fadd(b,
                                                nir_i2f32(b,
                                                          vc4_nir_unpack_8i(b, temp,
                                                                            swiz)),
                                                nir_imm_float(b, -128.0));
                        }
                } else {
                        if (chan->normalized) {
                                return vc4_nir_unpack_8f(b, vpm, swiz);
                        } else {
                                return nir_i2f32(b, vc4_nir_unpack_8i(b, vpm, swiz));
                        }
                }
        } else if (chan->size == 16 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
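                /* Each VPM dword holds two 16-bit channels. */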
                nir_ssa_def *vpm = vpm_reads[swiz / 2];

                /* Note that UNPACK_16F eats a half float, not ints, so we use
                 * UNPACK_16_I for all of these.
                 */
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_i2f32(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1/32768.0f));
                        } else {
                                return temp;
                        }
                } else {
                        temp = nir_i2f32(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1 / 65535.0));
                        } else {
                                return temp;
                        }
                }
        } else {
                return NULL;
        }
}

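/**
 * Lowers a VS load_input intrinsic to scalar VPM dword loads, then unpacks
 * and converts the channels according to the attribute's pipe format.
 */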
static void
vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
                          nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        int attr = nir_intrinsic_base(intr);
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        /* We only accept direct inputs, and TGSI only ever gives them to us
         * with an offset value of 0.
         */
        assert(nir_src_as_uint(intr->src[0]) == 0);

        /* Generate dword loads for the VPM values (since these intrinsics
         * may be reordered, the actual reads will be generated at the top of
         * the shader by ntq_setup_inputs()).
         */
        nir_ssa_def *vpm_reads[4];
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s,
                                                   nir_intrinsic_load_input);
                intr_comp->num_components = 1;
                nir_intrinsic_set_base(intr_comp, nir_intrinsic_base(intr));
                nir_intrinsic_set_component(intr_comp, i);
                intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                vpm_reads[i] = &intr_comp->dest.ssa;
        }

        bool format_warned = false;
        const struct util_format_description *desc =
                util_format_description(format);

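        /* Convert each requested component from the raw VPM dwords according
         * to the format's swizzle, falling back to 0.0 for channels of
         * unsupported formats.
         */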
        nir_ssa_def *dests[4];
        for (int i = 0; i < intr->num_components; i++) {
                uint8_t swiz = desc->swizzle[i];
                dests[i] = vc4_nir_get_vattr_channel_vpm(c, b, vpm_reads, swiz,
                                                         desc);

                if (!dests[i]) {
                        if (!format_warned) {
                                fprintf(stderr,
                                        "vtx element %d unsupported type: %s\n",
                                        attr, util_format_name(format));
                                format_warned = true;
                        }
                        dests[i] = nir_imm_float(b, 0.0);
                }
        }

        replace_intrinsic_with_vec(b, intr, dests);
}

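/**
 * Returns whether the FS input variable is a generic varying that point
 * sprite coordinate replacement is enabled for.
 */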
static bool
is_point_sprite(struct vc4_compile *c, nir_variable *var)
{
        if (var->data.location < VARYING_SLOT_VAR0 ||
            var->data.location > VARYING_SLOT_VAR31)
                return false;

        return (c->fs_key->point_sprite_mask &
                (1 << (var->data.location - VARYING_SLOT_VAR0)));
}

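/**
 * Lowers an FS load_input intrinsic.  TLB color reads need no lowering; for
 * point sprite varyings and PNTC, the .zw channels are replaced with
 * constants and .y is flipped when an upper-left point coordinate origin is
 * requested.
 */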
static void
vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
                       nir_intrinsic_instr *intr)
{
        b->cursor = nir_after_instr(&intr->instr);

        if (nir_intrinsic_base(intr) >= VC4_NIR_TLB_COLOR_READ_INPUT &&
            nir_intrinsic_base(intr) < (VC4_NIR_TLB_COLOR_READ_INPUT +
                                        VC4_MAX_SAMPLES)) {
                /* This doesn't need any lowering. */
                return;
        }

        nir_variable *input_var = NULL;
        nir_foreach_variable(var, &c->s->inputs) {
                if (var->data.driver_location == nir_intrinsic_base(intr)) {
                        input_var = var;
                        break;
                }
        }
        assert(input_var);

        int comp = nir_intrinsic_component(intr);

        /* Lower away point coordinates, and fix up PNTC. */
        if (is_point_sprite(c, input_var) ||
            input_var->data.location == VARYING_SLOT_PNTC) {
                assert(intr->num_components == 1);

                nir_ssa_def *result = &intr->dest.ssa;

                switch (comp) {
                case 0:
                case 1:
                        /* If we're not rendering points, we need to set a
                         * defined value for the input that would come from
                         * PNTC.
                         */
                        if (!c->fs_key->is_points)
                                result = nir_imm_float(b, 0.0);
                        break;
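                /* The point coordinate is (s, t, 0, 1), so hardwire .z to
                 * 0.0 and .w to 1.0.
                 */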
                case 2:
                        result = nir_imm_float(b, 0.0);
                        break;
                case 3:
                        result = nir_imm_float(b, 1.0);
                        break;
                }

                if (c->fs_key->point_coord_upper_left && comp == 1)
                        result = nir_fsub(b, nir_imm_float(b, 1.0), result);

                if (result != &intr->dest.ssa) {
                        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
                                                       nir_src_for_ssa(result),
                                                       result->parent_instr);
                }
        }
}

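/**
 * Drops stores to outputs other than position and point size in coordinate
 * shaders, which only need to compute the vertex position.
 */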
static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        nir_variable *output_var = NULL;
        nir_foreach_variable(var, &c->s->outputs) {
                if (var->data.driver_location == nir_intrinsic_base(intr)) {
                        output_var = var;
                        break;
                }
        }
        assert(output_var);

        if (c->stage == QSTAGE_COORD &&
            output_var->data.location != VARYING_SLOT_POS &&
            output_var->data.location != VARYING_SLOT_PSIZ) {
                nir_instr_remove(&intr->instr);
                return;
        }
}

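/**
 * Splits a vector load_uniform into scalar loads, and rewrites the base,
 * range, and indirect offset from vec4 units into byte units.
 */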
static void
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        /* Generate scalar loads equivalent to the original vector. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                intr_comp->num_components = 1;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1,
                                  intr->dest.ssa.bit_size, NULL);

                /* Convert the uniform offset to bytes.  If it happens
                 * to be a constant, constant-folding will clean up
                 * the shift for us.
                 */
                nir_intrinsic_set_base(intr_comp,
                                       nir_intrinsic_base(intr) * 16 +
                                       i * 4);
                nir_intrinsic_set_range(intr_comp,
                                        nir_intrinsic_range(intr) * 16 - i * 4);

                intr_comp->src[0] =
                        nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
                                                 nir_imm_int(b, 4)));

                dests[i] = &intr_comp->dest.ssa;

                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        replace_intrinsic_with_vec(b, intr, dests);
}

static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                if (c->stage == QSTAGE_FRAG)
                        vc4_nir_lower_fs_input(c, b, intr);
                else
                        vc4_nir_lower_vertex_attr(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
                vc4_nir_lower_uniform(c, b, intr);
                break;

        case nir_intrinsic_load_user_clip_plane:
        default:
                break;
        }
}

static bool
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_builder b;
        nir_builder_init(&b, impl);

        nir_foreach_block(block, impl) {
                nir_foreach_instr_safe(instr, block)
                        vc4_nir_lower_io_instr(c, &b, instr);
        }

        nir_metadata_preserve(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

        return true;
}

void
vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c)
{
        nir_foreach_function(function, s) {
                if (function->impl)
                        vc4_nir_lower_io_impl(c, function->impl);
        }
}