17ec681f3Smrg/*
27ec681f3Smrg * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
37ec681f3Smrg * Copyright (C) 2019-2020 Collabora, Ltd.
47ec681f3Smrg *
57ec681f3Smrg * Permission is hereby granted, free of charge, to any person obtaining a
67ec681f3Smrg * copy of this software and associated documentation files (the "Software"),
77ec681f3Smrg * to deal in the Software without restriction, including without limitation
87ec681f3Smrg * the rights to use, copy, modify, merge, publish, distribute, sublicense,
97ec681f3Smrg * and/or sell copies of the Software, and to permit persons to whom the
107ec681f3Smrg * Software is furnished to do so, subject to the following conditions:
117ec681f3Smrg *
127ec681f3Smrg * The above copyright notice and this permission notice (including the next
137ec681f3Smrg * paragraph) shall be included in all copies or substantial portions of the
147ec681f3Smrg * Software.
157ec681f3Smrg *
167ec681f3Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
177ec681f3Smrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
187ec681f3Smrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
197ec681f3Smrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
207ec681f3Smrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
217ec681f3Smrg * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
227ec681f3Smrg * SOFTWARE.
237ec681f3Smrg */
247ec681f3Smrg
257ec681f3Smrg#include "compiler.h"
267ec681f3Smrg#include "midgard_ops.h"
277ec681f3Smrg#include "midgard_quirks.h"
287ec681f3Smrg
297ec681f3Smrgstatic midgard_int_mod
307ec681f3Smrgmir_get_imod(bool shift, nir_alu_type T, bool half, bool scalar)
317ec681f3Smrg{
327ec681f3Smrg        if (!half) {
337ec681f3Smrg                assert(!shift);
347ec681f3Smrg                /* Doesn't matter, src mods are only used when expanding */
357ec681f3Smrg                return midgard_int_sign_extend;
367ec681f3Smrg        }
377ec681f3Smrg
387ec681f3Smrg        if (shift)
397ec681f3Smrg                return midgard_int_left_shift;
407ec681f3Smrg
417ec681f3Smrg        if (nir_alu_type_get_base_type(T) == nir_type_int)
427ec681f3Smrg                return midgard_int_sign_extend;
437ec681f3Smrg        else
447ec681f3Smrg                return midgard_int_zero_extend;
457ec681f3Smrg}
467ec681f3Smrg
477ec681f3Smrgvoid
487ec681f3Smrgmidgard_pack_ubo_index_imm(midgard_load_store_word *word, unsigned index)
497ec681f3Smrg{
507ec681f3Smrg        word->arg_comp = index & 0x3;
517ec681f3Smrg        word->arg_reg = (index >> 2) & 0x7;
527ec681f3Smrg        word->bitsize_toggle = (index >> 5) & 0x1;
537ec681f3Smrg        word->index_format = (index >> 6) & 0x3;
547ec681f3Smrg}
557ec681f3Smrg
567ec681f3Smrgunsigned
577ec681f3Smrgmidgard_unpack_ubo_index_imm(midgard_load_store_word word)
587ec681f3Smrg{
597ec681f3Smrg        unsigned ubo = word.arg_comp |
607ec681f3Smrg                       (word.arg_reg << 2)  |
617ec681f3Smrg                       (word.bitsize_toggle << 5) |
627ec681f3Smrg                       (word.index_format << 6);
637ec681f3Smrg
647ec681f3Smrg        return ubo;
657ec681f3Smrg}
667ec681f3Smrg
677ec681f3Smrgvoid midgard_pack_varying_params(midgard_load_store_word *word, midgard_varying_params p)
687ec681f3Smrg{
697ec681f3Smrg        /* Currently these parameters are not supported. */
707ec681f3Smrg        assert(p.direct_sample_pos_x == 0 && p.direct_sample_pos_y == 0);
717ec681f3Smrg
727ec681f3Smrg        unsigned u;
737ec681f3Smrg        memcpy(&u, &p, sizeof(p));
747ec681f3Smrg
757ec681f3Smrg        word->signed_offset |= u & 0x1FF;
767ec681f3Smrg}
777ec681f3Smrg
787ec681f3Smrgmidgard_varying_params midgard_unpack_varying_params(midgard_load_store_word word)
797ec681f3Smrg{
807ec681f3Smrg        unsigned params = word.signed_offset & 0x1FF;
817ec681f3Smrg
827ec681f3Smrg        midgard_varying_params p;
837ec681f3Smrg        memcpy(&p, &params, sizeof(p));
847ec681f3Smrg
857ec681f3Smrg        return p;
867ec681f3Smrg}
877ec681f3Smrg
887ec681f3Smrgunsigned
897ec681f3Smrgmir_pack_mod(midgard_instruction *ins, unsigned i, bool scalar)
907ec681f3Smrg{
917ec681f3Smrg        bool integer = midgard_is_integer_op(ins->op);
927ec681f3Smrg        unsigned base_size = max_bitsize_for_alu(ins);
937ec681f3Smrg        unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
947ec681f3Smrg        bool half = (sz == (base_size >> 1));
957ec681f3Smrg
967ec681f3Smrg        return integer ?
977ec681f3Smrg                mir_get_imod(ins->src_shift[i], ins->src_types[i], half, scalar) :
987ec681f3Smrg                ((ins->src_abs[i] << 0) |
997ec681f3Smrg                 ((ins->src_neg[i] << 1)));
1007ec681f3Smrg}
1017ec681f3Smrg
1027ec681f3Smrg/* Midgard IR only knows vector ALU types, but we sometimes need to actually
1037ec681f3Smrg * use scalar ALU instructions, for functional or performance reasons. To do
1047ec681f3Smrg * this, we just demote vector ALU payloads to scalar. */
1057ec681f3Smrg
/* Return the index of the lowest set bit of a (nonzero) writemask, i.e. the
 * single component a scalarized instruction writes. Masks are at most 8 bits
 * wide (vec8). Asserts if the mask is empty. */
static int
component_from_mask(unsigned mask)
{
        int c = 0;

        while (c < 8) {
                if (mask & (1u << c))
                        return c;
                ++c;
        }

        assert(0);
        return 0;
}
1177ec681f3Smrg
/* Pack one 6-bit source field of a scalar ALU word. `component` indexes a
 * 16-bit lane; full (32-bit) sources occupy every other lane, hence the
 * shift. The bitfield struct is bit-cast through memcpy; only the low 6 bits
 * of the result are meaningful (NOTE(review): `o` is only partially written
 * when sizeof(s) < sizeof(o), but the mask keeps just the copied low bits —
 * assumes little-endian layout, confirm). */
static unsigned
mir_pack_scalar_source(unsigned mod, bool is_full, unsigned component)
{
        midgard_scalar_alu_src s = {
                .mod = mod,
                .full = is_full,
                .component = component << (is_full ? 1 : 0)
        };

        unsigned o;
        memcpy(&o, &s, sizeof(s));

        return o & ((1 << 6) - 1);
}
1327ec681f3Smrg
/* Demote a vector ALU payload to the scalar ALU encoding. Only valid when
 * the writemask selects a single component; each source uses the swizzle
 * lane that feeds that component. */
static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
        /* Scalar ALU distinguishes "full" 32-bit from 16-bit operands */
        bool is_full = nir_alu_type_get_type_size(ins->dest_type) == 32;

        bool half_0 = nir_alu_type_get_type_size(ins->src_types[0]) == 16;
        bool half_1 = nir_alu_type_get_type_size(ins->src_types[1]) == 16;
        unsigned comp = component_from_mask(ins->mask);

        unsigned packed_src[2] = {
                mir_pack_scalar_source(mir_pack_mod(ins, 0, true), !half_0, ins->swizzle[0][comp]),
                mir_pack_scalar_source(mir_pack_mod(ins, 1, true), !half_1, ins->swizzle[1][comp])
        };

        /* The output component is from the mask */
        midgard_scalar_alu s = {
                .op = v.op,
                .src1 = packed_src[0],
                .src2 = packed_src[1],
                .unknown = 0,
                .outmod = v.outmod,
                .output_full = is_full,
                .output_component = comp
        };

        /* Full components are physically spaced out */
        if (is_full) {
                assert(s.output_component < 4);
                s.output_component <<= 1;
        }

        /* Inline constant is passed along rather than trying to extract it
         * from v */

        if (ins->has_inline_constant) {
                /* The scalar encoding scrambles the constant's bit order
                 * relative to the vector encoding; shuffle bits into place.
                 * NOTE(review): the mask covers 12 bits despite the name
                 * lower_11 — confirm against the hardware encoding. */
                uint16_t imm = 0;
                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                imm |= (lower_11 >> 9) & 3;
                imm |= (lower_11 >> 6) & 4;
                imm |= (lower_11 >> 2) & 0x38;
                imm |= (lower_11 & 63) << 6;

                s.src2 = imm;
        }

        return s;
}
1807ec681f3Smrg
1817ec681f3Smrg/* 64-bit swizzles are super easy since there are 2 components of 2 components
1827ec681f3Smrg * in an 8-bit field ... lots of duplication to go around!
1837ec681f3Smrg *
1847ec681f3Smrg * Swizzles of 32-bit vectors accessed from 64-bit instructions are a little
1857ec681f3Smrg * funny -- pack them *as if* they were native 64-bit, using rep_* flags to
1867ec681f3Smrg * flag upper. For instance, xy would become 64-bit XY but that's just xyzw
1877ec681f3Smrg * native. Likewise, zz would become 64-bit XX with rep* so it would be xyxy
1887ec681f3Smrg * with rep. Pretty nifty, huh? */
1897ec681f3Smrg
1907ec681f3Smrgstatic unsigned
1917ec681f3Smrgmir_pack_swizzle_64(unsigned *swizzle, unsigned max_component)
1927ec681f3Smrg{
1937ec681f3Smrg        unsigned packed = 0;
1947ec681f3Smrg
1957ec681f3Smrg        for (unsigned i = 0; i < 2; ++i) {
1967ec681f3Smrg                assert(swizzle[i] <= max_component);
1977ec681f3Smrg
1987ec681f3Smrg                unsigned a = (swizzle[i] & 1) ?
1997ec681f3Smrg                        (COMPONENT_W << 2) | COMPONENT_Z :
2007ec681f3Smrg                        (COMPONENT_Y << 2) | COMPONENT_X;
2017ec681f3Smrg
2027ec681f3Smrg                packed |= a << (i * 4);
2037ec681f3Smrg        }
2047ec681f3Smrg
2057ec681f3Smrg        return packed;
2067ec681f3Smrg}
2077ec681f3Smrg
/* Pack the ALU writemask, choosing a shrink mode when the destination is
 * overridden to half width (lower or upper half of the register). */
static void
mir_pack_mask_alu(midgard_instruction *ins, midgard_vector_alu *alu)
{
        unsigned effective = ins->mask;

        /* If we have a destination override, we need to figure out whether to
         * override to the lower or upper half, shifting the effective mask in
         * the latter, so AAAA.... becomes AAAA */

        unsigned inst_size = max_bitsize_for_alu(ins);
        signed upper_shift = mir_upper_override(ins, inst_size);

        /* mir_upper_override returns a negative value when no override
         * applies; otherwise the shift to normalize the mask */
        if (upper_shift >= 0) {
                effective >>= upper_shift;
                alu->shrink_mode = upper_shift ?
                        midgard_shrink_mode_upper :
                        midgard_shrink_mode_lower;
        } else {
                alu->shrink_mode = midgard_shrink_mode_none;
        }

        /* Wider components span multiple mask lanes, so spread each mask bit
         * via expand_writemask for 32-bit and 64-bit instructions */
        if (inst_size == 32)
                alu->mask = expand_writemask(effective, 2);
        else if (inst_size == 64)
                alu->mask = expand_writemask(effective, 1);
        else
                alu->mask = effective;
}
2367ec681f3Smrg
/* Pack a source swizzle for a vector ALU instruction, also selecting the
 * expand/replicate mode needed when the source width (sz) differs from the
 * instruction width (base_size). Returns the packed swizzle bits and writes
 * *expand_mode as a side effect. */
static unsigned
mir_pack_swizzle(unsigned mask, unsigned *swizzle,
                 unsigned sz, unsigned base_size,
                 bool op_channeled, midgard_src_expand_mode *expand_mode)
{
        unsigned packed = 0;

        *expand_mode = midgard_src_passthrough;

        midgard_reg_mode reg_mode = reg_mode_for_bitsize(base_size);

        if (reg_mode == midgard_reg_mode_64) {
                assert(sz == 64 || sz == 32);
                unsigned components = (sz == 32) ? 4 : 2;

                packed = mir_pack_swizzle_64(swizzle, components);

                if (sz == 32) {
                        /* 32-bit sources are packed as-if 64-bit; decide which
                         * register half each source lane comes from */
                        bool lo = swizzle[0] >= COMPONENT_Z;
                        bool hi = swizzle[1] >= COMPONENT_Z;

                        if (mask & 0x1) {
                                /* We can't mix halves... */
                                if (mask & 2)
                                        assert(lo == hi);

                                *expand_mode = lo ? midgard_src_expand_high :
                                                    midgard_src_expand_low;
                        } else {
                                *expand_mode = hi ? midgard_src_expand_high :
                                                    midgard_src_expand_low;
                        }
                } else if (sz < 32) {
                        unreachable("Cannot encode 8/16 swizzle in 64-bit");
                }
        } else {
                /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
                 * the strategy is to check whether the nibble we're on is
                 * upper or lower. We need all components to be on the same
                 * "side"; that much is enforced by the ISA and should have
                 * been lowered. TODO: 8-bit packing. TODO: vec8 */

                /* The first written component determines the side */
                unsigned first = mask ? ffs(mask) - 1 : 0;
                bool upper = swizzle[first] > 3;

                if (upper && mask)
                        assert(sz <= 16);

                /* Non-channeled ops writing only the upper half read the
                 * upper half of the swizzle array as well */
                bool dest_up = !op_channeled && (first >= 4);

                for (unsigned c = (dest_up ? 4 : 0); c < (dest_up ? 8 : 4); ++c) {
                        unsigned v = swizzle[c];

                        ASSERTED bool t_upper = v > 3;

                        /* Ensure we're doing something sane */

                        if (mask & (1 << c)) {
                                assert(t_upper == upper);
                                assert(v <= 7);
                        }

                        /* Use the non upper part */
                        v &= 0x3;

                        packed |= v << (2 * (c % 4));
                }


                /* Replicate for now.. should really pick a side for
                 * dot products */

                if (reg_mode == midgard_reg_mode_16 && sz == 16) {
                        *expand_mode = upper ? midgard_src_rep_high :
                                               midgard_src_rep_low;
                } else if (reg_mode == midgard_reg_mode_16 && sz == 8) {
                        if (base_size == 16) {
                                *expand_mode = upper ? midgard_src_expand_high :
                                                       midgard_src_expand_low;
                        } else if (upper) {
                                *expand_mode = midgard_src_swap;
                        }
                } else if (reg_mode == midgard_reg_mode_32 && sz == 16) {
                        *expand_mode = upper ? midgard_src_expand_high :
                                               midgard_src_expand_low;
                } else if (reg_mode == midgard_reg_mode_8) {
                        unreachable("Unhandled reg mode");
                }
        }

        return packed;
}
3297ec681f3Smrg
/* Pack both vector ALU sources (modifier, expand mode, swizzle) into the
 * src1/src2 fields of the ALU word. Skips src2 when it is an inline
 * constant and skips unused (~0) sources entirely. */
static void
mir_pack_vector_srcs(midgard_instruction *ins, midgard_vector_alu *alu)
{
        bool channeled = GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props);

        unsigned base_size = max_bitsize_for_alu(ins);

        for (unsigned i = 0; i < 2; ++i) {
                /* Inline constants occupy the src2 slot directly */
                if (ins->has_inline_constant && (i == 1))
                        continue;

                if (ins->src[i] == ~0)
                        continue;

                /* Sources are either full width or exactly half width */
                unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
                assert((sz == base_size) || (sz == base_size / 2));

                midgard_src_expand_mode expand_mode = midgard_src_passthrough;
                unsigned swizzle = mir_pack_swizzle(ins->mask, ins->swizzle[i],
                                                    sz, base_size, channeled,
                                                    &expand_mode);

                midgard_vector_alu_src pack = {
                        .mod = mir_pack_mod(ins, i, false),
                        .expand_mode = expand_mode,
                        .swizzle = swizzle
                };

                unsigned p = vector_alu_srco_unsigned(pack);

                if (i == 0)
                        alu->src1 = p;
                else
                        alu->src2 = p;
        }
}
3667ec681f3Smrg
3677ec681f3Smrgstatic void
3687ec681f3Smrgmir_pack_swizzle_ldst(midgard_instruction *ins)
3697ec681f3Smrg{
3707ec681f3Smrg        /* TODO: non-32-bit, non-vec4 */
3717ec681f3Smrg        for (unsigned c = 0; c < 4; ++c) {
3727ec681f3Smrg                unsigned v = ins->swizzle[0][c];
3737ec681f3Smrg
3747ec681f3Smrg                /* Check vec4 */
3757ec681f3Smrg                assert(v <= 3);
3767ec681f3Smrg
3777ec681f3Smrg                ins->load_store.swizzle |= v << (2 * c);
3787ec681f3Smrg        }
3797ec681f3Smrg
3807ec681f3Smrg        /* TODO: arg_1/2 */
3817ec681f3Smrg}
3827ec681f3Smrg
3837ec681f3Smrgstatic void
3847ec681f3Smrgmir_pack_swizzle_tex(midgard_instruction *ins)
3857ec681f3Smrg{
3867ec681f3Smrg        for (unsigned i = 0; i < 2; ++i) {
3877ec681f3Smrg                unsigned packed = 0;
3887ec681f3Smrg
3897ec681f3Smrg                for (unsigned c = 0; c < 4; ++c) {
3907ec681f3Smrg                        unsigned v = ins->swizzle[i][c];
3917ec681f3Smrg
3927ec681f3Smrg                        /* Check vec4 */
3937ec681f3Smrg                        assert(v <= 3);
3947ec681f3Smrg
3957ec681f3Smrg                        packed |= v << (2 * c);
3967ec681f3Smrg                }
3977ec681f3Smrg
3987ec681f3Smrg                if (i == 0)
3997ec681f3Smrg                        ins->texture.swizzle = packed;
4007ec681f3Smrg                else
4017ec681f3Smrg                        ins->texture.in_reg_swizzle = packed;
4027ec681f3Smrg        }
4037ec681f3Smrg
4047ec681f3Smrg        /* TODO: bias component */
4057ec681f3Smrg}
4067ec681f3Smrg
4077ec681f3Smrg/* Up to 3 { ALU, LDST } bundles can execute in parallel with a texture op.
4087ec681f3Smrg * Given a texture op, lookahead to see how many such bundles we can flag for
4097ec681f3Smrg * OoO execution */
4107ec681f3Smrg
4117ec681f3Smrgstatic bool
4127ec681f3Smrgmir_can_run_ooo(midgard_block *block, midgard_bundle *bundle,
4137ec681f3Smrg                unsigned dependency)
4147ec681f3Smrg{
4157ec681f3Smrg        /* Don't read out of bounds */
4167ec681f3Smrg        if (bundle >= (midgard_bundle *) ((char *) block->bundles.data + block->bundles.size))
4177ec681f3Smrg                return false;
4187ec681f3Smrg
4197ec681f3Smrg        /* Texture ops can't execute with other texture ops */
4207ec681f3Smrg        if (!IS_ALU(bundle->tag) && bundle->tag != TAG_LOAD_STORE_4)
4217ec681f3Smrg                return false;
4227ec681f3Smrg
4237ec681f3Smrg        /* Ensure there is no read-after-write dependency */
4247ec681f3Smrg
4257ec681f3Smrg        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
4267ec681f3Smrg                midgard_instruction *ins = bundle->instructions[i];
4277ec681f3Smrg
4287ec681f3Smrg                mir_foreach_src(ins, s) {
4297ec681f3Smrg                        if (ins->src[s] == dependency)
4307ec681f3Smrg                                return false;
4317ec681f3Smrg                }
4327ec681f3Smrg        }
4337ec681f3Smrg
4347ec681f3Smrg        /* Otherwise, we're okay */
4357ec681f3Smrg        return true;
4367ec681f3Smrg}
4377ec681f3Smrg
4387ec681f3Smrgstatic void
4397ec681f3Smrgmir_pack_tex_ooo(midgard_block *block, midgard_bundle *bundle, midgard_instruction *ins)
4407ec681f3Smrg{
4417ec681f3Smrg        unsigned count = 0;
4427ec681f3Smrg
4437ec681f3Smrg        for (count = 0; count < 3; ++count) {
4447ec681f3Smrg                if (!mir_can_run_ooo(block, bundle + count + 1, ins->dest))
4457ec681f3Smrg                        break;
4467ec681f3Smrg        }
4477ec681f3Smrg
4487ec681f3Smrg        ins->texture.out_of_order = count;
4497ec681f3Smrg}
4507ec681f3Smrg
4517ec681f3Smrg/* Load store masks are 4-bits. Load/store ops pack for that.
4527ec681f3Smrg * For most operations, vec4 is the natural mask width; vec8 is constrained to
4537ec681f3Smrg * be in pairs, vec2 is duplicated. TODO: 8-bit?
4547ec681f3Smrg * For common stores (i.e. ST.*), each bit masks a single byte in the 32-bit
4557ec681f3Smrg * case, 2 bytes in the 64-bit case and 4 bytes in the 128-bit case.
4567ec681f3Smrg */
4577ec681f3Smrg
/* Pack the 4-bit mask for a common store (ST.*). Each mask bit covers a
 * quarter of the stored value: 1 byte for 32-bit stores, 2 bytes for
 * 64-bit and 4 bytes for 128-bit; st_u8/st_u16 use the low bits directly. */
static unsigned
midgard_pack_common_store_mask(midgard_instruction *ins) {
        unsigned comp_sz = nir_alu_type_get_type_size(ins->dest_type);
        unsigned mask = ins->mask;
        unsigned packed = 0;
        unsigned nr_comp;

        switch (ins->op) {
                case midgard_op_st_u8:
                        /* Single byte: only component 0 can be masked */
                        packed |= mask & 1;
                        break;
                case midgard_op_st_u16:
                        nr_comp = 16 / comp_sz;
                        for (int i = 0; i < nr_comp; i++) {
                                if (mask & (1 << i)) {
                                        if (comp_sz == 16)
                                                packed |= 0x3;
                                        else if (comp_sz == 8)
                                                packed |= 1 << i;
                                }
                        }
                        break;
                case midgard_op_st_32:
                case midgard_op_st_64:
                case midgard_op_st_128: {
                        unsigned total_sz = 32;
                        if (ins->op == midgard_op_st_128)
                                total_sz = 128;
                        else if (ins->op == midgard_op_st_64)
                                total_sz = 64;

                        nr_comp = total_sz / comp_sz;

                        /* Each writemask bit masks 1/4th of the value to be stored. */
                        assert(comp_sz >= total_sz / 4);

                        for (int i = 0; i < nr_comp; i++) {
                                if (mask & (1 << i)) {
                                        if (comp_sz == total_sz)
                                                packed |= 0xF;
                                        else if (comp_sz == total_sz / 2)
                                                packed |= 0x3 << (i * 2);
                                        else if (comp_sz == total_sz / 4)
                                                packed |= 0x1 << i;
                                }
                        }
                        break;
                }
                default:
                        unreachable("unexpected ldst opcode");
        }

        return packed;
}
5127ec681f3Smrg
/* Pack the 4-bit load/store mask. Common stores get byte-granular masks;
 * otherwise vec4/32-bit is the natural width, 64-bit components take two
 * mask bits each, and 16-bit components must appear in duplicated pairs. */
static void
mir_pack_ldst_mask(midgard_instruction *ins)
{
        unsigned sz = nir_alu_type_get_type_size(ins->dest_type);
        unsigned packed = ins->mask;

        if (OP_IS_COMMON_STORE(ins->op)) {
                packed = midgard_pack_common_store_mask(ins);
        } else {
                if (sz == 64) {
                        /* Each 64-bit component spans two mask bits */
                        packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
                                ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
                } else if (sz == 16) {
                        packed = 0;

                        for (unsigned i = 0; i < 4; ++i) {
                                /* Make sure we're duplicated */
                                bool u = (ins->mask & (1 << (2*i + 0))) != 0;
                                ASSERTED bool v = (ins->mask & (1 << (2*i + 1))) != 0;
                                assert(u == v);

                                packed |= (u << i);
                        }
                } else {
                        /* 32-bit masks map 1:1 */
                        assert(sz == 32);
                }
        }

        ins->load_store.mask = packed;
}
5437ec681f3Smrg
5447ec681f3Smrgstatic void
5457ec681f3Smrgmir_lower_inverts(midgard_instruction *ins)
5467ec681f3Smrg{
5477ec681f3Smrg        bool inv[3] = {
5487ec681f3Smrg                ins->src_invert[0],
5497ec681f3Smrg                ins->src_invert[1],
5507ec681f3Smrg                ins->src_invert[2]
5517ec681f3Smrg        };
5527ec681f3Smrg
5537ec681f3Smrg        switch (ins->op) {
5547ec681f3Smrg        case midgard_alu_op_iand:
5557ec681f3Smrg                /* a & ~b = iandnot(a, b) */
5567ec681f3Smrg                /* ~a & ~b = ~(a | b) = inor(a, b) */
5577ec681f3Smrg
5587ec681f3Smrg                if (inv[0] && inv[1])
5597ec681f3Smrg                        ins->op = midgard_alu_op_inor;
5607ec681f3Smrg                else if (inv[1])
5617ec681f3Smrg                        ins->op = midgard_alu_op_iandnot;
5627ec681f3Smrg
5637ec681f3Smrg                break;
5647ec681f3Smrg        case midgard_alu_op_ior:
5657ec681f3Smrg                /*  a | ~b = iornot(a, b) */
5667ec681f3Smrg                /* ~a | ~b = ~(a & b) = inand(a, b) */
5677ec681f3Smrg
5687ec681f3Smrg                if (inv[0] && inv[1])
5697ec681f3Smrg                        ins->op = midgard_alu_op_inand;
5707ec681f3Smrg                else if (inv[1])
5717ec681f3Smrg                        ins->op = midgard_alu_op_iornot;
5727ec681f3Smrg
5737ec681f3Smrg                break;
5747ec681f3Smrg
5757ec681f3Smrg        case midgard_alu_op_ixor:
5767ec681f3Smrg                /* ~a ^ b = a ^ ~b = ~(a ^ b) = inxor(a, b) */
5777ec681f3Smrg                /* ~a ^ ~b = a ^ b */
5787ec681f3Smrg
5797ec681f3Smrg                if (inv[0] ^ inv[1])
5807ec681f3Smrg                        ins->op = midgard_alu_op_inxor;
5817ec681f3Smrg
5827ec681f3Smrg                break;
5837ec681f3Smrg
5847ec681f3Smrg        default:
5857ec681f3Smrg                break;
5867ec681f3Smrg        }
5877ec681f3Smrg}
5887ec681f3Smrg
5897ec681f3Smrg/* Opcodes with ROUNDS are the base (rte/0) type so we can just add */
5907ec681f3Smrg
5917ec681f3Smrgstatic void
5927ec681f3Smrgmir_lower_roundmode(midgard_instruction *ins)
5937ec681f3Smrg{
5947ec681f3Smrg        if (alu_opcode_props[ins->op].props & MIDGARD_ROUNDS) {
5957ec681f3Smrg                assert(ins->roundmode <= 0x3);
5967ec681f3Smrg                ins->op += ins->roundmode;
5977ec681f3Smrg        }
5987ec681f3Smrg}
5997ec681f3Smrg
/* Assemble the final load/store word from the IR instruction: opcode, data
 * register, and the optional address (src[1]) / index (src[2]) registers. */
static midgard_load_store_word
load_store_from_instr(midgard_instruction *ins)
{
        midgard_load_store_word ldst = ins->load_store;
        ldst.op = ins->op;

        if (OP_IS_STORE(ldst.op)) {
                /* Stores select their data source by the register's low bit */
                ldst.reg = SSA_REG_FROM_FIXED(ins->src[0]) & 1;
        } else {
                ldst.reg = SSA_REG_FROM_FIXED(ins->dest);
        }

        /* Atomic opcode swizzles have a special meaning:
         *   - The first two bits say which component of the implicit register should be used
         *   - The next two bits say if the implicit register is r26 or r27 */
        if (OP_IS_ATOMIC(ins->op)) {
                ldst.swizzle = 0;
                ldst.swizzle |= ins->swizzle[3][0] & 3;
                ldst.swizzle |= (SSA_REG_FROM_FIXED(ins->src[3]) & 1 ? 1 : 0) << 2;
        }

        /* src[1]: address argument register, rebased to the LDST register
         * window with its component derived from the swizzle */
        if (ins->src[1] != ~0) {
                ldst.arg_reg = SSA_REG_FROM_FIXED(ins->src[1]) - REGISTER_LDST_BASE;
                unsigned sz = nir_alu_type_get_type_size(ins->src_types[1]);
                ldst.arg_comp = midgard_ldst_comp(ldst.arg_reg, ins->swizzle[1][0], sz);
        }

        /* src[2]: index register, encoded the same way */
        if (ins->src[2] != ~0) {
                ldst.index_reg = SSA_REG_FROM_FIXED(ins->src[2]) - REGISTER_LDST_BASE;
                unsigned sz = nir_alu_type_get_type_size(ins->src_types[2]);
                ldst.index_comp = midgard_ldst_comp(ldst.index_reg, ins->swizzle[2][0], sz);
        }

        return ldst;
}
6357ec681f3Smrg
/* Assemble the final texture word from the IR instruction: opcode, in/out
 * register selects, bias/LOD register (src[2]) and offset register (src[3]). */
static midgard_texture_word
texture_word_from_instr(midgard_instruction *ins)
{
        midgard_texture_word tex = ins->texture;
        tex.op = ins->op;

        /* Texture I/O registers come in pairs; the low bit selects within */
        unsigned src1 = ins->src[1] == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->src[1]);
        tex.in_reg_select = src1 & 1;

        unsigned dest = ins->dest == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->dest);
        tex.out_reg_select = dest & 1;

        /* src[2]: bias/LOD, packed as a register select bit-cast to a byte */
        if (ins->src[2] != ~0) {
                midgard_tex_register_select sel = {
                        .select = SSA_REG_FROM_FIXED(ins->src[2]) & 1,
                        .full = 1,
                        .component = ins->swizzle[2][0]
                };
                uint8_t packed;
                memcpy(&packed, &sel, sizeof(packed));
                tex.bias = packed;
        }

        /* src[3]: texel offset; the swizzle must be a contiguous xyz run
         * starting at the first lane's selection */
        if (ins->src[3] != ~0) {
                unsigned x = ins->swizzle[3][0];
                unsigned y = x + 1;
                unsigned z = x + 2;

                /* Check range, TODO: half-registers */
                assert(z < 4);

                unsigned offset_reg = SSA_REG_FROM_FIXED(ins->src[3]);
                tex.offset =
                        (1)                   | /* full */
                        (offset_reg & 1) << 1 | /* select */
                        (0 << 2)              | /* upper */
                        (x << 3)              | /* swizzle */
                        (y << 5)              | /* swizzle */
                        (z << 7);               /* swizzle */
        }

        return tex;
}
6797ec681f3Smrg
6807ec681f3Smrgstatic midgard_vector_alu
6817ec681f3Smrgvector_alu_from_instr(midgard_instruction *ins)
6827ec681f3Smrg{
6837ec681f3Smrg        midgard_vector_alu alu = {
6847ec681f3Smrg                .op = ins->op,
6857ec681f3Smrg                .outmod = ins->outmod,
6867ec681f3Smrg                .reg_mode = reg_mode_for_bitsize(max_bitsize_for_alu(ins))
6877ec681f3Smrg        };
6887ec681f3Smrg
6897ec681f3Smrg        if (ins->has_inline_constant) {
6907ec681f3Smrg                /* Encode inline 16-bit constant. See disassembler for
6917ec681f3Smrg                 * where the algorithm is from */
6927ec681f3Smrg
6937ec681f3Smrg                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
6947ec681f3Smrg                uint16_t imm = ((lower_11 >> 8) & 0x7) |
6957ec681f3Smrg                               ((lower_11 & 0xFF) << 3);
6967ec681f3Smrg
6977ec681f3Smrg                alu.src2 = imm << 2;
6987ec681f3Smrg        }
6997ec681f3Smrg
7007ec681f3Smrg        return alu;
7017ec681f3Smrg}
7027ec681f3Smrg
7037ec681f3Smrgstatic midgard_branch_extended
7047ec681f3Smrgmidgard_create_branch_extended( midgard_condition cond,
7057ec681f3Smrg                                midgard_jmp_writeout_op op,
7067ec681f3Smrg                                unsigned dest_tag,
7077ec681f3Smrg                                signed quadword_offset)
7087ec681f3Smrg{
7097ec681f3Smrg        /* The condition code is actually a LUT describing a function to
7107ec681f3Smrg         * combine multiple condition codes. However, we only support a single
7117ec681f3Smrg         * condition code at the moment, so we just duplicate over a bunch of
7127ec681f3Smrg         * times. */
7137ec681f3Smrg
7147ec681f3Smrg        uint16_t duplicated_cond =
7157ec681f3Smrg                (cond << 14) |
7167ec681f3Smrg                (cond << 12) |
7177ec681f3Smrg                (cond << 10) |
7187ec681f3Smrg                (cond << 8) |
7197ec681f3Smrg                (cond << 6) |
7207ec681f3Smrg                (cond << 4) |
7217ec681f3Smrg                (cond << 2) |
7227ec681f3Smrg                (cond << 0);
7237ec681f3Smrg
7247ec681f3Smrg        midgard_branch_extended branch = {
7257ec681f3Smrg                .op = op,
7267ec681f3Smrg                .dest_tag = dest_tag,
7277ec681f3Smrg                .offset = quadword_offset,
7287ec681f3Smrg                .cond = duplicated_cond
7297ec681f3Smrg        };
7307ec681f3Smrg
7317ec681f3Smrg        return branch;
7327ec681f3Smrg}
7337ec681f3Smrg
7347ec681f3Smrgstatic void
7357ec681f3Smrgemit_branch(midgard_instruction *ins,
7367ec681f3Smrg            compiler_context *ctx,
7377ec681f3Smrg            midgard_block *block,
7387ec681f3Smrg            midgard_bundle *bundle,
7397ec681f3Smrg            struct util_dynarray *emission)
7407ec681f3Smrg{
7417ec681f3Smrg        /* Parse some basic branch info */
7427ec681f3Smrg        bool is_compact = ins->unit == ALU_ENAB_BR_COMPACT;
7437ec681f3Smrg        bool is_conditional = ins->branch.conditional;
7447ec681f3Smrg        bool is_inverted = ins->branch.invert_conditional;
7457ec681f3Smrg        bool is_discard = ins->branch.target_type == TARGET_DISCARD;
7467ec681f3Smrg        bool is_tilebuf_wait = ins->branch.target_type == TARGET_TILEBUF_WAIT;
7477ec681f3Smrg        bool is_special = is_discard || is_tilebuf_wait;
7487ec681f3Smrg        bool is_writeout = ins->writeout;
7497ec681f3Smrg
7507ec681f3Smrg        /* Determine the block we're jumping to */
7517ec681f3Smrg        int target_number = ins->branch.target_block;
7527ec681f3Smrg
7537ec681f3Smrg        /* Report the destination tag */
7547ec681f3Smrg        int dest_tag = is_discard ? 0 :
7557ec681f3Smrg                is_tilebuf_wait ? bundle->tag :
7567ec681f3Smrg                midgard_get_first_tag_from_block(ctx, target_number);
7577ec681f3Smrg
7587ec681f3Smrg        /* Count up the number of quadwords we're
7597ec681f3Smrg         * jumping over = number of quadwords until
7607ec681f3Smrg         * (br_block_idx, target_number) */
7617ec681f3Smrg
7627ec681f3Smrg        int quadword_offset = 0;
7637ec681f3Smrg
7647ec681f3Smrg        if (is_discard) {
7657ec681f3Smrg                /* Fixed encoding, not actually an offset */
7667ec681f3Smrg                quadword_offset = 0x2;
7677ec681f3Smrg        } else if (is_tilebuf_wait) {
7687ec681f3Smrg                quadword_offset = -1;
7697ec681f3Smrg        } else if (target_number > block->base.name) {
7707ec681f3Smrg                /* Jump forward */
7717ec681f3Smrg
7727ec681f3Smrg                for (int idx = block->base.name+1; idx < target_number; ++idx) {
7737ec681f3Smrg                        midgard_block *blk = mir_get_block(ctx, idx);
7747ec681f3Smrg                        assert(blk);
7757ec681f3Smrg
7767ec681f3Smrg                        quadword_offset += blk->quadword_count;
7777ec681f3Smrg                }
7787ec681f3Smrg        } else {
7797ec681f3Smrg                /* Jump backwards */
7807ec681f3Smrg
7817ec681f3Smrg                for (int idx = block->base.name; idx >= target_number; --idx) {
7827ec681f3Smrg                        midgard_block *blk = mir_get_block(ctx, idx);
7837ec681f3Smrg                        assert(blk);
7847ec681f3Smrg
7857ec681f3Smrg                        quadword_offset -= blk->quadword_count;
7867ec681f3Smrg                }
7877ec681f3Smrg        }
7887ec681f3Smrg
7897ec681f3Smrg        /* Unconditional extended branches (far jumps)
7907ec681f3Smrg         * have issues, so we always use a conditional
7917ec681f3Smrg         * branch, setting the condition to always for
7927ec681f3Smrg         * unconditional. For compact unconditional
7937ec681f3Smrg         * branches, cond isn't used so it doesn't
7947ec681f3Smrg         * matter what we pick. */
7957ec681f3Smrg
7967ec681f3Smrg        midgard_condition cond =
7977ec681f3Smrg                !is_conditional ? midgard_condition_always :
7987ec681f3Smrg                is_inverted ? midgard_condition_false :
7997ec681f3Smrg                midgard_condition_true;
8007ec681f3Smrg
8017ec681f3Smrg        midgard_jmp_writeout_op op =
8027ec681f3Smrg                is_discard ? midgard_jmp_writeout_op_discard :
8037ec681f3Smrg                is_tilebuf_wait ? midgard_jmp_writeout_op_tilebuffer_pending :
8047ec681f3Smrg                is_writeout ? midgard_jmp_writeout_op_writeout :
8057ec681f3Smrg                (is_compact && !is_conditional) ?
8067ec681f3Smrg                midgard_jmp_writeout_op_branch_uncond :
8077ec681f3Smrg                midgard_jmp_writeout_op_branch_cond;
8087ec681f3Smrg
8097ec681f3Smrg        if (is_compact) {
8107ec681f3Smrg                unsigned size = sizeof(midgard_branch_cond);
8117ec681f3Smrg
8127ec681f3Smrg                if (is_conditional || is_special) {
8137ec681f3Smrg                        midgard_branch_cond branch = {
8147ec681f3Smrg                                .op = op,
8157ec681f3Smrg                                .dest_tag = dest_tag,
8167ec681f3Smrg                                .offset = quadword_offset,
8177ec681f3Smrg                                .cond = cond
8187ec681f3Smrg                        };
8197ec681f3Smrg                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
8207ec681f3Smrg                } else {
8217ec681f3Smrg                        assert(op == midgard_jmp_writeout_op_branch_uncond);
8227ec681f3Smrg                        midgard_branch_uncond branch = {
8237ec681f3Smrg                                .op = op,
8247ec681f3Smrg                                .dest_tag = dest_tag,
8257ec681f3Smrg                                .offset = quadword_offset,
8267ec681f3Smrg                                .unknown = 1
8277ec681f3Smrg                        };
8287ec681f3Smrg                        assert(branch.offset == quadword_offset);
8297ec681f3Smrg                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
8307ec681f3Smrg                }
8317ec681f3Smrg        } else { /* `ins->compact_branch`,  misnomer */
8327ec681f3Smrg                unsigned size = sizeof(midgard_branch_extended);
8337ec681f3Smrg
8347ec681f3Smrg                midgard_branch_extended branch =
8357ec681f3Smrg                        midgard_create_branch_extended(
8367ec681f3Smrg                                        cond, op,
8377ec681f3Smrg                                        dest_tag,
8387ec681f3Smrg                                        quadword_offset);
8397ec681f3Smrg
8407ec681f3Smrg                memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
8417ec681f3Smrg        }
8427ec681f3Smrg}
8437ec681f3Smrg
8447ec681f3Smrgstatic void
8457ec681f3Smrgemit_alu_bundle(compiler_context *ctx,
8467ec681f3Smrg                midgard_block *block,
8477ec681f3Smrg                midgard_bundle *bundle,
8487ec681f3Smrg                struct util_dynarray *emission,
8497ec681f3Smrg                unsigned lookahead)
8507ec681f3Smrg{
8517ec681f3Smrg        /* Emit the control word */
8527ec681f3Smrg        util_dynarray_append(emission, uint32_t, bundle->control | lookahead);
8537ec681f3Smrg
8547ec681f3Smrg        /* Next up, emit register words */
8557ec681f3Smrg        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
8567ec681f3Smrg                midgard_instruction *ins = bundle->instructions[i];
8577ec681f3Smrg
8587ec681f3Smrg                /* Check if this instruction has registers */
8597ec681f3Smrg                if (ins->compact_branch) continue;
8607ec681f3Smrg
8617ec681f3Smrg                unsigned src2_reg = REGISTER_UNUSED;
8627ec681f3Smrg                if (ins->has_inline_constant)
8637ec681f3Smrg                        src2_reg = ins->inline_constant >> 11;
8647ec681f3Smrg                else if (ins->src[1] != ~0)
8657ec681f3Smrg                        src2_reg = SSA_REG_FROM_FIXED(ins->src[1]);
8667ec681f3Smrg
8677ec681f3Smrg                /* Otherwise, just emit the registers */
8687ec681f3Smrg                uint16_t reg_word = 0;
8697ec681f3Smrg                midgard_reg_info registers = {
8707ec681f3Smrg                        .src1_reg = (ins->src[0] == ~0 ?
8717ec681f3Smrg                                        REGISTER_UNUSED :
8727ec681f3Smrg                                        SSA_REG_FROM_FIXED(ins->src[0])),
8737ec681f3Smrg                        .src2_reg = src2_reg,
8747ec681f3Smrg                        .src2_imm = ins->has_inline_constant,
8757ec681f3Smrg                        .out_reg = (ins->dest == ~0 ?
8767ec681f3Smrg                                        REGISTER_UNUSED :
8777ec681f3Smrg                                        SSA_REG_FROM_FIXED(ins->dest)),
8787ec681f3Smrg                };
8797ec681f3Smrg                memcpy(&reg_word, &registers, sizeof(uint16_t));
8807ec681f3Smrg                util_dynarray_append(emission, uint16_t, reg_word);
8817ec681f3Smrg        }
8827ec681f3Smrg
8837ec681f3Smrg        /* Now, we emit the body itself */
8847ec681f3Smrg        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
8857ec681f3Smrg                midgard_instruction *ins = bundle->instructions[i];
8867ec681f3Smrg
8877ec681f3Smrg                if (!ins->compact_branch) {
8887ec681f3Smrg                        mir_lower_inverts(ins);
8897ec681f3Smrg                        mir_lower_roundmode(ins);
8907ec681f3Smrg                }
8917ec681f3Smrg
8927ec681f3Smrg                if (midgard_is_branch_unit(ins->unit)) {
8937ec681f3Smrg                        emit_branch(ins, ctx, block, bundle, emission);
8947ec681f3Smrg                } else if (ins->unit & UNITS_ANY_VECTOR) {
8957ec681f3Smrg                        midgard_vector_alu source = vector_alu_from_instr(ins);
8967ec681f3Smrg                        mir_pack_mask_alu(ins, &source);
8977ec681f3Smrg                        mir_pack_vector_srcs(ins, &source);
8987ec681f3Smrg                        unsigned size = sizeof(source);
8997ec681f3Smrg                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &source, size);
9007ec681f3Smrg                } else {
9017ec681f3Smrg                        midgard_scalar_alu source = vector_to_scalar_alu(vector_alu_from_instr(ins), ins);
9027ec681f3Smrg                        unsigned size = sizeof(source);
9037ec681f3Smrg                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &source, size);
9047ec681f3Smrg                }
9057ec681f3Smrg        }
9067ec681f3Smrg
9077ec681f3Smrg        /* Emit padding (all zero) */
9087ec681f3Smrg        if (bundle->padding) {
9097ec681f3Smrg                memset(util_dynarray_grow_bytes(emission, bundle->padding, 1),
9107ec681f3Smrg                                0, bundle->padding);
9117ec681f3Smrg        }
9127ec681f3Smrg
9137ec681f3Smrg        /* Tack on constants */
9147ec681f3Smrg
9157ec681f3Smrg        if (bundle->has_embedded_constants)
9167ec681f3Smrg                util_dynarray_append(emission, midgard_constants, bundle->constants);
9177ec681f3Smrg}
9187ec681f3Smrg
9197ec681f3Smrg/* Shift applied to the immediate used as an offset. Probably this is papering
9207ec681f3Smrg * over some other semantic distinction as well, but it unifies things in the
9217ec681f3Smrg * compiler so I don't mind. */
9227ec681f3Smrg
9237ec681f3Smrgstatic void
9247ec681f3Smrgmir_ldst_pack_offset(midgard_instruction *ins, int offset)
9257ec681f3Smrg{
9267ec681f3Smrg        /* These opcodes don't support offsets */
9277ec681f3Smrg        assert(!OP_IS_REG2REG_LDST(ins->op) ||
9287ec681f3Smrg               ins->op == midgard_op_lea    ||
9297ec681f3Smrg               ins->op == midgard_op_lea_image);
9307ec681f3Smrg
9317ec681f3Smrg        if (OP_IS_UBO_READ(ins->op))
9327ec681f3Smrg                ins->load_store.signed_offset |= PACK_LDST_UBO_OFS(offset);
9337ec681f3Smrg        else if (OP_IS_IMAGE(ins->op))
9347ec681f3Smrg                ins->load_store.signed_offset |= PACK_LDST_ATTRIB_OFS(offset);
9357ec681f3Smrg        else if (OP_IS_SPECIAL(ins->op))
9367ec681f3Smrg                ins->load_store.signed_offset |= PACK_LDST_SELECTOR_OFS(offset);
9377ec681f3Smrg        else
9387ec681f3Smrg                ins->load_store.signed_offset |= PACK_LDST_MEM_OFS(offset);
9397ec681f3Smrg}
9407ec681f3Smrg
9417ec681f3Smrgstatic enum mali_sampler_type
9427ec681f3Smrgmidgard_sampler_type(nir_alu_type t) {
9437ec681f3Smrg        switch (nir_alu_type_get_base_type(t))
9447ec681f3Smrg        {
9457ec681f3Smrg        case nir_type_float:
9467ec681f3Smrg                return MALI_SAMPLER_FLOAT;
9477ec681f3Smrg        case nir_type_int:
9487ec681f3Smrg                return MALI_SAMPLER_SIGNED;
9497ec681f3Smrg        case nir_type_uint:
9507ec681f3Smrg                return MALI_SAMPLER_UNSIGNED;
9517ec681f3Smrg        default:
9527ec681f3Smrg                unreachable("Unknown sampler type");
9537ec681f3Smrg        }
9547ec681f3Smrg}
9557ec681f3Smrg
9567ec681f3Smrg/* After everything is scheduled, emit whole bundles at a time */
9577ec681f3Smrg
9587ec681f3Smrgvoid
9597ec681f3Smrgemit_binary_bundle(compiler_context *ctx,
9607ec681f3Smrg                   midgard_block *block,
9617ec681f3Smrg                   midgard_bundle *bundle,
9627ec681f3Smrg                   struct util_dynarray *emission,
9637ec681f3Smrg                   int next_tag)
9647ec681f3Smrg{
9657ec681f3Smrg        int lookahead = next_tag << 4;
9667ec681f3Smrg
9677ec681f3Smrg        switch (bundle->tag) {
9687ec681f3Smrg        case TAG_ALU_4:
9697ec681f3Smrg        case TAG_ALU_8:
9707ec681f3Smrg        case TAG_ALU_12:
9717ec681f3Smrg        case TAG_ALU_16:
9727ec681f3Smrg        case TAG_ALU_4 + 4:
9737ec681f3Smrg        case TAG_ALU_8 + 4:
9747ec681f3Smrg        case TAG_ALU_12 + 4:
9757ec681f3Smrg        case TAG_ALU_16 + 4:
9767ec681f3Smrg                emit_alu_bundle(ctx, block, bundle, emission, lookahead);
9777ec681f3Smrg                break;
9787ec681f3Smrg
9797ec681f3Smrg        case TAG_LOAD_STORE_4: {
9807ec681f3Smrg                /* One or two composing instructions */
9817ec681f3Smrg
9827ec681f3Smrg                uint64_t current64, next64 = LDST_NOP;
9837ec681f3Smrg
9847ec681f3Smrg                /* Copy masks */
9857ec681f3Smrg
9867ec681f3Smrg                for (unsigned i = 0; i < bundle->instruction_count; ++i) {
9877ec681f3Smrg                        midgard_instruction *ins = bundle->instructions[i];
9887ec681f3Smrg                        mir_pack_ldst_mask(ins);
9897ec681f3Smrg
9907ec681f3Smrg                        /* Atomic ops don't use this swizzle the same way as other ops */
9917ec681f3Smrg                        if (!OP_IS_ATOMIC(ins->op))
9927ec681f3Smrg                                mir_pack_swizzle_ldst(ins);
9937ec681f3Smrg
9947ec681f3Smrg                        /* Apply a constant offset */
9957ec681f3Smrg                        unsigned offset = ins->constants.u32[0];
9967ec681f3Smrg                        if (offset)
9977ec681f3Smrg                                mir_ldst_pack_offset(ins, offset);
9987ec681f3Smrg                }
9997ec681f3Smrg
10007ec681f3Smrg                midgard_load_store_word ldst0 =
10017ec681f3Smrg                        load_store_from_instr(bundle->instructions[0]);
10027ec681f3Smrg                memcpy(&current64, &ldst0, sizeof(current64));
10037ec681f3Smrg
10047ec681f3Smrg                if (bundle->instruction_count == 2) {
10057ec681f3Smrg                        midgard_load_store_word ldst1 =
10067ec681f3Smrg                                load_store_from_instr(bundle->instructions[1]);
10077ec681f3Smrg                        memcpy(&next64, &ldst1, sizeof(next64));
10087ec681f3Smrg                }
10097ec681f3Smrg
10107ec681f3Smrg                midgard_load_store instruction = {
10117ec681f3Smrg                        .type = bundle->tag,
10127ec681f3Smrg                        .next_type = next_tag,
10137ec681f3Smrg                        .word1 = current64,
10147ec681f3Smrg                        .word2 = next64
10157ec681f3Smrg                };
10167ec681f3Smrg
10177ec681f3Smrg                util_dynarray_append(emission, midgard_load_store, instruction);
10187ec681f3Smrg
10197ec681f3Smrg                break;
10207ec681f3Smrg        }
10217ec681f3Smrg
10227ec681f3Smrg        case TAG_TEXTURE_4:
10237ec681f3Smrg        case TAG_TEXTURE_4_VTX:
10247ec681f3Smrg        case TAG_TEXTURE_4_BARRIER: {
10257ec681f3Smrg                /* Texture instructions are easy, since there is no pipelining
10267ec681f3Smrg                 * nor VLIW to worry about. We may need to set .cont/.last
10277ec681f3Smrg                 * flags. */
10287ec681f3Smrg
10297ec681f3Smrg                midgard_instruction *ins = bundle->instructions[0];
10307ec681f3Smrg
10317ec681f3Smrg                ins->texture.type = bundle->tag;
10327ec681f3Smrg                ins->texture.next_type = next_tag;
10337ec681f3Smrg
10347ec681f3Smrg                /* Nothing else to pack for barriers */
10357ec681f3Smrg                if (ins->op == midgard_tex_op_barrier) {
10367ec681f3Smrg                        ins->texture.cont = ins->texture.last = 1;
10377ec681f3Smrg                        ins->texture.op = ins->op;
10387ec681f3Smrg                        util_dynarray_append(emission, midgard_texture_word, ins->texture);
10397ec681f3Smrg                        return;
10407ec681f3Smrg                }
10417ec681f3Smrg
10427ec681f3Smrg                signed override = mir_upper_override(ins, 32);
10437ec681f3Smrg
10447ec681f3Smrg                ins->texture.mask = override > 0 ?
10457ec681f3Smrg                        ins->mask >> override :
10467ec681f3Smrg                        ins->mask;
10477ec681f3Smrg
10487ec681f3Smrg                mir_pack_swizzle_tex(ins);
10497ec681f3Smrg
10507ec681f3Smrg                if (!(ctx->quirks & MIDGARD_NO_OOO))
10517ec681f3Smrg                        mir_pack_tex_ooo(block, bundle, ins);
10527ec681f3Smrg
10537ec681f3Smrg                unsigned osz = nir_alu_type_get_type_size(ins->dest_type);
10547ec681f3Smrg                unsigned isz = nir_alu_type_get_type_size(ins->src_types[1]);
10557ec681f3Smrg
10567ec681f3Smrg                assert(osz == 32 || osz == 16);
10577ec681f3Smrg                assert(isz == 32 || isz == 16);
10587ec681f3Smrg
10597ec681f3Smrg                ins->texture.out_full = (osz == 32);
10607ec681f3Smrg                ins->texture.out_upper = override > 0;
10617ec681f3Smrg                ins->texture.in_reg_full = (isz == 32);
10627ec681f3Smrg                ins->texture.sampler_type = midgard_sampler_type(ins->dest_type);
10637ec681f3Smrg                ins->texture.outmod = ins->outmod;
10647ec681f3Smrg
10657ec681f3Smrg                if (mir_op_computes_derivatives(ctx->stage, ins->op)) {
10667ec681f3Smrg                        ins->texture.cont = !ins->helper_terminate;
10677ec681f3Smrg                        ins->texture.last = ins->helper_terminate || ins->helper_execute;
10687ec681f3Smrg                } else {
10697ec681f3Smrg                        ins->texture.cont = ins->texture.last = 1;
10707ec681f3Smrg                }
10717ec681f3Smrg
10727ec681f3Smrg                midgard_texture_word texture = texture_word_from_instr(ins);
10737ec681f3Smrg                util_dynarray_append(emission, midgard_texture_word, texture);
10747ec681f3Smrg                break;
10757ec681f3Smrg        }
10767ec681f3Smrg
10777ec681f3Smrg        default:
10787ec681f3Smrg                unreachable("Unknown midgard instruction type\n");
10797ec681f3Smrg        }
10807ec681f3Smrg}
1081