17ec681f3Smrg/*
27ec681f3Smrg * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
37ec681f3Smrg * Copyright (C) 2019-2020 Collabora, Ltd.
47ec681f3Smrg *
57ec681f3Smrg * Permission is hereby granted, free of charge, to any person obtaining a
67ec681f3Smrg * copy of this software and associated documentation files (the "Software"),
77ec681f3Smrg * to deal in the Software without restriction, including without limitation
87ec681f3Smrg * the rights to use, copy, modify, merge, publish, distribute, sublicense,
97ec681f3Smrg * and/or sell copies of the Software, and to permit persons to whom the
107ec681f3Smrg * Software is furnished to do so, subject to the following conditions:
117ec681f3Smrg *
127ec681f3Smrg * The above copyright notice and this permission notice (including the next
137ec681f3Smrg * paragraph) shall be included in all copies or substantial portions of the
147ec681f3Smrg * Software.
157ec681f3Smrg *
167ec681f3Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
177ec681f3Smrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
187ec681f3Smrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
197ec681f3Smrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
207ec681f3Smrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
217ec681f3Smrg * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
227ec681f3Smrg * SOFTWARE.
237ec681f3Smrg */
247ec681f3Smrg
257ec681f3Smrg#include "compiler.h"
267ec681f3Smrg#include "midgard_ops.h"
277ec681f3Smrg#include "midgard_quirks.h"
287ec681f3Smrg#include "util/u_memory.h"
297ec681f3Smrg#include "util/u_math.h"
307ec681f3Smrg#include "util/half_float.h"
317ec681f3Smrg
327ec681f3Smrg/* Scheduling for Midgard is complicated, to say the least. ALU instructions
337ec681f3Smrg * must be grouped into VLIW bundles according to following model:
347ec681f3Smrg *
357ec681f3Smrg * [VMUL] [SADD]
367ec681f3Smrg * [VADD] [SMUL] [VLUT]
377ec681f3Smrg *
387ec681f3Smrg * A given instruction can execute on some subset of the units (or a few can
397ec681f3Smrg * execute on all). Instructions can be either vector or scalar; only scalar
407ec681f3Smrg * instructions can execute on SADD/SMUL units. Units on a given line execute
417ec681f3Smrg * in parallel. Subsequent lines execute separately and can pass results
427ec681f3Smrg * directly via pipeline registers r24/r25, bypassing the register file.
437ec681f3Smrg *
447ec681f3Smrg * A bundle can optionally have 128-bits of embedded constants, shared across
457ec681f3Smrg * all of the instructions within a bundle.
467ec681f3Smrg *
477ec681f3Smrg * Instructions consuming conditionals (branches and conditional selects)
487ec681f3Smrg * require their condition to be written into the conditional register (r31)
497ec681f3Smrg * within the same bundle they are consumed.
507ec681f3Smrg *
517ec681f3Smrg * Fragment writeout requires its argument to be written in full within the
527ec681f3Smrg * same bundle as the branch, with no hanging dependencies.
537ec681f3Smrg *
547ec681f3Smrg * Load/store instructions are also in bundles of simply two instructions, and
557ec681f3Smrg * texture instructions have no bundling.
567ec681f3Smrg *
577ec681f3Smrg * -------------------------------------------------------------------------
587ec681f3Smrg *
597ec681f3Smrg */
607ec681f3Smrg
617ec681f3Smrg/* We create the dependency graph with per-byte granularity */
627ec681f3Smrg
637ec681f3Smrg#define BYTE_COUNT 16
647ec681f3Smrg
657ec681f3Smrgstatic void
667ec681f3Smrgadd_dependency(struct util_dynarray *table, unsigned index, uint16_t mask, midgard_instruction **instructions, unsigned child)
677ec681f3Smrg{
687ec681f3Smrg        for (unsigned i = 0; i < BYTE_COUNT; ++i) {
697ec681f3Smrg                if (!(mask & (1 << i)))
707ec681f3Smrg                        continue;
717ec681f3Smrg
727ec681f3Smrg                struct util_dynarray *parents = &table[(BYTE_COUNT * index) + i];
737ec681f3Smrg
747ec681f3Smrg                util_dynarray_foreach(parents, unsigned, parent) {
757ec681f3Smrg                        BITSET_WORD *dependents = instructions[*parent]->dependents;
767ec681f3Smrg
777ec681f3Smrg                        /* Already have the dependency */
787ec681f3Smrg                        if (BITSET_TEST(dependents, child))
797ec681f3Smrg                                continue;
807ec681f3Smrg
817ec681f3Smrg                        BITSET_SET(dependents, child);
827ec681f3Smrg                        instructions[child]->nr_dependencies++;
837ec681f3Smrg                }
847ec681f3Smrg        }
857ec681f3Smrg}
867ec681f3Smrg
877ec681f3Smrgstatic void
887ec681f3Smrgmark_access(struct util_dynarray *table, unsigned index, uint16_t mask, unsigned parent)
897ec681f3Smrg{
907ec681f3Smrg        for (unsigned i = 0; i < BYTE_COUNT; ++i) {
917ec681f3Smrg                if (!(mask & (1 << i)))
927ec681f3Smrg                        continue;
937ec681f3Smrg
947ec681f3Smrg                util_dynarray_append(&table[(BYTE_COUNT * index) + i], unsigned, parent);
957ec681f3Smrg        }
967ec681f3Smrg}
977ec681f3Smrg
/* Build the per-block dependency DAG consumed by the worklist scheduler.
 * Dependencies are tracked at byte granularity: for each of the BYTE_COUNT
 * bytes of each of the node_count nodes, we keep dynarrays of the
 * instructions seen (so far, scanning bottom-up) that read and that write
 * the byte. Each edge makes an instruction depend on a later instruction
 * that touches the same bytes, filling in every instruction's `dependents`
 * bitset and `nr_dependencies` count. */
static void
mir_create_dependency_graph(midgard_instruction **instructions, unsigned count, unsigned node_count)
{
        size_t sz = node_count * BYTE_COUNT;

        /* One access list per (node, byte) pair */
        struct util_dynarray *last_read = calloc(sizeof(struct util_dynarray), sz);
        struct util_dynarray *last_write = calloc(sizeof(struct util_dynarray), sz);

        for (unsigned i = 0; i < sz; ++i) {
                util_dynarray_init(&last_read[i], NULL);
                util_dynarray_init(&last_write[i], NULL);
        }

        /* Initialize dependency graph */
        for (unsigned i = 0; i < count; ++i) {
                instructions[i]->dependents =
                        calloc(BITSET_WORDS(count), sizeof(BITSET_WORD));

                instructions[i]->nr_dependencies = 0;
        }

        /* Most recently seen load/store per class (shared / scratch / other),
         * ~0 meaning "none seen yet" */
        unsigned prev_ldst[3] = {~0, ~0, ~0};

        /* Populate dependency graph, scanning the block bottom-up */
        for (signed i = count - 1; i >= 0; --i) {
                if (instructions[i]->compact_branch)
                        continue;

                unsigned dest = instructions[i]->dest;
                unsigned mask = mir_bytemask(instructions[i]);

                /* Depend on every later instruction that writes bytes we read */
                mir_foreach_src((*instructions), s) {
                        unsigned src = instructions[i]->src[s];

                        if (src < node_count) {
                                unsigned readmask = mir_bytemask_of_read_components(instructions[i], src);
                                add_dependency(last_write, src, readmask, instructions, i);
                        }
                }

                /* Create a list of dependencies for each type of load/store
                 * instruction to prevent reordering. */
                if (instructions[i]->type == TAG_LOAD_STORE_4 &&
                    load_store_opcode_props[instructions[i]->op].props & LDST_ADDRESS) {

                        unsigned type = instructions[i]->load_store.arg_reg |
                                        instructions[i]->load_store.arg_comp;

                        unsigned idx;
                        switch (type) {
                        case LDST_SHARED: idx = 0; break;
                        case LDST_SCRATCH: idx = 1; break;
                        default: idx = 2; break;
                        }

                        unsigned prev = prev_ldst[idx];

                        if (prev != ~0) {
                                BITSET_WORD *dependents = instructions[prev]->dependents;

                                /* Already have the dependency */
                                if (BITSET_TEST(dependents, i))
                                        continue;
                                /* NOTE(review): the continue above also skips
                                 * the prev_ldst update and the dest/src
                                 * bookkeeping below for this instruction —
                                 * presumably the edges already exist in that
                                 * case, but worth confirming */

                                BITSET_SET(dependents, i);
                                instructions[i]->nr_dependencies++;
                        }

                        prev_ldst[idx] = i;
                }

                /* Depend on every later instruction that reads or writes
                 * bytes we write */
                if (dest < node_count) {
                        add_dependency(last_read, dest, mask, instructions, i);
                        add_dependency(last_write, dest, mask, instructions, i);
                        mark_access(last_write, dest, mask, i);
                }

                /* Record this instruction as a reader of its source bytes */
                mir_foreach_src((*instructions), s) {
                        unsigned src = instructions[i]->src[s];

                        if (src < node_count) {
                                unsigned readmask = mir_bytemask_of_read_components(instructions[i], src);
                                mark_access(last_read, src, readmask, i);
                        }
                }
        }

        /* If there is a branch, all instructions depend on it, as interblock
         * execution must be purely in-order */

        if (instructions[count - 1]->compact_branch) {
                BITSET_WORD *dependents = instructions[count - 1]->dependents;

                for (signed i = count - 2; i >= 0; --i) {
                        if (BITSET_TEST(dependents, i))
                                continue;

                        BITSET_SET(dependents, i);
                        instructions[i]->nr_dependencies++;
                }
        }

        /* Free the intermediate structures */
        for (unsigned i = 0; i < sz; ++i) {
                util_dynarray_fini(&last_read[i]);
                util_dynarray_fini(&last_write[i]);
        }

        free(last_read);
        free(last_write);
}
2097ec681f3Smrg
2107ec681f3Smrg/* Does the mask cover more than a scalar? */
2117ec681f3Smrg
static bool
is_single_component_mask(unsigned mask)
{
        /* Only the low 8 component bits participate in the count */
        unsigned low = mask & 0xFF;

        /* Exactly one bit set <=> nonzero power of two */
        return low != 0 && (low & (low - 1)) == 0;
}
2247ec681f3Smrg
/* Helpers for scheduling */
2267ec681f3Smrg
2277ec681f3Smrgstatic bool
2287ec681f3Smrgmir_is_scalar(midgard_instruction *ains)
2297ec681f3Smrg{
2307ec681f3Smrg        /* Do we try to use it as a vector op? */
2317ec681f3Smrg        if (!is_single_component_mask(ains->mask))
2327ec681f3Smrg                return false;
2337ec681f3Smrg
2347ec681f3Smrg        /* Otherwise, check mode hazards */
2357ec681f3Smrg        bool could_scalar = true;
2367ec681f3Smrg        unsigned szd = nir_alu_type_get_type_size(ains->dest_type);
2377ec681f3Smrg        unsigned sz0 = nir_alu_type_get_type_size(ains->src_types[0]);
2387ec681f3Smrg        unsigned sz1 = nir_alu_type_get_type_size(ains->src_types[1]);
2397ec681f3Smrg
2407ec681f3Smrg        /* Only 16/32-bit can run on a scalar unit */
2417ec681f3Smrg        could_scalar &= (szd == 16) || (szd == 32);
2427ec681f3Smrg
2437ec681f3Smrg        if (ains->src[0] != ~0)
2447ec681f3Smrg                could_scalar &= (sz0 == 16) || (sz0 == 32);
2457ec681f3Smrg
2467ec681f3Smrg        if (ains->src[1] != ~0)
2477ec681f3Smrg                could_scalar &= (sz1 == 16) || (sz1 == 32);
2487ec681f3Smrg
2497ec681f3Smrg        if (midgard_is_integer_out_op(ains->op) && ains->outmod != midgard_outmod_keeplo)
2507ec681f3Smrg                return false;
2517ec681f3Smrg
2527ec681f3Smrg        return could_scalar;
2537ec681f3Smrg}
2547ec681f3Smrg
2557ec681f3Smrg/* How many bytes does this ALU instruction add to the bundle? */
2567ec681f3Smrg
2577ec681f3Smrgstatic unsigned
2587ec681f3Smrgbytes_for_instruction(midgard_instruction *ains)
2597ec681f3Smrg{
2607ec681f3Smrg        if (ains->unit & UNITS_ANY_VECTOR)
2617ec681f3Smrg                return sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
2627ec681f3Smrg        else if (ains->unit == ALU_ENAB_BRANCH)
2637ec681f3Smrg                return sizeof(midgard_branch_extended);
2647ec681f3Smrg        else if (ains->compact_branch)
2657ec681f3Smrg                return sizeof(uint16_t);
2667ec681f3Smrg        else
2677ec681f3Smrg                return sizeof(midgard_reg_info) + sizeof(midgard_scalar_alu);
2687ec681f3Smrg}
2697ec681f3Smrg
2707ec681f3Smrg/* We would like to flatten the linked list of midgard_instructions in a bundle
2717ec681f3Smrg * to an array of pointers on the heap for easy indexing */
2727ec681f3Smrg
2737ec681f3Smrgstatic midgard_instruction **
2747ec681f3Smrgflatten_mir(midgard_block *block, unsigned *len)
2757ec681f3Smrg{
2767ec681f3Smrg        *len = list_length(&block->base.instructions);
2777ec681f3Smrg
2787ec681f3Smrg        if (!(*len))
2797ec681f3Smrg                return NULL;
2807ec681f3Smrg
2817ec681f3Smrg        midgard_instruction **instructions =
2827ec681f3Smrg                calloc(sizeof(midgard_instruction *), *len);
2837ec681f3Smrg
2847ec681f3Smrg        unsigned i = 0;
2857ec681f3Smrg
2867ec681f3Smrg        mir_foreach_instr_in_block(block, ins)
2877ec681f3Smrg                instructions[i++] = ins;
2887ec681f3Smrg
2897ec681f3Smrg        return instructions;
2907ec681f3Smrg}
2917ec681f3Smrg
2927ec681f3Smrg/* The worklist is the set of instructions that can be scheduled now; that is,
2937ec681f3Smrg * the set of instructions with no remaining dependencies */
2947ec681f3Smrg
2957ec681f3Smrgstatic void
2967ec681f3Smrgmir_initialize_worklist(BITSET_WORD *worklist, midgard_instruction **instructions, unsigned count)
2977ec681f3Smrg{
2987ec681f3Smrg        for (unsigned i = 0; i < count; ++i) {
2997ec681f3Smrg                if (instructions[i]->nr_dependencies == 0)
3007ec681f3Smrg                        BITSET_SET(worklist, i);
3017ec681f3Smrg        }
3027ec681f3Smrg}
3037ec681f3Smrg
3047ec681f3Smrg/* Update the worklist after an instruction terminates. Remove its edges from
3057ec681f3Smrg * the graph and if that causes any node to have no dependencies, add it to the
3067ec681f3Smrg * worklist */
3077ec681f3Smrg
3087ec681f3Smrgstatic void
3097ec681f3Smrgmir_update_worklist(
3107ec681f3Smrg                BITSET_WORD *worklist, unsigned count,
3117ec681f3Smrg                midgard_instruction **instructions, midgard_instruction *done)
3127ec681f3Smrg{
3137ec681f3Smrg        /* Sanity check: if no instruction terminated, there is nothing to do.
3147ec681f3Smrg         * If the instruction that terminated had dependencies, that makes no
3157ec681f3Smrg         * sense and means we messed up the worklist. Finally, as the purpose
3167ec681f3Smrg         * of this routine is to update dependents, we abort early if there are
3177ec681f3Smrg         * no dependents defined. */
3187ec681f3Smrg
3197ec681f3Smrg        if (!done)
3207ec681f3Smrg                return;
3217ec681f3Smrg
3227ec681f3Smrg        assert(done->nr_dependencies == 0);
3237ec681f3Smrg
3247ec681f3Smrg        if (!done->dependents)
3257ec681f3Smrg                return;
3267ec681f3Smrg
3277ec681f3Smrg        /* We have an instruction with dependents. Iterate each dependent to
3287ec681f3Smrg         * remove one dependency (`done`), adding dependents to the worklist
3297ec681f3Smrg         * where possible. */
3307ec681f3Smrg
3317ec681f3Smrg        unsigned i;
3327ec681f3Smrg        BITSET_FOREACH_SET(i, done->dependents, count) {
3337ec681f3Smrg                assert(instructions[i]->nr_dependencies);
3347ec681f3Smrg
3357ec681f3Smrg                if (!(--instructions[i]->nr_dependencies))
3367ec681f3Smrg                        BITSET_SET(worklist, i);
3377ec681f3Smrg        }
3387ec681f3Smrg
3397ec681f3Smrg        free(done->dependents);
3407ec681f3Smrg}
3417ec681f3Smrg
3427ec681f3Smrg/* While scheduling, we need to choose instructions satisfying certain
3437ec681f3Smrg * criteria. As we schedule backwards, we choose the *last* instruction in the
3447ec681f3Smrg * worklist to simulate in-order scheduling. Chosen instructions must satisfy a
3457ec681f3Smrg * given predicate. */
3467ec681f3Smrg
struct midgard_predicate {
        /* Required instruction TAG_* class, or ~0 for dont-care */
        unsigned tag;

        /* True if we want to pop off the chosen instruction */
        bool destructive;

        /* For ALU, choose only this unit */
        unsigned unit;

        /* State for bundle constants. constants is the actual constants
         * for the bundle. constant_mask is a byte mask of the (up to 16)
         * constant bytes currently in use. When picking in destructive
         * mode, the constants array will be updated, and the instruction
         * will be adjusted to index into the constants array */

        midgard_constants *constants;
        unsigned constant_mask;

        /* Exclude this destination (if not ~0) */
        unsigned exclude;

        /* Don't schedule instructions consuming conditionals (since we already
         * scheduled one). Excludes conditional branches and csel */
        bool no_cond;

        /* Require (or reject, via no_mask) a minimal mask and (if nonzero)
         * given destination. Used for writeout optimizations */

        unsigned mask;
        unsigned no_mask;
        unsigned dest;

        /* Whether to not-care/only/never schedule imov/fmov instructions.
         * This allows non-move instructions to get priority on each unit */
        unsigned move_mode;

        /* For load/store: how many pipeline registers are in use? The two
         * scheduled instructions cannot use more than the 256-bits of pipeline
         * space available or RA will fail (as it would run out of pipeline
         * registers and fail to spill without breaking the schedule) */

        unsigned pipeline_count;
};
3917ec681f3Smrg
3927ec681f3Smrgstatic bool
3937ec681f3Smrgmir_adjust_constant(midgard_instruction *ins, unsigned src,
3947ec681f3Smrg                unsigned *bundle_constant_mask,
3957ec681f3Smrg                unsigned *comp_mapping,
3967ec681f3Smrg                uint8_t *bundle_constants,
3977ec681f3Smrg                bool upper)
3987ec681f3Smrg{
3997ec681f3Smrg        unsigned type_size = nir_alu_type_get_type_size(ins->src_types[src]) / 8;
4007ec681f3Smrg        unsigned type_shift = util_logbase2(type_size);
4017ec681f3Smrg        unsigned max_comp = mir_components_for_type(ins->src_types[src]);
4027ec681f3Smrg        unsigned comp_mask = mir_from_bytemask(mir_round_bytemask_up(
4037ec681f3Smrg                                mir_bytemask_of_read_components_index(ins, src),
4047ec681f3Smrg                                type_size * 8),
4057ec681f3Smrg                                               type_size * 8);
4067ec681f3Smrg        unsigned type_mask = (1 << type_size) - 1;
4077ec681f3Smrg
4087ec681f3Smrg        /* Upper only makes sense for 16-bit */
4097ec681f3Smrg        if (type_size != 16 && upper)
4107ec681f3Smrg                return false;
4117ec681f3Smrg
4127ec681f3Smrg        /* For 16-bit, we need to stay on either upper or lower halves to avoid
4137ec681f3Smrg         * disrupting the swizzle */
4147ec681f3Smrg        unsigned start = upper ? 8 : 0;
4157ec681f3Smrg        unsigned length = (type_size == 2) ? 8 : 16;
4167ec681f3Smrg
4177ec681f3Smrg        for (unsigned comp = 0; comp < max_comp; comp++) {
4187ec681f3Smrg                if (!(comp_mask & (1 << comp)))
4197ec681f3Smrg                        continue;
4207ec681f3Smrg
4217ec681f3Smrg                uint8_t *constantp = ins->constants.u8 + (type_size * comp);
4227ec681f3Smrg                unsigned best_reuse_bytes = 0;
4237ec681f3Smrg                signed best_place = -1;
4247ec681f3Smrg                unsigned i, j;
4257ec681f3Smrg
4267ec681f3Smrg                for (i = start; i < (start + length); i += type_size) {
4277ec681f3Smrg                        unsigned reuse_bytes = 0;
4287ec681f3Smrg
4297ec681f3Smrg                        for (j = 0; j < type_size; j++) {
4307ec681f3Smrg                                if (!(*bundle_constant_mask & (1 << (i + j))))
4317ec681f3Smrg                                        continue;
4327ec681f3Smrg                                if (constantp[j] != bundle_constants[i + j])
4337ec681f3Smrg                                        break;
4347ec681f3Smrg                                if ((i + j) > (start + length))
4357ec681f3Smrg                                        break;
4367ec681f3Smrg
4377ec681f3Smrg                                reuse_bytes++;
4387ec681f3Smrg                        }
4397ec681f3Smrg
4407ec681f3Smrg                        /* Select the place where existing bytes can be
4417ec681f3Smrg                         * reused so we leave empty slots to others
4427ec681f3Smrg                         */
4437ec681f3Smrg                        if (j == type_size &&
4447ec681f3Smrg                            (reuse_bytes > best_reuse_bytes || best_place < 0)) {
4457ec681f3Smrg                                best_reuse_bytes = reuse_bytes;
4467ec681f3Smrg                                best_place = i;
4477ec681f3Smrg                                break;
4487ec681f3Smrg                        }
4497ec681f3Smrg                }
4507ec681f3Smrg
4517ec681f3Smrg                /* This component couldn't fit in the remaining constant slot,
4527ec681f3Smrg                 * no need check the remaining components, bail out now
4537ec681f3Smrg                 */
4547ec681f3Smrg                if (best_place < 0)
4557ec681f3Smrg                        return false;
4567ec681f3Smrg
4577ec681f3Smrg                memcpy(&bundle_constants[i], constantp, type_size);
4587ec681f3Smrg                *bundle_constant_mask |= type_mask << best_place;
4597ec681f3Smrg                comp_mapping[comp] = best_place >> type_shift;
4607ec681f3Smrg        }
4617ec681f3Smrg
4627ec681f3Smrg        return true;
4637ec681f3Smrg}
4647ec681f3Smrg
4657ec681f3Smrg/* For an instruction that can fit, adjust it to fit and update the constants
4667ec681f3Smrg * array, in destructive mode. Returns whether the fitting was successful. */
4677ec681f3Smrg
4687ec681f3Smrgstatic bool
4697ec681f3Smrgmir_adjust_constants(midgard_instruction *ins,
4707ec681f3Smrg                struct midgard_predicate *pred,
4717ec681f3Smrg                bool destructive)
4727ec681f3Smrg{
4737ec681f3Smrg        /* No constant, nothing to adjust */
4747ec681f3Smrg        if (!ins->has_constants)
4757ec681f3Smrg                return true;
4767ec681f3Smrg
4777ec681f3Smrg        unsigned r_constant = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
4787ec681f3Smrg        unsigned bundle_constant_mask = pred->constant_mask;
4797ec681f3Smrg        unsigned comp_mapping[2][16] = { };
4807ec681f3Smrg        uint8_t bundle_constants[16];
4817ec681f3Smrg
4827ec681f3Smrg        memcpy(bundle_constants, pred->constants, 16);
4837ec681f3Smrg
4847ec681f3Smrg        /* Let's try to find a place for each active component of the constant
4857ec681f3Smrg         * register.
4867ec681f3Smrg         */
4877ec681f3Smrg        for (unsigned src = 0; src < 2; ++src) {
4887ec681f3Smrg                if (ins->src[src] != SSA_FIXED_REGISTER(REGISTER_CONSTANT))
4897ec681f3Smrg                        continue;
4907ec681f3Smrg
4917ec681f3Smrg                /* First, try lower half (or whole for !16) */
4927ec681f3Smrg                if (mir_adjust_constant(ins, src, &bundle_constant_mask,
4937ec681f3Smrg                                comp_mapping[src], bundle_constants, false))
4947ec681f3Smrg                        continue;
4957ec681f3Smrg
4967ec681f3Smrg                /* Next, try upper half */
4977ec681f3Smrg                if (mir_adjust_constant(ins, src, &bundle_constant_mask,
4987ec681f3Smrg                                comp_mapping[src], bundle_constants, true))
4997ec681f3Smrg                        continue;
5007ec681f3Smrg
5017ec681f3Smrg                /* Otherwise bail */
5027ec681f3Smrg                return false;
5037ec681f3Smrg        }
5047ec681f3Smrg
5057ec681f3Smrg        /* If non-destructive, we're done */
5067ec681f3Smrg        if (!destructive)
5077ec681f3Smrg                return true;
5087ec681f3Smrg
5097ec681f3Smrg	/* Otherwise update the constant_mask and constant values */
5107ec681f3Smrg        pred->constant_mask = bundle_constant_mask;
5117ec681f3Smrg        memcpy(pred->constants, bundle_constants, 16);
5127ec681f3Smrg
5137ec681f3Smrg        /* Use comp_mapping as a swizzle */
5147ec681f3Smrg        mir_foreach_src(ins, s) {
5157ec681f3Smrg                if (ins->src[s] == r_constant)
5167ec681f3Smrg                        mir_compose_swizzle(ins->swizzle[s], comp_mapping[s], ins->swizzle[s]);
5177ec681f3Smrg        }
5187ec681f3Smrg
5197ec681f3Smrg        return true;
5207ec681f3Smrg}
5217ec681f3Smrg
5227ec681f3Smrg/* Conservative estimate of the pipeline registers required for load/store */
5237ec681f3Smrg
5247ec681f3Smrgstatic unsigned
5257ec681f3Smrgmir_pipeline_count(midgard_instruction *ins)
5267ec681f3Smrg{
5277ec681f3Smrg        unsigned bytecount = 0;
5287ec681f3Smrg
5297ec681f3Smrg        mir_foreach_src(ins, i) {
5307ec681f3Smrg                /* Skip empty source  */
5317ec681f3Smrg                if (ins->src[i] == ~0) continue;
5327ec681f3Smrg
5337ec681f3Smrg                if (i == 0) {
5347ec681f3Smrg                        /* First source is a vector, worst-case the mask */
5357ec681f3Smrg                        unsigned bytemask = mir_bytemask_of_read_components_index(ins, i);
5367ec681f3Smrg                        unsigned max = util_logbase2(bytemask) + 1;
5377ec681f3Smrg                        bytecount += max;
5387ec681f3Smrg                } else {
5397ec681f3Smrg                        /* Sources 1 on are scalars */
5407ec681f3Smrg                        bytecount += 4;
5417ec681f3Smrg                }
5427ec681f3Smrg        }
5437ec681f3Smrg
5447ec681f3Smrg        unsigned dwords = DIV_ROUND_UP(bytecount, 16);
5457ec681f3Smrg        assert(dwords <= 2);
5467ec681f3Smrg
5477ec681f3Smrg        return dwords;
5487ec681f3Smrg}
5497ec681f3Smrg
5507ec681f3Smrg/* Matches FADD x, x with modifiers compatible. Since x + x = x * 2, for
5517ec681f3Smrg * any x including of the form f(y) for some swizzle/abs/neg function f */
5527ec681f3Smrg
5537ec681f3Smrgstatic bool
5547ec681f3Smrgmir_is_add_2(midgard_instruction *ins)
5557ec681f3Smrg{
5567ec681f3Smrg        if (ins->op != midgard_alu_op_fadd)
5577ec681f3Smrg                return false;
5587ec681f3Smrg
5597ec681f3Smrg        if (ins->src[0] != ins->src[1])
5607ec681f3Smrg                return false;
5617ec681f3Smrg
5627ec681f3Smrg        if (ins->src_types[0] != ins->src_types[1])
5637ec681f3Smrg                return false;
5647ec681f3Smrg
5657ec681f3Smrg        for (unsigned i = 0; i < MIR_VEC_COMPONENTS; ++i) {
5667ec681f3Smrg                if (ins->swizzle[0][i] != ins->swizzle[1][i])
5677ec681f3Smrg                        return false;
5687ec681f3Smrg        }
5697ec681f3Smrg
5707ec681f3Smrg        if (ins->src_abs[0] != ins->src_abs[1])
5717ec681f3Smrg                return false;
5727ec681f3Smrg
5737ec681f3Smrg        if (ins->src_neg[0] != ins->src_neg[1])
5747ec681f3Smrg                return false;
5757ec681f3Smrg
5767ec681f3Smrg        return true;
5777ec681f3Smrg}
5787ec681f3Smrg
5797ec681f3Smrgstatic void
5807ec681f3Smrgmir_adjust_unit(midgard_instruction *ins, unsigned unit)
5817ec681f3Smrg{
5827ec681f3Smrg        /* FADD x, x = FMUL x, #2 */
5837ec681f3Smrg        if (mir_is_add_2(ins) && (unit & (UNITS_MUL | UNIT_VLUT))) {
5847ec681f3Smrg                ins->op = midgard_alu_op_fmul;
5857ec681f3Smrg
5867ec681f3Smrg                ins->src[1] = ~0;
5877ec681f3Smrg                ins->src_abs[1] = false;
5887ec681f3Smrg                ins->src_neg[1] = false;
5897ec681f3Smrg
5907ec681f3Smrg                ins->has_inline_constant = true;
5917ec681f3Smrg                ins->inline_constant = _mesa_float_to_half(2.0);
5927ec681f3Smrg        }
5937ec681f3Smrg}
5947ec681f3Smrg
5957ec681f3Smrgstatic unsigned
5967ec681f3Smrgmir_has_unit(midgard_instruction *ins, unsigned unit)
5977ec681f3Smrg{
5987ec681f3Smrg        if (alu_opcode_props[ins->op].props & unit)
5997ec681f3Smrg                return true;
6007ec681f3Smrg
6017ec681f3Smrg        /* FADD x, x can run on any adder or any multiplier */
6027ec681f3Smrg        if (mir_is_add_2(ins))
6037ec681f3Smrg                return true;
6047ec681f3Smrg
6057ec681f3Smrg        return false;
6067ec681f3Smrg}
6077ec681f3Smrg
/* Net change in liveness if an instruction were scheduled. Loosely based on
 * ir3's scheduler. A negative return value means scheduling the instruction
 * shrinks the live byte set (good for register pressure); positive grows it.
 * If destructive is set, the liveness array is updated to reflect the
 * choice; otherwise this is a pure query. */

static int
mir_live_effect(uint16_t *liveness, midgard_instruction *ins, bool destructive)
{
        /* TODO: what if dest is used multiple times? */

        /* Bytes of the destination that were live and are accounted as freed
         * by scheduling this writer */
        int free_live = 0;

        if (ins->dest < SSA_FIXED_MINIMUM) {
                unsigned bytemask = mir_bytemask(ins);
                /* Round the mask up to a contiguous low mask (next power of
                 * two minus one) so sparse writes are counted conservatively */
                bytemask = util_next_power_of_two(bytemask + 1) - 1;
                free_live += util_bitcount(liveness[ins->dest] & bytemask);

                if (destructive)
                        liveness[ins->dest] &= ~bytemask;
        }

        /* Bytes of the sources that were not yet live and are accounted as
         * newly live */
        int new_live = 0;

        mir_foreach_src(ins, s) {
                unsigned S = ins->src[s];

                /* Don't double-count an index used as multiple sources */
                bool dupe = false;

                for (unsigned q = 0; q < s; ++q)
                        dupe |= (ins->src[q] == S);

                if (dupe)
                        continue;

                if (S < SSA_FIXED_MINIMUM) {
                        unsigned bytemask = mir_bytemask_of_read_components(ins, S);
                        bytemask = util_next_power_of_two(bytemask + 1) - 1;

                        /* Count only the new components */
                        new_live += util_bitcount(bytemask & ~(liveness[S]));

                        if (destructive)
                                liveness[S] |= bytemask;
                }
        }

        return new_live - free_live;
}
6537ec681f3Smrg
/* Picks the best schedulable instruction from the worklist that satisfies
 * the predicate, or NULL if none does. If predicate->destructive is set, the
 * chosen instruction is popped from the worklist and the predicate/liveness
 * state is updated to account for it. */

static midgard_instruction *
mir_choose_instruction(
                midgard_instruction **instructions,
                uint16_t *liveness,
                BITSET_WORD *worklist, unsigned count,
                struct midgard_predicate *predicate)
{
        /* Parse the predicate */
        unsigned tag = predicate->tag;
        unsigned unit = predicate->unit;
        bool scalar = (unit != ~0) && (unit & UNITS_SCALAR);
        bool no_cond = predicate->no_cond;

        unsigned mask = predicate->mask;
        unsigned dest = predicate->dest;
        bool needs_dest = mask & 0xF;

        /* Iterate to find the best instruction satisfying the predicate */
        unsigned i;

        signed best_index = -1;
        signed best_effect = INT_MAX;
        bool best_conditional = false;

        /* Enforce a simple metric limiting distance to keep down register
         * pressure. TODO: replace with liveness tracking for much better
         * results */

        unsigned max_active = 0;
        unsigned max_distance = 36;

#ifndef NDEBUG
        /* Force in-order scheduling */
        if (midgard_debug & MIDGARD_DBG_INORDER)
                max_distance = 1;
#endif

        /* Find the newest ready instruction so we can measure distance */
        BITSET_FOREACH_SET(i, worklist, count) {
                max_active = MAX2(max_active, i);
        }

        BITSET_FOREACH_SET(i, worklist, count) {
                /* Too far from the newest ready instruction; skip to limit
                 * register pressure */
                if ((max_active - i) >= max_distance)
                        continue;

                /* Bundle type must match, when requested */
                if (tag != ~0 && instructions[i]->type != tag)
                        continue;

                bool alu = (instructions[i]->type == TAG_ALU_4);
                bool ldst = (instructions[i]->type == TAG_LOAD_STORE_4);

                bool branch = alu && (unit == ALU_ENAB_BR_COMPACT);
                bool is_move = alu &&
                        (instructions[i]->op == midgard_alu_op_imov ||
                         instructions[i]->op == midgard_alu_op_fmov);

                /* Skip writers of the excluded destination (e.g. a value
                 * already chosen for this bundle) */
                if (predicate->exclude != ~0 && instructions[i]->dest == predicate->exclude)
                        continue;

                /* The instruction must be able to run on the requested unit */
                if (alu && !branch && unit != ~0 && !(mir_has_unit(instructions[i], unit)))
                        continue;

                /* 0: don't care, 1: no moves, 2: only moves */
                if (predicate->move_mode && ((predicate->move_mode - 1) != is_move))
                        continue;

                if (branch && !instructions[i]->compact_branch)
                        continue;

                /* Scalar units only take scalar-izable instructions */
                if (alu && scalar && !mir_is_scalar(instructions[i]))
                        continue;

                /* Embedded constants must fit alongside what's already in the
                 * bundle (dry-run, non-destructive check) */
                if (alu && predicate->constants && !mir_adjust_constants(instructions[i], predicate, false))
                        continue;

                if (needs_dest && instructions[i]->dest != dest)
                        continue;

                /* Must write at least the requested components... */
                if (mask && ((~instructions[i]->mask) & mask))
                        continue;

                /* ...and none of the forbidden ones */
                if (instructions[i]->mask & predicate->no_mask)
                        continue;

                /* Load/store bundles are limited in pipeline register usage */
                if (ldst && mir_pipeline_count(instructions[i]) + predicate->pipeline_count > 2)
                        continue;

                bool conditional = alu && !branch && OP_IS_CSEL(instructions[i]->op);
                conditional |= (branch && instructions[i]->branch.conditional);

                if (conditional && no_cond)
                        continue;

                /* Prefer the candidate with the best (lowest) liveness
                 * effect; ties keep the earliest index */
                int effect = mir_live_effect(liveness, instructions[i], false);

                if (effect > best_effect)
                        continue;

                if (effect == best_effect && (signed) i < best_index)
                        continue;

                best_effect = effect;
                best_index = i;
                best_conditional = conditional;
        }

        /* Did we find anything? */

        if (best_index < 0)
                return NULL;

        /* If we found something, remove it from the worklist */
        assert(best_index < count);
        midgard_instruction *I = instructions[best_index];

        if (predicate->destructive) {
                BITSET_CLEAR(worklist, best_index);

                /* Commit the constants for real this time */
                if (I->type == TAG_ALU_4)
                        mir_adjust_constants(instructions[best_index], predicate, true);

                if (I->type == TAG_LOAD_STORE_4)
                        predicate->pipeline_count += mir_pipeline_count(instructions[best_index]);

                if (I->type == TAG_ALU_4)
                        mir_adjust_unit(instructions[best_index], unit);

                /* Once we schedule a conditional, we can't again */
                predicate->no_cond |= best_conditional;
                mir_live_effect(liveness, instructions[best_index], true);
        }

        return I;
}
7887ec681f3Smrg
7897ec681f3Smrg/* Still, we don't choose instructions in a vacuum. We need a way to choose the
7907ec681f3Smrg * best bundle type (ALU, load/store, texture). Nondestructive. */
7917ec681f3Smrg
7927ec681f3Smrgstatic unsigned
7937ec681f3Smrgmir_choose_bundle(
7947ec681f3Smrg                midgard_instruction **instructions,
7957ec681f3Smrg                uint16_t *liveness,
7967ec681f3Smrg                BITSET_WORD *worklist, unsigned count,
7977ec681f3Smrg                unsigned num_ldst)
7987ec681f3Smrg{
7997ec681f3Smrg        /* At the moment, our algorithm is very simple - use the bundle of the
8007ec681f3Smrg         * best instruction, regardless of what else could be scheduled
8017ec681f3Smrg         * alongside it. This is not optimal but it works okay for in-order */
8027ec681f3Smrg
8037ec681f3Smrg        struct midgard_predicate predicate = {
8047ec681f3Smrg                .tag = ~0,
8057ec681f3Smrg                .unit = ~0,
8067ec681f3Smrg                .destructive = false,
8077ec681f3Smrg                .exclude = ~0
8087ec681f3Smrg        };
8097ec681f3Smrg
8107ec681f3Smrg        midgard_instruction *chosen = mir_choose_instruction(instructions, liveness, worklist, count, &predicate);
8117ec681f3Smrg
8127ec681f3Smrg        if (chosen && chosen->type == TAG_LOAD_STORE_4 && !(num_ldst % 2)) {
8137ec681f3Smrg                /* Try to schedule load/store ops in pairs */
8147ec681f3Smrg
8157ec681f3Smrg                predicate.exclude = chosen->dest;
8167ec681f3Smrg                predicate.tag = TAG_LOAD_STORE_4;
8177ec681f3Smrg
8187ec681f3Smrg                chosen = mir_choose_instruction(instructions, liveness, worklist, count, &predicate);
8197ec681f3Smrg                if (chosen)
8207ec681f3Smrg                        return TAG_LOAD_STORE_4;
8217ec681f3Smrg
8227ec681f3Smrg                predicate.tag = ~0;
8237ec681f3Smrg
8247ec681f3Smrg                chosen = mir_choose_instruction(instructions, liveness, worklist, count, &predicate);
8257ec681f3Smrg                assert(chosen == NULL || chosen->type != TAG_LOAD_STORE_4);
8267ec681f3Smrg
8277ec681f3Smrg                if (chosen)
8287ec681f3Smrg                        return chosen->type;
8297ec681f3Smrg                else
8307ec681f3Smrg                        return TAG_LOAD_STORE_4;
8317ec681f3Smrg        }
8327ec681f3Smrg
8337ec681f3Smrg        if (chosen)
8347ec681f3Smrg                return chosen->type;
8357ec681f3Smrg        else
8367ec681f3Smrg                return ~0;
8377ec681f3Smrg}
8387ec681f3Smrg
8397ec681f3Smrg/* We want to choose an ALU instruction filling a given unit */
8407ec681f3Smrgstatic void
8417ec681f3Smrgmir_choose_alu(midgard_instruction **slot,
8427ec681f3Smrg                midgard_instruction **instructions,
8437ec681f3Smrg                uint16_t *liveness,
8447ec681f3Smrg                BITSET_WORD *worklist, unsigned len,
8457ec681f3Smrg                struct midgard_predicate *predicate,
8467ec681f3Smrg                unsigned unit)
8477ec681f3Smrg{
8487ec681f3Smrg        /* Did we already schedule to this slot? */
8497ec681f3Smrg        if ((*slot) != NULL)
8507ec681f3Smrg                return;
8517ec681f3Smrg
8527ec681f3Smrg        /* Try to schedule something, if not */
8537ec681f3Smrg        predicate->unit = unit;
8547ec681f3Smrg        *slot = mir_choose_instruction(instructions, liveness, worklist, len, predicate);
8557ec681f3Smrg
8567ec681f3Smrg        /* Store unit upon scheduling */
8577ec681f3Smrg        if (*slot && !((*slot)->compact_branch))
8587ec681f3Smrg                (*slot)->unit = unit;
8597ec681f3Smrg}
8607ec681f3Smrg
8617ec681f3Smrg/* When we are scheduling a branch/csel, we need the consumed condition in the
8627ec681f3Smrg * same block as a pipeline register. There are two options to enable this:
8637ec681f3Smrg *
8647ec681f3Smrg *  - Move the conditional into the bundle. Preferred, but only works if the
8657ec681f3Smrg *    conditional is used only once and is from this block.
8667ec681f3Smrg *  - Copy the conditional.
8677ec681f3Smrg *
8687ec681f3Smrg * We search for the conditional. If it's in this block, single-use, and
8697ec681f3Smrg * without embedded constants, we schedule it immediately. Otherwise, we
8707ec681f3Smrg * schedule a move for it.
8717ec681f3Smrg *
8727ec681f3Smrg * mir_comparison_mobile is a helper to find the moveable condition.
8737ec681f3Smrg */
8747ec681f3Smrg
/* Returns the index of the (single) instruction writing `cond` if it may be
 * moved into the current ALU bundle, or ~0 otherwise. On success, the
 * instruction's embedded constants are committed into the predicate. */

static unsigned
mir_comparison_mobile(
                compiler_context *ctx,
                midgard_instruction **instructions,
                struct midgard_predicate *predicate,
                unsigned count,
                unsigned cond)
{
        /* The condition must be consumed exactly once, or moving it would
         * change behaviour for the other users */
        if (!mir_single_use(ctx, cond))
                return ~0;

        unsigned ret = ~0;

        for (unsigned i = 0; i < count; ++i) {
                if (instructions[i]->dest != cond)
                        continue;

                /* Must fit in an ALU bundle */
                if (instructions[i]->type != TAG_ALU_4)
                        return ~0;

                /* If it would itself require a condition, that's recursive */
                if (OP_IS_CSEL(instructions[i]->op))
                        return ~0;

                /* We'll need to rewrite to .w but that doesn't work for vector
                 * ops that don't replicate (ball/bany), so bail there */

                if (GET_CHANNEL_COUNT(alu_opcode_props[instructions[i]->op].props))
                        return ~0;

                /* Ensure it will fit with constants */

                if (!mir_adjust_constants(instructions[i], predicate, false))
                        return ~0;

                /* Ensure it is written only once */

                if (ret != ~0)
                        return ~0;
                else
                        ret = i;
        }

        /* Inject constants now that we are sure we want to */
        if (ret != ~0)
                mir_adjust_constants(instructions[ret], predicate, true);

        return ret;
}
9257ec681f3Smrg
/* Using the information about the moveable conditional itself, we either pop
 * that condition off the worklist for use now, or create a move to
 * artificially schedule instead as a fallback */

static midgard_instruction *
mir_schedule_comparison(
                compiler_context *ctx,
                midgard_instruction **instructions,
                struct midgard_predicate *predicate,
                BITSET_WORD *worklist, unsigned count,
                unsigned cond, bool vector, unsigned *swizzle,
                midgard_instruction *user)
{
        /* TODO: swizzle when scheduling */
        /* Only a scalar condition read through component 0 can be hoisted
         * directly; anything else takes the move fallback below */
        unsigned comp_i =
                (!vector && (swizzle[0] == 0)) ?
                mir_comparison_mobile(ctx, instructions, predicate, count, cond) : ~0;

        /* If we can, schedule the condition immediately */
        if ((comp_i != ~0) && BITSET_TEST(worklist, comp_i)) {
                assert(comp_i < count);
                BITSET_CLEAR(worklist, comp_i);
                return instructions[comp_i];
        }

        /* Otherwise, we insert a move */

        midgard_instruction mov = v_mov(cond, cond);
        mov.mask = vector ? 0xF : 0x1;
        /* Carry the user's swizzle over onto the move */
        memcpy(mov.swizzle[1], swizzle, sizeof(mov.swizzle[1]));

        return mir_insert_instruction_before(ctx, user, mov);
}
9597ec681f3Smrg
/* Most generally, we need instructions writing to r31 in the appropriate
 * components. Finds (or synthesizes, via mir_schedule_comparison) the
 * condition feeding a csel or conditional branch, rewrites it to write the
 * pipeline condition register r31, assigns it a unit, and returns it. */

static midgard_instruction *
mir_schedule_condition(compiler_context *ctx,
                struct midgard_predicate *predicate,
                BITSET_WORD *worklist, unsigned count,
                midgard_instruction **instructions,
                midgard_instruction *last)
{
        /* For a branch, the condition is the only argument; for csel, third */
        bool branch = last->compact_branch;
        unsigned condition_index = branch ? 0 : 2;

        /* csel_v is vector; otherwise, conditions are scalar */
        bool vector = !branch && OP_IS_CSEL_V(last->op);

        /* Grab the conditional instruction */

        midgard_instruction *cond = mir_schedule_comparison(
                        ctx, instructions, predicate, worklist, count, last->src[condition_index],
                        vector, last->swizzle[condition_index], last);

        /* We have exclusive reign over this (possibly move) conditional
         * instruction. We can rewrite into a pipeline conditional register */

        predicate->exclude = cond->dest;
        cond->dest = SSA_FIXED_REGISTER(31);

        if (!vector) {
                /* Scalar conditions live in r31.w: mask to .w and shift each
                 * source swizzle up so the .w lane reads what lane 0..3 did */
                cond->mask = (1 << COMPONENT_W);

                mir_foreach_src(cond, s) {
                        if (cond->src[s] == ~0)
                                continue;

                        for (unsigned q = 0; q < 4; ++q)
                                cond->swizzle[s][q + COMPONENT_W] = cond->swizzle[s][q];
                }
        }

        /* Schedule the unit: csel is always in the latter pipeline, so a csel
         * condition must be in the former pipeline stage (vmul/sadd),
         * depending on scalar/vector of the instruction itself. A branch must
         * be written from the latter pipeline stage and a branch condition is
         * always scalar, so it is always in smul (exception: ball/bany, which
         * will be vadd) */

        if (branch)
                cond->unit = UNIT_SMUL;
        else
                cond->unit = vector ? UNIT_VMUL : UNIT_SADD;

        return cond;
}
10157ec681f3Smrg
10167ec681f3Smrg/* Schedules a single bundle of the given type */
10177ec681f3Smrg
10187ec681f3Smrgstatic midgard_bundle
10197ec681f3Smrgmir_schedule_texture(
10207ec681f3Smrg                midgard_instruction **instructions,
10217ec681f3Smrg                uint16_t *liveness,
10227ec681f3Smrg                BITSET_WORD *worklist, unsigned len,
10237ec681f3Smrg                bool is_vertex)
10247ec681f3Smrg{
10257ec681f3Smrg        struct midgard_predicate predicate = {
10267ec681f3Smrg                .tag = TAG_TEXTURE_4,
10277ec681f3Smrg                .destructive = true,
10287ec681f3Smrg                .exclude = ~0
10297ec681f3Smrg        };
10307ec681f3Smrg
10317ec681f3Smrg        midgard_instruction *ins =
10327ec681f3Smrg                mir_choose_instruction(instructions, liveness, worklist, len, &predicate);
10337ec681f3Smrg
10347ec681f3Smrg        mir_update_worklist(worklist, len, instructions, ins);
10357ec681f3Smrg
10367ec681f3Smrg        struct midgard_bundle out = {
10377ec681f3Smrg                .tag = ins->op == midgard_tex_op_barrier ?
10387ec681f3Smrg                        TAG_TEXTURE_4_BARRIER :
10397ec681f3Smrg                        (ins->op == midgard_tex_op_fetch) || is_vertex ?
10407ec681f3Smrg                        TAG_TEXTURE_4_VTX : TAG_TEXTURE_4,
10417ec681f3Smrg                .instruction_count = 1,
10427ec681f3Smrg                .instructions = { ins }
10437ec681f3Smrg        };
10447ec681f3Smrg
10457ec681f3Smrg        return out;
10467ec681f3Smrg}
10477ec681f3Smrg
/* Schedules a load/store bundle of one or two instructions. Must only be
 * called when at least one load/store instruction is available. */

static midgard_bundle
mir_schedule_ldst(
                midgard_instruction **instructions,
                uint16_t *liveness,
                BITSET_WORD *worklist, unsigned len,
                unsigned *num_ldst)
{
        struct midgard_predicate predicate = {
                .tag = TAG_LOAD_STORE_4,
                .destructive = true,
                .exclude = ~0
        };

        /* Try to pick two load/store ops. Second not guaranteed to exist */

        midgard_instruction *ins =
                mir_choose_instruction(instructions, liveness, worklist, len, &predicate);

        midgard_instruction *pair =
                mir_choose_instruction(instructions, liveness, worklist, len, &predicate);

        assert(ins != NULL);

        struct midgard_bundle out = {
                .tag = TAG_LOAD_STORE_4,
                .instruction_count = pair ? 2 : 1,
                .instructions = { ins, pair }
        };

        /* Track how many load/store ops remain unscheduled in the block */
        *num_ldst -= out.instruction_count;

        /* We have to update the worklist atomically, since the two
         * instructions run concurrently (TODO: verify it's not pipelined) */

        mir_update_worklist(worklist, len, instructions, ins);
        mir_update_worklist(worklist, len, instructions, pair);

        return out;
}
10877ec681f3Smrg
10887ec681f3Smrgstatic void
10897ec681f3Smrgmir_schedule_zs_write(
10907ec681f3Smrg                compiler_context *ctx,
10917ec681f3Smrg                struct midgard_predicate *predicate,
10927ec681f3Smrg                midgard_instruction **instructions,
10937ec681f3Smrg                uint16_t *liveness,
10947ec681f3Smrg                BITSET_WORD *worklist, unsigned len,
10957ec681f3Smrg                midgard_instruction *branch,
10967ec681f3Smrg                midgard_instruction **smul,
10977ec681f3Smrg                midgard_instruction **vadd,
10987ec681f3Smrg                midgard_instruction **vlut,
10997ec681f3Smrg                bool stencil)
11007ec681f3Smrg{
11017ec681f3Smrg        bool success = false;
11027ec681f3Smrg        unsigned idx = stencil ? 3 : 2;
11037ec681f3Smrg        unsigned src = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(1) : branch->src[idx];
11047ec681f3Smrg
11057ec681f3Smrg        predicate->dest = src;
11067ec681f3Smrg        predicate->mask = 0x1;
11077ec681f3Smrg
11087ec681f3Smrg        midgard_instruction **units[] = { smul, vadd, vlut };
11097ec681f3Smrg        unsigned unit_names[] = { UNIT_SMUL, UNIT_VADD, UNIT_VLUT };
11107ec681f3Smrg
11117ec681f3Smrg        for (unsigned i = 0; i < 3; ++i) {
11127ec681f3Smrg                if (*(units[i]))
11137ec681f3Smrg                        continue;
11147ec681f3Smrg
11157ec681f3Smrg                predicate->unit = unit_names[i];
11167ec681f3Smrg                midgard_instruction *ins =
11177ec681f3Smrg                        mir_choose_instruction(instructions, liveness, worklist, len, predicate);
11187ec681f3Smrg
11197ec681f3Smrg                if (ins) {
11207ec681f3Smrg                        ins->unit = unit_names[i];
11217ec681f3Smrg                        *(units[i]) = ins;
11227ec681f3Smrg                        success |= true;
11237ec681f3Smrg                        break;
11247ec681f3Smrg                }
11257ec681f3Smrg        }
11267ec681f3Smrg
11277ec681f3Smrg        predicate->dest = predicate->mask = 0;
11287ec681f3Smrg
11297ec681f3Smrg        if (success)
11307ec681f3Smrg                return;
11317ec681f3Smrg
11327ec681f3Smrg        midgard_instruction *mov = ralloc(ctx, midgard_instruction);
11337ec681f3Smrg        *mov = v_mov(src, make_compiler_temp(ctx));
11347ec681f3Smrg        mov->mask = 0x1;
11357ec681f3Smrg
11367ec681f3Smrg        branch->src[idx] = mov->dest;
11377ec681f3Smrg
11387ec681f3Smrg        if (stencil) {
11397ec681f3Smrg                unsigned swizzle = (branch->src[0] == ~0) ? COMPONENT_Y : COMPONENT_X;
11407ec681f3Smrg
11417ec681f3Smrg                for (unsigned c = 0; c < 16; ++c)
11427ec681f3Smrg                        mov->swizzle[1][c] = swizzle;
11437ec681f3Smrg        }
11447ec681f3Smrg
11457ec681f3Smrg        for (unsigned i = 0; i < 3; ++i) {
11467ec681f3Smrg                if (!(*(units[i]))) {
11477ec681f3Smrg                        *(units[i]) = mov;
11487ec681f3Smrg                        mov->unit = unit_names[i];
11497ec681f3Smrg                        return;
11507ec681f3Smrg                }
11517ec681f3Smrg        }
11527ec681f3Smrg
11537ec681f3Smrg        unreachable("Could not schedule Z/S move to any unit");
11547ec681f3Smrg}
11557ec681f3Smrg
11567ec681f3Smrgstatic midgard_bundle
11577ec681f3Smrgmir_schedule_alu(
11587ec681f3Smrg                compiler_context *ctx,
11597ec681f3Smrg                midgard_instruction **instructions,
11607ec681f3Smrg                uint16_t *liveness,
11617ec681f3Smrg                BITSET_WORD *worklist, unsigned len)
11627ec681f3Smrg{
11637ec681f3Smrg        struct midgard_bundle bundle = {};
11647ec681f3Smrg
11657ec681f3Smrg        unsigned bytes_emitted = sizeof(bundle.control);
11667ec681f3Smrg
11677ec681f3Smrg        struct midgard_predicate predicate = {
11687ec681f3Smrg                .tag = TAG_ALU_4,
11697ec681f3Smrg                .destructive = true,
11707ec681f3Smrg                .exclude = ~0,
11717ec681f3Smrg                .constants = &bundle.constants
11727ec681f3Smrg        };
11737ec681f3Smrg
11747ec681f3Smrg        midgard_instruction *vmul = NULL;
11757ec681f3Smrg        midgard_instruction *vadd = NULL;
11767ec681f3Smrg        midgard_instruction *vlut = NULL;
11777ec681f3Smrg        midgard_instruction *smul = NULL;
11787ec681f3Smrg        midgard_instruction *sadd = NULL;
11797ec681f3Smrg        midgard_instruction *branch = NULL;
11807ec681f3Smrg
11817ec681f3Smrg        mir_choose_alu(&branch, instructions, liveness, worklist, len, &predicate, ALU_ENAB_BR_COMPACT);
11827ec681f3Smrg        mir_update_worklist(worklist, len, instructions, branch);
11837ec681f3Smrg        unsigned writeout = branch ? branch->writeout : 0;
11847ec681f3Smrg
11857ec681f3Smrg        if (branch && branch->branch.conditional) {
11867ec681f3Smrg                midgard_instruction *cond = mir_schedule_condition(ctx, &predicate, worklist, len, instructions, branch);
11877ec681f3Smrg
11887ec681f3Smrg                if (cond->unit == UNIT_VADD)
11897ec681f3Smrg                        vadd = cond;
11907ec681f3Smrg                else if (cond->unit == UNIT_SMUL)
11917ec681f3Smrg                        smul = cond;
11927ec681f3Smrg                else
11937ec681f3Smrg                        unreachable("Bad condition");
11947ec681f3Smrg        }
11957ec681f3Smrg
11967ec681f3Smrg        /* If we have a render target reference, schedule a move for it. Since
11977ec681f3Smrg         * this will be in sadd, we boost this to prevent scheduling csel into
11987ec681f3Smrg         * smul */
11997ec681f3Smrg
12007ec681f3Smrg        if (writeout && (branch->constants.u32[0] || ctx->inputs->is_blend)) {
12017ec681f3Smrg                sadd = ralloc(ctx, midgard_instruction);
12027ec681f3Smrg                *sadd = v_mov(~0, make_compiler_temp(ctx));
12037ec681f3Smrg                sadd->unit = UNIT_SADD;
12047ec681f3Smrg                sadd->mask = 0x1;
12057ec681f3Smrg                sadd->has_inline_constant = true;
12067ec681f3Smrg                sadd->inline_constant = branch->constants.u32[0];
12077ec681f3Smrg                branch->src[1] = sadd->dest;
12087ec681f3Smrg                branch->src_types[1] = sadd->dest_type;
12097ec681f3Smrg        }
12107ec681f3Smrg
12117ec681f3Smrg        if (writeout) {
12127ec681f3Smrg                /* Propagate up */
12137ec681f3Smrg                bundle.last_writeout = branch->last_writeout;
12147ec681f3Smrg
12157ec681f3Smrg                /* Mask off any conditionals.
12167ec681f3Smrg                 * This prevents csel and csel_v being scheduled into smul
12177ec681f3Smrg                 * since we might not have room for a conditional in vmul/sadd.
12187ec681f3Smrg                 * This is important because both writeout and csel have same-bundle
12197ec681f3Smrg                 * requirements on their dependencies. */
12207ec681f3Smrg                predicate.no_cond = true;
12217ec681f3Smrg        }
12227ec681f3Smrg
12237ec681f3Smrg        /* When MRT is in use, writeout loops require r1.w to be filled with a
12247ec681f3Smrg         * return address for the blend shader to jump to.  We always emit the
12257ec681f3Smrg         * move for blend shaders themselves for ABI reasons. */
12267ec681f3Smrg
12277ec681f3Smrg        if (writeout && (ctx->inputs->is_blend || ctx->writeout_branch[1])) {
12287ec681f3Smrg                vadd = ralloc(ctx, midgard_instruction);
12297ec681f3Smrg                *vadd = v_mov(~0, make_compiler_temp(ctx));
12307ec681f3Smrg
12317ec681f3Smrg                if (!ctx->inputs->is_blend) {
12327ec681f3Smrg                        vadd->op = midgard_alu_op_iadd;
12337ec681f3Smrg                        vadd->src[0] = SSA_FIXED_REGISTER(31);
12347ec681f3Smrg                        vadd->src_types[0] = nir_type_uint32;
12357ec681f3Smrg
12367ec681f3Smrg                        for (unsigned c = 0; c < 16; ++c)
12377ec681f3Smrg                                vadd->swizzle[0][c] = COMPONENT_X;
12387ec681f3Smrg
12397ec681f3Smrg                        vadd->has_inline_constant = true;
12407ec681f3Smrg                        vadd->inline_constant = 0;
12417ec681f3Smrg                } else {
12427ec681f3Smrg                        vadd->src[1] = SSA_FIXED_REGISTER(1);
12437ec681f3Smrg                        vadd->src_types[0] = nir_type_uint32;
12447ec681f3Smrg
12457ec681f3Smrg                        for (unsigned c = 0; c < 16; ++c)
12467ec681f3Smrg                                vadd->swizzle[1][c] = COMPONENT_W;
12477ec681f3Smrg                }
12487ec681f3Smrg
12497ec681f3Smrg                vadd->unit = UNIT_VADD;
12507ec681f3Smrg                vadd->mask = 0x1;
12517ec681f3Smrg                branch->dest = vadd->dest;
12527ec681f3Smrg                branch->dest_type = vadd->dest_type;
12537ec681f3Smrg        }
12547ec681f3Smrg
12557ec681f3Smrg        if (writeout & PAN_WRITEOUT_Z)
12567ec681f3Smrg                mir_schedule_zs_write(ctx, &predicate, instructions, liveness, worklist, len, branch, &smul, &vadd, &vlut, false);
12577ec681f3Smrg
12587ec681f3Smrg        if (writeout & PAN_WRITEOUT_S)
12597ec681f3Smrg                mir_schedule_zs_write(ctx, &predicate, instructions, liveness, worklist, len, branch, &smul, &vadd, &vlut, true);
12607ec681f3Smrg
12617ec681f3Smrg        mir_choose_alu(&smul, instructions, liveness, worklist, len, &predicate, UNIT_SMUL);
12627ec681f3Smrg
12637ec681f3Smrg        for (unsigned mode = 1; mode < 3; ++mode) {
12647ec681f3Smrg                predicate.move_mode = mode;
12657ec681f3Smrg                predicate.no_mask = writeout ? (1 << 3) : 0;
12667ec681f3Smrg                mir_choose_alu(&vlut, instructions, liveness, worklist, len, &predicate, UNIT_VLUT);
12677ec681f3Smrg                predicate.no_mask = 0;
12687ec681f3Smrg                mir_choose_alu(&vadd, instructions, liveness, worklist, len, &predicate, UNIT_VADD);
12697ec681f3Smrg        }
12707ec681f3Smrg
12717ec681f3Smrg        /* Reset */
12727ec681f3Smrg        predicate.move_mode = 0;
12737ec681f3Smrg
12747ec681f3Smrg        mir_update_worklist(worklist, len, instructions, vlut);
12757ec681f3Smrg        mir_update_worklist(worklist, len, instructions, vadd);
12767ec681f3Smrg        mir_update_worklist(worklist, len, instructions, smul);
12777ec681f3Smrg
12787ec681f3Smrg        bool vadd_csel = vadd && OP_IS_CSEL(vadd->op);
12797ec681f3Smrg        bool smul_csel = smul && OP_IS_CSEL(smul->op);
12807ec681f3Smrg
12817ec681f3Smrg        if (vadd_csel || smul_csel) {
12827ec681f3Smrg                midgard_instruction *ins = vadd_csel ? vadd : smul;
12837ec681f3Smrg                midgard_instruction *cond = mir_schedule_condition(ctx, &predicate, worklist, len, instructions, ins);
12847ec681f3Smrg
12857ec681f3Smrg                if (cond->unit == UNIT_VMUL)
12867ec681f3Smrg                        vmul = cond;
12877ec681f3Smrg                else if (cond->unit == UNIT_SADD)
12887ec681f3Smrg                        sadd = cond;
12897ec681f3Smrg                else
12907ec681f3Smrg                        unreachable("Bad condition");
12917ec681f3Smrg        }
12927ec681f3Smrg
12937ec681f3Smrg        /* Stage 2, let's schedule sadd before vmul for writeout */
12947ec681f3Smrg        mir_choose_alu(&sadd, instructions, liveness, worklist, len, &predicate, UNIT_SADD);
12957ec681f3Smrg
12967ec681f3Smrg        /* Check if writeout reads its own register */
12977ec681f3Smrg
12987ec681f3Smrg        if (writeout) {
12997ec681f3Smrg                midgard_instruction *stages[] = { sadd, vadd, smul, vlut };
13007ec681f3Smrg                unsigned src = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(0) : branch->src[0];
13017ec681f3Smrg                unsigned writeout_mask = 0x0;
13027ec681f3Smrg                bool bad_writeout = false;
13037ec681f3Smrg
13047ec681f3Smrg                for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
13057ec681f3Smrg                        if (!stages[i])
13067ec681f3Smrg                                continue;
13077ec681f3Smrg
13087ec681f3Smrg                        if (stages[i]->dest != src)
13097ec681f3Smrg                                continue;
13107ec681f3Smrg
13117ec681f3Smrg                        writeout_mask |= stages[i]->mask;
13127ec681f3Smrg                        bad_writeout |= mir_has_arg(stages[i], branch->src[0]);
13137ec681f3Smrg                }
13147ec681f3Smrg
13157ec681f3Smrg                /* It's possible we'll be able to schedule something into vmul
13167ec681f3Smrg                 * to fill r0. Let's peak into the future, trying to schedule
13177ec681f3Smrg                 * vmul specially that way. */
13187ec681f3Smrg
13197ec681f3Smrg                unsigned full_mask = 0xF;
13207ec681f3Smrg
13217ec681f3Smrg                if (!bad_writeout && writeout_mask != full_mask) {
13227ec681f3Smrg                        predicate.unit = UNIT_VMUL;
13237ec681f3Smrg                        predicate.dest = src;
13247ec681f3Smrg                        predicate.mask = writeout_mask ^ full_mask;
13257ec681f3Smrg
13267ec681f3Smrg                        struct midgard_instruction *peaked =
13277ec681f3Smrg                                mir_choose_instruction(instructions, liveness, worklist, len, &predicate);
13287ec681f3Smrg
13297ec681f3Smrg                        if (peaked) {
13307ec681f3Smrg                                vmul = peaked;
13317ec681f3Smrg                                vmul->unit = UNIT_VMUL;
13327ec681f3Smrg                                writeout_mask |= predicate.mask;
13337ec681f3Smrg                                assert(writeout_mask == full_mask);
13347ec681f3Smrg                        }
13357ec681f3Smrg
13367ec681f3Smrg                        /* Cleanup */
13377ec681f3Smrg                        predicate.dest = predicate.mask = 0;
13387ec681f3Smrg                }
13397ec681f3Smrg
13407ec681f3Smrg                /* Finally, add a move if necessary */
13417ec681f3Smrg                if (bad_writeout || writeout_mask != full_mask) {
13427ec681f3Smrg                        unsigned temp = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(0) : make_compiler_temp(ctx);
13437ec681f3Smrg
13447ec681f3Smrg                        vmul = ralloc(ctx, midgard_instruction);
13457ec681f3Smrg                        *vmul = v_mov(src, temp);
13467ec681f3Smrg                        vmul->unit = UNIT_VMUL;
13477ec681f3Smrg                        vmul->mask = full_mask ^ writeout_mask;
13487ec681f3Smrg
13497ec681f3Smrg                        /* Rewrite to use our temp */
13507ec681f3Smrg
13517ec681f3Smrg                        for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
13527ec681f3Smrg                                if (stages[i]) {
13537ec681f3Smrg                                        mir_rewrite_index_dst_single(stages[i], src, temp);
13547ec681f3Smrg                                        mir_rewrite_index_src_single(stages[i], src, temp);
13557ec681f3Smrg                                }
13567ec681f3Smrg                        }
13577ec681f3Smrg
13587ec681f3Smrg                        mir_rewrite_index_src_single(branch, src, temp);
13597ec681f3Smrg                }
13607ec681f3Smrg        }
13617ec681f3Smrg
13627ec681f3Smrg        mir_choose_alu(&vmul, instructions, liveness, worklist, len, &predicate, UNIT_VMUL);
13637ec681f3Smrg
13647ec681f3Smrg        mir_update_worklist(worklist, len, instructions, vmul);
13657ec681f3Smrg        mir_update_worklist(worklist, len, instructions, sadd);
13667ec681f3Smrg
13677ec681f3Smrg        bundle.has_embedded_constants = predicate.constant_mask != 0;
13687ec681f3Smrg
13697ec681f3Smrg        unsigned padding = 0;
13707ec681f3Smrg
13717ec681f3Smrg        /* Now that we have finished scheduling, build up the bundle */
13727ec681f3Smrg        midgard_instruction *stages[] = { vmul, sadd, vadd, smul, vlut, branch };
13737ec681f3Smrg
13747ec681f3Smrg        for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
13757ec681f3Smrg                if (stages[i]) {
13767ec681f3Smrg                        bundle.control |= stages[i]->unit;
13777ec681f3Smrg                        bytes_emitted += bytes_for_instruction(stages[i]);
13787ec681f3Smrg                        bundle.instructions[bundle.instruction_count++] = stages[i];
13797ec681f3Smrg
13807ec681f3Smrg                        /* If we branch, we can't spill to TLS since the store
13817ec681f3Smrg                         * instruction will never get executed. We could try to
13827ec681f3Smrg                         * break the bundle but this is probably easier for
13837ec681f3Smrg                         * now. */
13847ec681f3Smrg
13857ec681f3Smrg                        if (branch)
13867ec681f3Smrg                                stages[i]->no_spill |= (1 << REG_CLASS_WORK);
13877ec681f3Smrg                }
13887ec681f3Smrg        }
13897ec681f3Smrg
13907ec681f3Smrg        /* Pad ALU op to nearest word */
13917ec681f3Smrg
13927ec681f3Smrg        if (bytes_emitted & 15) {
13937ec681f3Smrg                padding = 16 - (bytes_emitted & 15);
13947ec681f3Smrg                bytes_emitted += padding;
13957ec681f3Smrg        }
13967ec681f3Smrg
13977ec681f3Smrg        /* Constants must always be quadwords */
13987ec681f3Smrg        if (bundle.has_embedded_constants)
13997ec681f3Smrg                bytes_emitted += 16;
14007ec681f3Smrg
14017ec681f3Smrg        /* Size ALU instruction for tag */
14027ec681f3Smrg        bundle.tag = (TAG_ALU_4) + (bytes_emitted / 16) - 1;
14037ec681f3Smrg
14047ec681f3Smrg        bool tilebuf_wait = branch && branch->compact_branch &&
14057ec681f3Smrg           branch->branch.target_type == TARGET_TILEBUF_WAIT;
14067ec681f3Smrg
14077ec681f3Smrg        /* MRT capable GPUs use a special writeout procedure */
14087ec681f3Smrg        if ((writeout || tilebuf_wait) && !(ctx->quirks & MIDGARD_NO_UPPER_ALU))
14097ec681f3Smrg                bundle.tag += 4;
14107ec681f3Smrg
14117ec681f3Smrg        bundle.padding = padding;
14127ec681f3Smrg        bundle.control |= bundle.tag;
14137ec681f3Smrg
14147ec681f3Smrg        return bundle;
14157ec681f3Smrg}
14167ec681f3Smrg
14177ec681f3Smrg/* Schedule a single block by iterating its instruction to create bundles.
14187ec681f3Smrg * While we go, tally about the bundle sizes to compute the block size. */
14197ec681f3Smrg
14207ec681f3Smrg
14217ec681f3Smrgstatic void
14227ec681f3Smrgschedule_block(compiler_context *ctx, midgard_block *block)
14237ec681f3Smrg{
14247ec681f3Smrg        /* Copy list to dynamic array */
14257ec681f3Smrg        unsigned len = 0;
14267ec681f3Smrg        midgard_instruction **instructions = flatten_mir(block, &len);
14277ec681f3Smrg
14287ec681f3Smrg        if (!len)
14297ec681f3Smrg                return;
14307ec681f3Smrg
14317ec681f3Smrg        /* Calculate dependencies and initial worklist */
14327ec681f3Smrg        unsigned node_count = ctx->temp_count + 1;
14337ec681f3Smrg        mir_create_dependency_graph(instructions, len, node_count);
14347ec681f3Smrg
14357ec681f3Smrg        /* Allocate the worklist */
14367ec681f3Smrg        size_t sz = BITSET_WORDS(len) * sizeof(BITSET_WORD);
14377ec681f3Smrg        BITSET_WORD *worklist = calloc(sz, 1);
14387ec681f3Smrg        uint16_t *liveness = calloc(node_count, 2);
14397ec681f3Smrg        mir_initialize_worklist(worklist, instructions, len);
14407ec681f3Smrg
14417ec681f3Smrg        /* Count the number of load/store instructions so we know when it's
14427ec681f3Smrg         * worth trying to schedule them in pairs. */
14437ec681f3Smrg        unsigned num_ldst = 0;
14447ec681f3Smrg        for (unsigned i = 0; i < len; ++i) {
14457ec681f3Smrg                if (instructions[i]->type == TAG_LOAD_STORE_4)
14467ec681f3Smrg                        ++num_ldst;
14477ec681f3Smrg        }
14487ec681f3Smrg
14497ec681f3Smrg        struct util_dynarray bundles;
14507ec681f3Smrg        util_dynarray_init(&bundles, NULL);
14517ec681f3Smrg
14527ec681f3Smrg        block->quadword_count = 0;
14537ec681f3Smrg
14547ec681f3Smrg        for (;;) {
14557ec681f3Smrg                unsigned tag = mir_choose_bundle(instructions, liveness, worklist, len, num_ldst);
14567ec681f3Smrg                midgard_bundle bundle;
14577ec681f3Smrg
14587ec681f3Smrg                if (tag == TAG_TEXTURE_4)
14597ec681f3Smrg                        bundle = mir_schedule_texture(instructions, liveness, worklist, len, ctx->stage != MESA_SHADER_FRAGMENT);
14607ec681f3Smrg                else if (tag == TAG_LOAD_STORE_4)
14617ec681f3Smrg                        bundle = mir_schedule_ldst(instructions, liveness, worklist, len, &num_ldst);
14627ec681f3Smrg                else if (tag == TAG_ALU_4)
14637ec681f3Smrg                        bundle = mir_schedule_alu(ctx, instructions, liveness, worklist, len);
14647ec681f3Smrg                else
14657ec681f3Smrg                        break;
14667ec681f3Smrg
14677ec681f3Smrg                for (unsigned i = 0; i < bundle.instruction_count; ++i)
14687ec681f3Smrg                        bundle.instructions[i]->bundle_id =
14697ec681f3Smrg                                ctx->quadword_count + block->quadword_count;
14707ec681f3Smrg
14717ec681f3Smrg                util_dynarray_append(&bundles, midgard_bundle, bundle);
14727ec681f3Smrg                block->quadword_count += midgard_tag_props[bundle.tag].size;
14737ec681f3Smrg        }
14747ec681f3Smrg
14757ec681f3Smrg        assert(num_ldst == 0);
14767ec681f3Smrg
14777ec681f3Smrg        /* We emitted bundles backwards; copy into the block in reverse-order */
14787ec681f3Smrg
14797ec681f3Smrg        util_dynarray_init(&block->bundles, block);
14807ec681f3Smrg        util_dynarray_foreach_reverse(&bundles, midgard_bundle, bundle) {
14817ec681f3Smrg                util_dynarray_append(&block->bundles, midgard_bundle, *bundle);
14827ec681f3Smrg        }
14837ec681f3Smrg        util_dynarray_fini(&bundles);
14847ec681f3Smrg
14857ec681f3Smrg        block->scheduled = true;
14867ec681f3Smrg        ctx->quadword_count += block->quadword_count;
14877ec681f3Smrg
14887ec681f3Smrg        /* Reorder instructions to match bundled. First remove existing
14897ec681f3Smrg         * instructions and then recreate the list */
14907ec681f3Smrg
14917ec681f3Smrg        mir_foreach_instr_in_block_safe(block, ins) {
14927ec681f3Smrg                list_del(&ins->link);
14937ec681f3Smrg        }
14947ec681f3Smrg
14957ec681f3Smrg        mir_foreach_instr_in_block_scheduled_rev(block, ins) {
14967ec681f3Smrg                list_add(&ins->link, &block->base.instructions);
14977ec681f3Smrg        }
14987ec681f3Smrg
14997ec681f3Smrg	free(instructions); /* Allocated by flatten_mir() */
15007ec681f3Smrg	free(worklist);
15017ec681f3Smrg        free(liveness);
15027ec681f3Smrg}
15037ec681f3Smrg
15047ec681f3Smrg/* Insert moves to ensure we can register allocate load/store registers */
15057ec681f3Smrgstatic void
15067ec681f3Smrgmir_lower_ldst(compiler_context *ctx)
15077ec681f3Smrg{
15087ec681f3Smrg        mir_foreach_instr_global_safe(ctx, I) {
15097ec681f3Smrg                if (I->type != TAG_LOAD_STORE_4) continue;
15107ec681f3Smrg
15117ec681f3Smrg                mir_foreach_src(I, s) {
15127ec681f3Smrg                        if (s == 0) continue;
15137ec681f3Smrg                        if (I->src[s] == ~0) continue;
15147ec681f3Smrg                        if (I->swizzle[s][0] == 0) continue;
15157ec681f3Smrg
15167ec681f3Smrg                        unsigned temp = make_compiler_temp(ctx);
15177ec681f3Smrg                        midgard_instruction mov = v_mov(I->src[s], temp);
15187ec681f3Smrg                        mov.mask = 0x1;
15197ec681f3Smrg                        mov.dest_type = I->src_types[s];
15207ec681f3Smrg                        for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; ++c)
15217ec681f3Smrg                                mov.swizzle[1][c] = I->swizzle[s][0];
15227ec681f3Smrg
15237ec681f3Smrg                        mir_insert_instruction_before(ctx, I, mov);
15247ec681f3Smrg                        I->src[s] = mov.dest;
15257ec681f3Smrg                        I->swizzle[s][0] = 0;
15267ec681f3Smrg                }
15277ec681f3Smrg        }
15287ec681f3Smrg}
15297ec681f3Smrg
15307ec681f3Smrg/* Insert moves to ensure we can register allocate blend writeout */
15317ec681f3Smrgstatic void
15327ec681f3Smrgmir_lower_blend_input(compiler_context *ctx)
15337ec681f3Smrg{
15347ec681f3Smrg        mir_foreach_block(ctx, _blk) {
15357ec681f3Smrg                midgard_block *blk = (midgard_block *) _blk;
15367ec681f3Smrg
15377ec681f3Smrg                if (list_is_empty(&blk->base.instructions))
15387ec681f3Smrg                        continue;
15397ec681f3Smrg
15407ec681f3Smrg                midgard_instruction *I = mir_last_in_block(blk);
15417ec681f3Smrg
15427ec681f3Smrg                if (!I || I->type != TAG_ALU_4 || !I->writeout)
15437ec681f3Smrg                        continue;
15447ec681f3Smrg
15457ec681f3Smrg                mir_foreach_src(I, s) {
15467ec681f3Smrg                        unsigned src = I->src[s];
15477ec681f3Smrg
15487ec681f3Smrg                        if (src >= ctx->temp_count)
15497ec681f3Smrg                                continue;
15507ec681f3Smrg
15517ec681f3Smrg                        if (!_blk->live_out[src])
15527ec681f3Smrg                                continue;
15537ec681f3Smrg
15547ec681f3Smrg                        unsigned temp = make_compiler_temp(ctx);
15557ec681f3Smrg                        midgard_instruction mov = v_mov(src, temp);
15567ec681f3Smrg                        mov.mask = 0xF;
15577ec681f3Smrg                        mov.dest_type = nir_type_uint32;
15587ec681f3Smrg                        mir_insert_instruction_before(ctx, I, mov);
15597ec681f3Smrg                        I->src[s] = mov.dest;
15607ec681f3Smrg                }
15617ec681f3Smrg        }
15627ec681f3Smrg}
15637ec681f3Smrg
15647ec681f3Smrgvoid
15657ec681f3Smrgmidgard_schedule_program(compiler_context *ctx)
15667ec681f3Smrg{
15677ec681f3Smrg        mir_lower_ldst(ctx);
15687ec681f3Smrg        midgard_promote_uniforms(ctx);
15697ec681f3Smrg
15707ec681f3Smrg        /* Must be lowered right before scheduling */
15717ec681f3Smrg        mir_squeeze_index(ctx);
15727ec681f3Smrg        mir_lower_special_reads(ctx);
15737ec681f3Smrg
15747ec681f3Smrg        if (ctx->stage == MESA_SHADER_FRAGMENT) {
15757ec681f3Smrg                mir_invalidate_liveness(ctx);
15767ec681f3Smrg                mir_compute_liveness(ctx);
15777ec681f3Smrg                mir_lower_blend_input(ctx);
15787ec681f3Smrg        }
15797ec681f3Smrg
15807ec681f3Smrg        mir_squeeze_index(ctx);
15817ec681f3Smrg
15827ec681f3Smrg        /* Lowering can introduce some dead moves */
15837ec681f3Smrg
15847ec681f3Smrg        mir_foreach_block(ctx, _block) {
15857ec681f3Smrg                midgard_block *block = (midgard_block *) _block;
15867ec681f3Smrg                midgard_opt_dead_move_eliminate(ctx, block);
15877ec681f3Smrg                schedule_block(ctx, block);
15887ec681f3Smrg        }
15897ec681f3Smrg}
1590