1/*
2 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
3 * Copyright (C) 2019 Collabora, Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25#include "compiler.h"
26
27/* Creates pipeline registers. This is a prepass run before the main register
28 * allocator but after scheduling, once bundles are created. It works by
29 * iterating the scheduled IR, checking if a value is ever used after the end
30 * of the current bundle. If it is not, it is promoted to a bundle-specific
31 * pipeline register.
32 *
33 * Pipeline registers are only written from the first two stages of the
34 * pipeline (vmul/sadd) lasting the duration of the bundle only. There are two
35 * 128-bit pipeline registers available (r24/r25). The upshot is that no actual
36 * register allocation is needed; we can _always_ promote a value to a pipeline
 * register, liveness permitting. This greatly simplifies the logic of this
 * pass, negating the need for a proper RA as required for work registers.
39 */
40
41static bool
42mir_pipeline_ins(
43        compiler_context *ctx,
44        midgard_block *block,
45        midgard_bundle *bundle, unsigned i,
46        unsigned pipeline_count)
47{
48        midgard_instruction *ins = bundle->instructions[i];
49
50        /* Our goal is to create a pipeline register. Pipeline registers are
51         * created at the start of the bundle and are destroyed at the end. So
52         * we conservatively require:
53         *
54         *  1. Each component read in the second stage is written in the first stage.
55         *  2. The index is not live after the bundle.
56         *  3. We're not a special index (writeout, conditionals, ..)
57         *
58         * Rationale: #1 ensures that there is no need to go before the
59         * creation of the bundle, so the pipeline register can exist. #2 is
60         * since the pipeline register will be destroyed at the end. This
61         * ensures that nothing will try to read/write the pipeline register
62         * once it is not live, and that there's no need to go earlier. */
63
64        unsigned node = ins->dest;
65        unsigned read_mask = 0;
66
67        if (node >= SSA_FIXED_MINIMUM)
68                return false;
69
70        if (node == ctx->blend_src1)
71                return false;
72
73        /* Analyze the bundle for a per-byte read mask */
74
75        for (unsigned j = 0; j < bundle->instruction_count; ++j) {
76                midgard_instruction *q = bundle->instructions[j];
77
78                /* The fragment colour can't be pipelined (well, it is
79                 * pipelined in r0, but this is a delicate dance with
80                 * scheduling and RA, not for us to worry about) */
81
82                if (q->compact_branch && q->writeout && mir_has_arg(q, node))
83                        return false;
84
85                if (q->unit < UNIT_VADD) continue;
86                read_mask |= mir_bytemask_of_read_components(q, node);
87        }
88
89        /* Now check what's written in the beginning stage  */
90        for (unsigned j = 0; j < bundle->instruction_count; ++j) {
91                midgard_instruction *q = bundle->instructions[j];
92                if (q->unit >= UNIT_VADD) break;
93                if (q->dest != node) continue;
94
95                /* Remove the written mask from the read requirements */
96                read_mask &= ~mir_bytemask(q);
97        }
98
99        /* Check for leftovers */
100        if (read_mask)
101                return false;
102
103        /* We want to know if we live after this bundle, so check if
104         * we're live after the last instruction of the bundle */
105
106        midgard_instruction *end = bundle->instructions[
107                                    bundle->instruction_count - 1];
108
109        if (mir_is_live_after(ctx, block, end, ins->dest))
110                return false;
111
112        /* We're only live in this bundle -- pipeline! */
113        unsigned preg = SSA_FIXED_REGISTER(24 + pipeline_count);
114
115        for (unsigned j = 0; j < bundle->instruction_count; ++j) {
116                midgard_instruction *q = bundle->instructions[j];
117
118                if (q->unit >= UNIT_VADD)
119                        mir_rewrite_index_src_single(q, node, preg);
120                else
121                        mir_rewrite_index_dst_single(q, node, preg);
122        }
123
124        return true;
125}
126
127void
128mir_create_pipeline_registers(compiler_context *ctx)
129{
130        mir_invalidate_liveness(ctx);
131
132        mir_foreach_block(ctx, _block) {
133                midgard_block *block = (midgard_block *) _block;
134
135                mir_foreach_bundle_in_block(block, bundle) {
136                        if (!mir_is_alu_bundle(bundle)) continue;
137                        if (bundle->instruction_count < 2) continue;
138
139                        /* Only first 2 instructions could pipeline */
140                        bool succ = mir_pipeline_ins(ctx, block, bundle, 0, 0);
141                        mir_pipeline_ins(ctx, block, bundle, 1, succ);
142                }
143        }
144}
145