/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/ralloc.h"
#include "util/register_allocate.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"

#define QPU_R(i) { .magic = false, .index = i }

#define ACC_INDEX     0
#define ACC_COUNT     6
#define PHYS_INDEX    (ACC_INDEX + ACC_COUNT)
#define PHYS_COUNT    64
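
/* The allocator's register space packs the accumulators and the physical
 * register file into one contiguous index range:
 *
 *   indices 0..5  -> accumulators r0..r5  (ACC_INDEX + n)
 *   indices 6..69 -> physical regs rf0..rf63 (PHYS_INDEX + n)
 *
 * so a register set of PHYS_INDEX + PHYS_COUNT = 70 entries covers both.
 */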

static inline bool
qinst_writes_tmu(struct qinst *inst)
{
        return (inst->dst.file == QFILE_MAGIC &&
                v3d_qpu_magic_waddr_is_tmu(inst->dst.index));
}

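/* Returns true if "inst" is the last ldtmu of its TMU operation: scanning
 * forward from it hits a new TMU setup write (or the end of the block)
 * before any other ldtmu signal.
 */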
static bool
is_last_ldtmu(struct qinst *inst, struct qblock *block)
{
        list_for_each_entry_from(struct qinst, scan_inst, inst->link.next,
                                 &block->instructions, link) {
                if (scan_inst->qpu.sig.ldtmu)
                        return false;
                if (qinst_writes_tmu(scan_inst))
                        return true;
        }

        return true;
}

static bool
vir_is_mov_uniform(struct v3d_compile *c, int temp)
{
        struct qinst *def = c->defs[temp];

        return def && def->qpu.sig.ldunif;
}

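/* Accumulates a spill cost for every temp: uniform loads cost one unit per
 * use (they can be rematerialized with another ldunif), anything else costs
 * tmu_scale per access (a spill or fill is a full TMU round trip).  Temps
 * accessed where no spill code can be inserted are knocked out of
 * c->spillable, then the allocator is asked for the best spill candidate.
 */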
static int
v3d_choose_spill_node(struct v3d_compile *c, struct ra_graph *g,
                      uint32_t *temp_to_node)
{
        const float tmu_scale = 5;
        float block_scale = 1.0;
        float spill_costs[c->num_temps];
        bool in_tmu_operation = false;
        bool started_last_seg = false;

        for (unsigned i = 0; i < c->num_temps; i++)
                spill_costs[i] = 0.0;

        /* XXX: Scale the cost up when inside of a loop. */
        vir_for_each_block(block, c) {
                vir_for_each_inst(inst, block) {
                        /* We can't insert a new TMU operation while currently
                         * in a TMU operation, and we can't insert new thread
                         * switches after starting output writes.
                         */
                        bool no_spilling =
                                (in_tmu_operation ||
                                 (c->threads > 1 && started_last_seg));

                        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                                if (inst->src[i].file != QFILE_TEMP)
                                        continue;

                                int temp = inst->src[i].index;
                                if (vir_is_mov_uniform(c, temp)) {
                                        spill_costs[temp] += block_scale;
                                } else if (!no_spilling) {
                                        spill_costs[temp] += (block_scale *
                                                              tmu_scale);
                                } else {
                                        BITSET_CLEAR(c->spillable, temp);
                                }
                        }

                        if (inst->dst.file == QFILE_TEMP) {
                                int temp = inst->dst.index;

                                if (vir_is_mov_uniform(c, temp)) {
                                        /* We just rematerialize the uniform
                                         * later.
                                         */
                                } else if (!no_spilling) {
                                        spill_costs[temp] += (block_scale *
                                                              tmu_scale);
                                } else {
                                        BITSET_CLEAR(c->spillable, temp);
                                }
                        }

                        /* Refuse to spill a ldvary's dst, because that means
                         * that ldvary's r5 would end up being used across a
                         * thrsw.
                         */
                        if (inst->qpu.sig.ldvary) {
                                assert(inst->dst.file == QFILE_TEMP);
                                BITSET_CLEAR(c->spillable, inst->dst.index);
                        }

                        if (inst->is_last_thrsw)
                                started_last_seg = true;

                        if (v3d_qpu_writes_vpm(&inst->qpu) ||
                            v3d_qpu_uses_tlb(&inst->qpu))
                                started_last_seg = true;

                        /* Track when we're in between a TMU setup and the
                         * final LDTMU or TMUWT from that TMU setup.  We can't
                         * spill/fill any temps during that time, because that
                         * involves inserting a new TMU setup/LDTMU sequence.
                         */
                        if (inst->qpu.sig.ldtmu &&
                            is_last_ldtmu(inst, block))
                                in_tmu_operation = false;

                        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT)
                                in_tmu_operation = false;

                        if (qinst_writes_tmu(inst))
                                in_tmu_operation = true;
                }
        }

        for (unsigned i = 0; i < c->num_temps; i++) {
                int node = temp_to_node[i];

                if (BITSET_TEST(c->spillable, i))
                        ra_set_node_spill_cost(g, node, spill_costs[i]);
        }

        return ra_get_best_spill_node(g);
}

/* The spill offset for this thread takes a bit of setup, so do it once at
 * program start.
 */
void
v3d_setup_spill_base(struct v3d_compile *c)
{
        c->cursor = vir_before_block(vir_entry_block(c));

        int start_num_temps = c->num_temps;

        /* Each thread wants to be in a separate region of the scratch space
         * so that the QPUs aren't fighting over cache lines.  We have the
         * driver keep a single global spill BO rather than
         * per-spilling-program BOs, so we need a uniform from the driver for
         * what the per-thread scale is.
         */
        struct qreg thread_offset =
                vir_UMUL(c,
                         vir_TIDX(c),
                         vir_uniform(c, QUNIFORM_SPILL_SIZE_PER_THREAD, 0));

        /* Each channel in a reg is 4 bytes, so scale them up by that. */
        struct qreg element_offset = vir_SHL(c, vir_EIDX(c),
                                             vir_uniform_ui(c, 2));

        c->spill_base = vir_ADD(c,
                                vir_ADD(c, thread_offset, element_offset),
                                vir_uniform(c, QUNIFORM_SPILL_OFFSET, 0));
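
        /* At this point the per-channel base address is:
         *
         *   spill_base = TIDX * spill_size_per_thread + EIDX * 4 + offset
         *
         * so every channel of every thread gets its own word-aligned slot
         * in the scratch BO.
         */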

        /* Make sure that we don't spill the spilling setup instructions. */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);

        c->cursor = vir_after_block(c->cur_block);
}

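/* Emits the TMU address write for a spill or fill: spill_base plus the
 * temp's slot offset is written to the TMUA magic register, which kicks off
 * the TMU transaction.
 */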
static void
v3d_emit_spill_tmua(struct v3d_compile *c, uint32_t spill_offset)
{
        vir_ADD_dest(c, vir_reg(QFILE_MAGIC,
                                V3D_QPU_WADDR_TMUA),
                     c->spill_base,
                     vir_uniform_ui(c, spill_offset));
}

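/* Spills one temp out of the program.  Uniform loads are "spilled" by just
 * rematerializing the ldunif at each use, for free.  Anything else gets a
 * scratch slot, roughly (a sketch only; the exact sequences are emitted
 * below):
 *
 *      mov t, ...    ->    mov t', ...
 *                          mov tmud, t'
 *                          add tmua, spill_base, offset
 *                          thrsw; tmuwt
 *
 *      ... = t       ->    add tmua, spill_base, offset
 *                          thrsw
 *                          ... = ldtmu
 */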
static void
v3d_spill_reg(struct v3d_compile *c, int spill_temp)
{
        bool is_uniform = vir_is_mov_uniform(c, spill_temp);

        uint32_t spill_offset = 0;

        if (!is_uniform) {
                spill_offset = c->spill_size;
                c->spill_size += V3D_CHANNELS * sizeof(uint32_t);

                if (spill_offset == 0)
                        v3d_setup_spill_base(c);
        }

        struct qinst *last_thrsw = c->last_thrsw;
        assert(!last_thrsw || last_thrsw->is_last_thrsw);

        int start_num_temps = c->num_temps;

        int uniform_index = ~0;
        if (is_uniform) {
                struct qinst *orig_unif = c->defs[spill_temp];
                uniform_index = orig_unif->uniform;
        }

        vir_for_each_inst_inorder_safe(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file != QFILE_TEMP ||
                            inst->src[i].index != spill_temp) {
                                continue;
                        }

                        c->cursor = vir_before_inst(inst);

                        if (is_uniform) {
                                struct qreg unif =
                                        vir_uniform(c,
                                                    c->uniform_contents[uniform_index],
                                                    c->uniform_data[uniform_index]);
                                inst->src[i] = unif;
                        } else {
                                v3d_emit_spill_tmua(c, spill_offset);
                                vir_emit_thrsw(c);
                                inst->src[i] = vir_LDTMU(c);
                                c->fills++;
                        }
                }

                if (inst->dst.file == QFILE_TEMP &&
                    inst->dst.index == spill_temp) {
                        if (is_uniform) {
                                c->cursor.link = NULL;
                                vir_remove_instruction(c, inst);
                        } else {
                                c->cursor = vir_after_inst(inst);

                                inst->dst.index = c->num_temps++;
                                vir_MOV_dest(c, vir_reg(QFILE_MAGIC,
                                                        V3D_QPU_WADDR_TMUD),
                                             inst->dst);
                                v3d_emit_spill_tmua(c, spill_offset);
                                vir_emit_thrsw(c);
                                vir_TMUWT(c);
                                c->spills++;
                        }
                }

                /* If we didn't have a last-thrsw inserted by nir_to_vir and
                 * we've been inserting thrsws, then insert a new last_thrsw
                 * right before we start the vpm/tlb sequence for the last
                 * thread segment.
                 */
                if (!is_uniform && !last_thrsw && c->last_thrsw &&
                    (v3d_qpu_writes_vpm(&inst->qpu) ||
                     v3d_qpu_uses_tlb(&inst->qpu))) {
                        c->cursor = vir_before_inst(inst);
                        vir_emit_thrsw(c);

                        last_thrsw = c->last_thrsw;
                        last_thrsw->is_last_thrsw = true;
                }
        }

        /* Make sure c->last_thrsw is the actual last thrsw, not just one we
         * inserted in our most recent unspill.
         */
        if (last_thrsw)
                c->last_thrsw = last_thrsw;

        /* Don't allow spilling of our spilling instructions.  There's no way
         * they can help get things colored.
         */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);
}

struct v3d_ra_select_callback_data {
        uint32_t next_acc;
        uint32_t next_phys;
};

static unsigned int
v3d_ra_select_callback(struct ra_graph *g, BITSET_WORD *regs, void *data)
{
        struct v3d_ra_select_callback_data *v3d_ra = data;
        int r5 = ACC_INDEX + 5;

        /* Choose r5 for our ldunifs if possible (nobody else can load to that
         * reg, and it keeps the QPU cond field free from being occupied by
         * ldunifrf).
         */
        if (BITSET_TEST(regs, r5))
                return r5;

        /* Choose an accumulator if possible (I think it's lower power than
         * phys regs), but round-robin through them to give post-RA
         * instruction selection more options.
         */
        for (int i = 0; i < ACC_COUNT; i++) {
                int acc_off = (v3d_ra->next_acc + i) % ACC_COUNT;
                int acc = ACC_INDEX + acc_off;

                if (BITSET_TEST(regs, acc)) {
                        v3d_ra->next_acc = acc_off + 1;
                        return acc;
                }
        }

        for (int i = 0; i < PHYS_COUNT; i++) {
                int phys_off = (v3d_ra->next_phys + i) % PHYS_COUNT;
                int phys = PHYS_INDEX + phys_off;

                if (BITSET_TEST(regs, phys)) {
                        v3d_ra->next_phys = phys_off + 1;
                        return phys;
                }
        }

        unreachable("RA must pass us at least one possible reg.");
}

bool
vir_init_reg_sets(struct v3d_compiler *compiler)
{
        /* Allocate up to 3 regfile classes, for the ways the physical
         * register file can be divided up for fragment shader threading.
         */
        int max_thread_index = (compiler->devinfo->ver >= 40 ? 2 : 3);

        compiler->regs = ra_alloc_reg_set(compiler, PHYS_INDEX + PHYS_COUNT,
                                          true);
        if (!compiler->regs)
                return false;

        for (int threads = 0; threads < max_thread_index; threads++) {
                compiler->reg_class_any[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_r5[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_phys_or_acc[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_phys[threads] =
                        ra_alloc_reg_class(compiler->regs);

                for (int i = PHYS_INDEX;
                     i < PHYS_INDEX + (PHYS_COUNT >> threads); i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_any[threads], i);
                }

                for (int i = ACC_INDEX + 0; i < ACC_INDEX + ACC_COUNT - 1; i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_any[threads], i);
                }
                /* r5 can only store a single 32-bit value, so not much can
                 * use it.
                 */
                ra_class_add_reg(compiler->regs,
                                 compiler->reg_class_r5[threads],
                                 ACC_INDEX + 5);
                ra_class_add_reg(compiler->regs,
                                 compiler->reg_class_any[threads],
                                 ACC_INDEX + 5);
        }

        ra_set_finalize(compiler->regs, NULL);

        return true;
}

struct node_to_temp_map {
        uint32_t temp;
        uint32_t priority;
};

static int
node_to_temp_priority(const void *in_a, const void *in_b)
{
        const struct node_to_temp_map *a = in_a;
        const struct node_to_temp_map *b = in_b;

        return a->priority - b->priority;
}

#define CLASS_BIT_PHYS			(1 << 0)
#define CLASS_BIT_ACC			(1 << 1)
#define CLASS_BIT_R5			(1 << 4)
#define CLASS_BITS_ANY			(CLASS_BIT_PHYS | \
                                         CLASS_BIT_ACC | \
                                         CLASS_BIT_R5)
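
/* Each temp starts with all of these bits set, and instructions mask bits
 * off as they constrain the temp.  The final bit pattern selects the
 * register class at the end of v3d_register_allocate():
 *
 *   CLASS_BIT_PHYS only            -> reg_class_phys
 *   CLASS_BIT_R5 only              -> reg_class_r5
 *   CLASS_BIT_PHYS | CLASS_BIT_ACC -> reg_class_phys_or_acc
 *   CLASS_BITS_ANY                 -> reg_class_any
 */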

/**
 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
 *
 * The return value should be freed by the caller.
 */
struct qpu_reg *
v3d_register_allocate(struct v3d_compile *c, bool *spilled)
{
        struct node_to_temp_map map[c->num_temps];
        uint32_t temp_to_node[c->num_temps];
        uint8_t class_bits[c->num_temps];
        int acc_nodes[ACC_COUNT];
        struct v3d_ra_select_callback_data callback_data = {
                .next_acc = 0,
                /* Start at RF3, to try to keep the TLB writes from using
                 * RF0-2.
                 */
                .next_phys = 3,
        };

        *spilled = false;

        vir_calculate_live_intervals(c);

        /* Convert 1, 2, 4 threads to 0, 1, 2 index.
         *
         * V3D 4.x has double the physical register space, so 64 physical regs
         * are available at both 1x and 2x threading, and 4x has 32.
         */
        int thread_index = ffs(c->threads) - 1;
        if (c->devinfo->ver >= 40) {
                if (thread_index >= 1)
                        thread_index--;
        }
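
        /* For example, at 4x threading: ffs(4) - 1 == 2, which on V3D 3.x
         * selects the 64 >> 2 == 16 reg class; on 4.x it's decremented to 1,
         * the 64 >> 1 == 32 reg class.
         */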

        struct ra_graph *g = ra_alloc_interference_graph(c->compiler->regs,
                                                         c->num_temps +
                                                         ARRAY_SIZE(acc_nodes));
        ra_set_select_reg_callback(g, v3d_ra_select_callback, &callback_data);

        /* Make some fixed nodes for the accumulators, which we will need to
         * interfere with when ops have implied r3/r4 writes or for the thread
         * switches.  We could represent these as classes for the nodes to
         * live in, but the classes take up a lot of memory to set up, so we
         * don't want to make too many.
         */
        for (int i = 0; i < ARRAY_SIZE(acc_nodes); i++) {
                acc_nodes[i] = c->num_temps + i;
                ra_set_node_reg(g, acc_nodes[i], ACC_INDEX + i);
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                map[i].temp = i;
                map[i].priority = c->temp_end[i] - c->temp_start[i];
        }
        qsort(map, c->num_temps, sizeof(map[0]), node_to_temp_priority);
        for (uint32_t i = 0; i < c->num_temps; i++) {
                temp_to_node[map[i].temp] = i;
        }
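
        /* Nodes are handed out in ascending order of live-range length, so
         * the shortest-lived temps get the lowest node numbers.
         */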

        /* Figure out our register classes and preallocated registers.  We
         * start with any temp being able to be in any file, then instructions
         * incrementally remove bits that the temp definitely can't be in.
         */
        memset(class_bits, CLASS_BITS_ANY, sizeof(class_bits));

        int ip = 0;
        vir_for_each_inst_inorder(inst, c) {
                /* If the instruction writes r3/r4 (and optionally moves its
                 * result to a temp), nothing else can be stored in r3/r4 across
                 * it.
                 */
                if (vir_writes_r3(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[3]);
                                }
                        }
                }
                if (vir_writes_r4(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[4]);
                                }
                        }
                }

                if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                        switch (inst->qpu.alu.add.op) {
                        case V3D_QPU_A_LDVPMV_IN:
                        case V3D_QPU_A_LDVPMV_OUT:
                        case V3D_QPU_A_LDVPMD_IN:
                        case V3D_QPU_A_LDVPMD_OUT:
                        case V3D_QPU_A_LDVPMP:
                        case V3D_QPU_A_LDVPMG_IN:
                        case V3D_QPU_A_LDVPMG_OUT:
                                /* LDVPMs only store to temps (the MA flag
                                 * decides whether the LDVPM is in or out)
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        case V3D_QPU_A_RECIP:
                        case V3D_QPU_A_RSQRT:
                        case V3D_QPU_A_EXP:
                        case V3D_QPU_A_LOG:
                        case V3D_QPU_A_SIN:
                        case V3D_QPU_A_RSQRT2:
                                /* The SFU instructions write directly to the
                                 * phys regfile.
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        default:
                                break;
                        }
                }

                if (inst->src[0].file == QFILE_REG) {
                        switch (inst->src[0].index) {
                        case 0:
                        case 1:
                        case 2:
                        case 3:
                                /* Payload setup instructions: Force allocate
                                 * the dst to the given register (so the MOV
                                 * will disappear).
                                 */
                                assert(inst->qpu.alu.mul.op == V3D_QPU_M_MOV);
                                assert(inst->dst.file == QFILE_TEMP);
                                ra_set_node_reg(g,
                                                temp_to_node[inst->dst.index],
                                                PHYS_INDEX +
                                                inst->src[0].index);
                                break;
                        }
                }

                if (inst->dst.file == QFILE_TEMP) {
                        /* Only a ldunif gets to write to R5, which only has a
                         * single 32-bit channel of storage.
                         */
                        if (!inst->qpu.sig.ldunif) {
                                class_bits[inst->dst.index] &= ~CLASS_BIT_R5;
                        } else {
                                /* Until V3D 4.x, we could only load a uniform
                                 * to r5, so we'll need to spill if uniform
                                 * loads interfere with each other.
                                 */
                                if (c->devinfo->ver < 40) {
                                        class_bits[inst->dst.index] &=
                                                CLASS_BIT_R5;
                                }
                        }
                }

                if (inst->qpu.sig.thrsw) {
                        /* All accumulators are invalidated across a thread
                         * switch.
                         */
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip && c->temp_end[i] > ip)
                                        class_bits[i] &= CLASS_BIT_PHYS;
                        }
                }

                ip++;
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                if (class_bits[i] == CLASS_BIT_PHYS) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys[thread_index]);
                } else if (class_bits[i] == (CLASS_BIT_R5)) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_r5[thread_index]);
                } else if (class_bits[i] == (CLASS_BIT_PHYS | CLASS_BIT_ACC)) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys_or_acc[thread_index]);
                } else {
                        assert(class_bits[i] == CLASS_BITS_ANY);
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_any[thread_index]);
                }
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                for (uint32_t j = i + 1; j < c->num_temps; j++) {
                        if (!(c->temp_start[i] >= c->temp_end[j] ||
                              c->temp_start[j] >= c->temp_end[i])) {
                                ra_add_node_interference(g,
                                                         temp_to_node[i],
                                                         temp_to_node[j]);
                        }
                }
        }

        /* Debug code to force a bit of register spilling, for running across
         * conformance tests to make sure that spilling works.
         */
        int force_register_spills = 0;
        if (c->spill_size <
            V3D_CHANNELS * sizeof(uint32_t) * force_register_spills) {
                int node = v3d_choose_spill_node(c, g, temp_to_node);
                if (node != -1) {
                        v3d_spill_reg(c, map[node].temp);
                        ralloc_free(g);
                        *spilled = true;
                        return NULL;
                }
        }

        bool ok = ra_allocate(g);
        if (!ok) {
                int node = v3d_choose_spill_node(c, g, temp_to_node);

                /* Don't emit spills using the TMU until we've dropped thread
                 * count first.
                 */
                if (node != -1 &&
                    (vir_is_mov_uniform(c, map[node].temp) ||
                     thread_index == 0)) {
                        v3d_spill_reg(c, map[node].temp);

                        /* Ask the outer loop to call back in. */
                        *spilled = true;
                }

                ralloc_free(g);
                return NULL;
        }

        struct qpu_reg *temp_registers = calloc(c->num_temps,
                                                sizeof(*temp_registers));

        for (uint32_t i = 0; i < c->num_temps; i++) {
                int ra_reg = ra_get_node_reg(g, temp_to_node[i]);
                if (ra_reg < PHYS_INDEX) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = (V3D_QPU_WADDR_R0 +
                                                   ra_reg - ACC_INDEX);
                } else {
                        temp_registers[i].magic = false;
                        temp_registers[i].index = ra_reg - PHYS_INDEX;
                }

                /* If the value's never used, just write to the NOP register
                 * for clarity in debug output.
                 */
                if (c->temp_start[i] == c->temp_end[i]) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = V3D_QPU_WADDR_NOP;
                }
        }

        ralloc_free(g);

        return temp_registers;
}