/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */


#include "util/u_math.h"

#include "ir3.h"

/*
 * Instruction Scheduling:
 *
 * A recursive depth based scheduling algo.  Recursively find an eligible
 * instruction to schedule from the deepest instruction (recursing through
 * its unscheduled src instructions).  Normally this would result in a
 * lot of re-traversal of the same instructions, so we cache results in
 * instr->data (and clear cached results that would no longer be valid
 * after scheduling an instruction).
 *
 * There are a few special cases that need to be handled, since sched
 * is currently independent of register allocation.  Usages of address
 * register (a0.x) or predicate register (p0.x) must be serialized.  Ie.
 * if you have two pairs of instructions that write the same special
 * register and then read it, then those pairs cannot be interleaved.
 * To solve this, when we are in such a scheduling "critical section",
 * and we encounter a conflicting write to a special register, we try
 * to schedule any remaining instructions that use that value first.
 */

struct ir3_sched_ctx {
	struct ir3_block *block;           /* the current block */
	struct list_head depth_list;       /* depth sorted unscheduled instrs */
	struct ir3_instruction *scheduled; /* last scheduled instr XXX remove*/
	struct ir3_instruction *addr;      /* current a0.x user, if any */
	struct ir3_instruction *pred;      /* current p0.x user, if any */
	int live_values;                   /* estimate of current live values */
	bool error;
};

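/* sfu and mem instructions can't be scheduled back-to-back; schedule()
 * below stuffs a nop between two such consecutive instructions:
 */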
static bool is_sfu_or_mem(struct ir3_instruction *instr)
{
	return is_sfu(instr) || is_mem(instr);
}

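/* When an instruction is scheduled, drop the use-count of each of its ssa
 * srcs (looking through fanin/fanout meta instructions); once the last use
 * of a value is scheduled, it no longer counts against live_values:
 */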
static void
unuse_each_src(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
{
	struct ir3_instruction *src;

	foreach_ssa_src_n(src, n, instr) {
		if (__is_false_dep(instr, n))
			continue;
		if (instr->block != src->block)
			continue;
		if ((src->opc == OPC_META_FI) || (src->opc == OPC_META_FO)) {
			unuse_each_src(ctx, src);
		} else {
			debug_assert(src->use_count > 0);

			if (--src->use_count == 0) {
				ctx->live_values -= dest_regs(src);
				debug_assert(ctx->live_values >= 0);
			}
		}
	}
}

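/* Bump the use-count of each ssa src (again looking through fanin/fanout
 * meta instructions):
 */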
static void
use_each_src(struct ir3_instruction *instr)
{
	struct ir3_instruction *src;

	foreach_ssa_src_n(src, n, instr) {
		if (__is_false_dep(instr, n))
			continue;
		if (instr->block != src->block)
			continue;
		if ((src->opc == OPC_META_FI) || (src->opc == OPC_META_FO)) {
			use_each_src(src);
		} else {
			src->use_count++;
		}
	}
}

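/* Update the live-value estimate when 'instr' is scheduled: its dst(s)
 * become live, and any srcs whose last use this was stop being live:
 */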
static void
update_live_values(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
{
	if ((instr->opc == OPC_META_FI) || (instr->opc == OPC_META_FO))
		return;

	ctx->live_values += dest_regs(instr);
	unuse_each_src(ctx, instr);
}

/* This is *slightly* different than how ir3_cp uses use_count, in that
 * we just track it per block (because we schedule a block at a time) and
 * because we don't track meta instructions and false dependencies (since
 * they don't contribute real register pressure).
 */
static void
update_use_count(struct ir3_block *block)
{
	list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
		instr->use_count = 0;
	}

	list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
		if ((instr->opc == OPC_META_FI) || (instr->opc == OPC_META_FO))
			continue;

		use_each_src(instr);
	}
}

#define NULL_INSTR ((void *)~0)

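/* Invalidate cached results of find_instr_recursive().  With instr==NULL
 * everything is invalidated, otherwise only entries which resolved to
 * instr (or to the NULL_INSTR sentinel) are cleared:
 */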
static void
clear_cache(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
{
	list_for_each_entry (struct ir3_instruction, instr2, &ctx->depth_list, node) {
		if ((instr2->data == instr) || (instr2->data == NULL_INSTR) || !instr)
			instr2->data = NULL;
	}
}

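/* Actually schedule 'instr': pull it off the depth list, append it to the
 * block's instruction list, and update addr/pred tracking, the live-value
 * estimate, and the scheduling cache:
 */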
static void
schedule(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
{
	debug_assert(ctx->block == instr->block);

	/* maybe there is a better way to handle this than just stuffing
	 * a nop.. ideally we'd know about this constraint in the
	 * scheduling and depth calculation..
	 */
	if (ctx->scheduled && is_sfu_or_mem(ctx->scheduled) && is_sfu_or_mem(instr))
		ir3_NOP(ctx->block);

	/* remove from depth list:
	 */
	list_delinit(&instr->node);

	if (writes_addr(instr)) {
		debug_assert(ctx->addr == NULL);
		ctx->addr = instr;
	}

	if (writes_pred(instr)) {
		debug_assert(ctx->pred == NULL);
		ctx->pred = instr;
	}

	instr->flags |= IR3_INSTR_MARK;

	list_addtail(&instr->node, &instr->block->instr_list);
	ctx->scheduled = instr;

	update_live_values(ctx, instr);

	if (writes_addr(instr) || writes_pred(instr) || is_input(instr)) {
		clear_cache(ctx, NULL);
	} else {
		/* invalidate only the necessary entries.. */
		clear_cache(ctx, instr);
	}
}

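/* Pick (and remove from the array) the remaining src with the largest
 * ->sun, so successive calls hand back each src exactly once:
 */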
static struct ir3_instruction *
deepest(struct ir3_instruction **srcs, unsigned nsrcs)
{
	struct ir3_instruction *d = NULL;
	unsigned i = 0, id = 0;

	while ((i < nsrcs) && !(d = srcs[id = i]))
		i++;

	if (!d)
		return NULL;

	for (; i < nsrcs; i++)
		if (srcs[i] && (srcs[i]->sun > d->sun))
			d = srcs[id = i];

	srcs[id] = NULL;

	return d;
}

/**
 * @block: the block to search in, starting from end; in first pass,
 *    this will be the block the instruction would be inserted into
 *    (but has not yet been, ie. it only contains already scheduled
 *    instructions).  For intra-block scheduling (second pass), this
 *    would be one of the predecessor blocks.
 * @instr: the instruction to search for
 * @maxd:  max distance, bail after searching this # of instruction
 *    slots, since it means the instruction we are looking for is
 *    far enough away
 * @pred:  if true, recursively search into predecessor blocks to
 *    find the worst case (shortest) distance (only possible after
 *    individual blocks are all scheduled)
 */
static unsigned
distance(struct ir3_block *block, struct ir3_instruction *instr,
		unsigned maxd, bool pred)
{
	unsigned d = 0;

	list_for_each_entry_rev (struct ir3_instruction, n, &block->instr_list, node) {
		if ((n == instr) || (d >= maxd))
			return d;
		/* NOTE: don't count branch/jump since we don't know yet if they will
		 * be eliminated later in resolve_jumps().. really should do that
		 * earlier so we don't have this constraint.
		 */
		if (is_alu(n) || (is_flow(n) && (n->opc != OPC_JUMP) && (n->opc != OPC_BR)))
			d++;
	}

	/* if coming from a predecessor block, assume it is assigned far
	 * enough away.. we'll fix up later.
	 */
	if (!pred)
		return maxd;

	if (pred && (block->data != block)) {
		/* Search into predecessor blocks, finding the one with the
		 * shortest distance, since that will be the worst case
		 */
		unsigned min = maxd - d;

		/* (ab)use block->data to prevent recursion: */
		block->data = block;

		for (unsigned i = 0; i < block->predecessors_count; i++) {
			unsigned n;

			n = distance(block->predecessors[i], instr, min, pred);

			min = MIN2(min, n);
		}

		block->data = NULL;
		d += min;
	}

	return d;
}

/* calculate delay for specified src: */
static unsigned
delay_calc_srcn(struct ir3_block *block,
		struct ir3_instruction *assigner,
		struct ir3_instruction *consumer,
		unsigned srcn, bool soft, bool pred)
{
	unsigned delay = 0;

	if (is_meta(assigner)) {
		struct ir3_instruction *src;
		foreach_ssa_src(src, assigner) {
			unsigned d;
			d = delay_calc_srcn(block, src, consumer, srcn, soft, pred);
			delay = MAX2(delay, d);
		}
	} else {
		if (soft) {
			if (is_sfu(assigner)) {
				delay = 4;
			} else {
				delay = ir3_delayslots(assigner, consumer, srcn);
			}
		} else {
			delay = ir3_delayslots(assigner, consumer, srcn);
		}
		delay -= distance(block, assigner, delay, pred);
	}

	return delay;
}

/* calculate delay for instruction (maximum of delay for all srcs): */
static unsigned
delay_calc(struct ir3_block *block, struct ir3_instruction *instr,
		bool soft, bool pred)
{
	unsigned delay = 0;
	struct ir3_instruction *src;

	foreach_ssa_src_n(src, i, instr) {
		unsigned d;
		d = delay_calc_srcn(block, src, instr, i, soft, pred);
		delay = MAX2(delay, d);
	}

	return delay;
}

struct ir3_sched_notes {
	/* there is at least one kill which could be scheduled, except
	 * for unscheduled bary.f's:
	 */
	bool blocked_kill;
	/* there is at least one instruction that could be scheduled,
	 * except for conflicting address/predicate register usage:
	 */
	bool addr_conflict, pred_conflict;
};

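/* an instruction is "scheduled" once it has been marked and moved back
 * onto the block's instruction list:
 */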
static bool is_scheduled(struct ir3_instruction *instr)
{
	return !!(instr->flags & IR3_INSTR_MARK);
}

/* could an instruction be scheduled if specified ssa src was scheduled? */
static bool
could_sched(struct ir3_instruction *instr, struct ir3_instruction *src)
{
	struct ir3_instruction *other_src;
	foreach_ssa_src(other_src, instr) {
		/* if dependency not scheduled, we aren't ready yet: */
		if ((src != other_src) && !is_scheduled(other_src)) {
			return false;
		}
	}
	return true;
}

/* Check if instruction is ok to schedule.  Make sure it is not blocked
 * by use of addr/predicate register, etc.
 */
static bool
check_instr(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
		struct ir3_instruction *instr)
{
	/* For instructions that write address register we need to
	 * make sure there is at least one instruction that uses the
	 * addr value which is otherwise ready.
	 *
	 * TODO if any instructions use pred register and have other
	 * src args, we would need to do the same for writes_pred()..
	 */
	if (writes_addr(instr)) {
		struct ir3 *ir = instr->block->shader;
		bool ready = false;
		for (unsigned i = 0; (i < ir->indirects_count) && !ready; i++) {
			struct ir3_instruction *indirect = ir->indirects[i];
			if (!indirect)
				continue;
			if (indirect->address != instr)
				continue;
			ready = could_sched(indirect, instr);
		}

		/* nothing could be scheduled, so keep looking: */
		if (!ready)
			return false;
	}

	/* if this is a write to address/predicate register, and that
	 * register is currently in use, we need to defer until it is
	 * free:
	 */
	if (writes_addr(instr) && ctx->addr) {
		debug_assert(ctx->addr != instr);
		notes->addr_conflict = true;
		return false;
	}

	if (writes_pred(instr) && ctx->pred) {
		debug_assert(ctx->pred != instr);
		notes->pred_conflict = true;
		return false;
	}

	/* if the instruction is a kill, we need to ensure *every*
	 * bary.f is scheduled.  The hw seems unhappy if the thread
	 * gets killed before the end-input (ei) flag is hit.
	 *
	 * We could do this by adding each bary.f instruction as
	 * virtual ssa src for the kill instruction.  But we have
	 * fixed length instr->regs[].
	 *
	 * TODO this wouldn't be quite right if we had multiple
	 * basic blocks, if any block was conditional.  We'd need
	 * to schedule the bary.f's outside of any block which
	 * was conditional that contained a kill.. I think..
	 */
	if (is_kill(instr)) {
		struct ir3 *ir = instr->block->shader;

		for (unsigned i = 0; i < ir->baryfs_count; i++) {
			struct ir3_instruction *baryf = ir->baryfs[i];
			if (baryf->flags & IR3_INSTR_UNUSED)
				continue;
			if (!is_scheduled(baryf)) {
				notes->blocked_kill = true;
				return false;
			}
		}
	}

	return true;
}

/* Find the best instruction to schedule from specified instruction or
 * recursively its ssa sources.
 */
static struct ir3_instruction *
find_instr_recursive(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
		struct ir3_instruction *instr)
{
	struct ir3_instruction *srcs[__ssa_src_cnt(instr)];
	struct ir3_instruction *src;
	unsigned nsrcs = 0;

	if (is_scheduled(instr))
		return NULL;

	/* use instr->data to cache the results of recursing up the
	 * instr src's.  Otherwise the recursive algo can scale quite
	 * badly w/ shader size.  But this takes some care to clear
	 * the cache appropriately when instructions are scheduled.
	 */
	if (instr->data) {
		if (instr->data == NULL_INSTR)
			return NULL;
		return instr->data;
	}

	/* find unscheduled srcs: */
	foreach_ssa_src(src, instr) {
		if (!is_scheduled(src) && (src->block == instr->block)) {
			debug_assert(nsrcs < ARRAY_SIZE(srcs));
			srcs[nsrcs++] = src;
		}
	}

	/* if all our src's are already scheduled: */
	if (nsrcs == 0) {
		if (check_instr(ctx, notes, instr)) {
			instr->data = instr;
			return instr;
		}
		return NULL;
	}

	while ((src = deepest(srcs, nsrcs))) {
		struct ir3_instruction *candidate;

		candidate = find_instr_recursive(ctx, notes, src);
		if (!candidate)
			continue;

		if (check_instr(ctx, notes, candidate)) {
			instr->data = candidate;
			return candidate;
		}
	}

	instr->data = NULL_INSTR;
	return NULL;
}

/* find instruction to schedule: */
static struct ir3_instruction *
find_eligible_instr(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
		bool soft)
{
	struct ir3_instruction *best_instr = NULL;
	unsigned min_delay = ~0;

	/* TODO we'd really rather use the list/array of block outputs.  But we
	 * don't have such a thing.  Recursing *every* instruction in the list
	 * will result in a lot of repeated traversal, since instructions will
	 * get traversed both when they appear as ssa src to a later instruction
	 * as well as where they appear in the depth_list.
	 */
	list_for_each_entry_rev (struct ir3_instruction, instr, &ctx->depth_list, node) {
		struct ir3_instruction *candidate;
		unsigned delay;

		candidate = find_instr_recursive(ctx, notes, instr);
		if (!candidate)
			continue;

		if (ctx->live_values > 16*4) {
			/* under register pressure, only care about reducing live values: */
			if (!best_instr || (candidate->sun > best_instr->sun))
				best_instr = candidate;
		} else {
			delay = delay_calc(ctx->block, candidate, soft, false);
			if ((delay < min_delay) ||
					((delay <= (min_delay + 2)) && (candidate->sun > best_instr->sun))) {
				best_instr = candidate;
				min_delay = delay;
			}
		}
	}

	return best_instr;
}

/* "spill" the address register by remapping any unscheduled
 * instructions which depend on the current address register
 * to a clone of the instruction which wrote the address reg.
 */
static struct ir3_instruction *
split_addr(struct ir3_sched_ctx *ctx)
{
	struct ir3 *ir;
	struct ir3_instruction *new_addr = NULL;
	unsigned i;

	debug_assert(ctx->addr);

	ir = ctx->addr->block->shader;

	for (i = 0; i < ir->indirects_count; i++) {
		struct ir3_instruction *indirect = ir->indirects[i];

		if (!indirect)
			continue;

		/* skip instructions already scheduled: */
		if (is_scheduled(indirect))
			continue;

		/* remap remaining instructions using current addr
		 * to new addr:
		 */
		if (indirect->address == ctx->addr) {
			if (!new_addr) {
				new_addr = ir3_instr_clone(ctx->addr);
				/* original addr is scheduled, but new one isn't: */
				new_addr->flags &= ~IR3_INSTR_MARK;
			}
			ir3_instr_set_address(indirect, new_addr);
		}
	}

	/* all remaining indirects remapped to new addr: */
	ctx->addr = NULL;

	return new_addr;
}

/* "spill" the predicate register by remapping any unscheduled
 * instructions which depend on the current predicate register
 * to a clone of the instruction which wrote the predicate reg.
 */
static struct ir3_instruction *
split_pred(struct ir3_sched_ctx *ctx)
{
	struct ir3 *ir;
	struct ir3_instruction *new_pred = NULL;
	unsigned i;

	debug_assert(ctx->pred);

	ir = ctx->pred->block->shader;

	for (i = 0; i < ir->predicates_count; i++) {
		struct ir3_instruction *predicated = ir->predicates[i];

		/* skip instructions already scheduled: */
		if (is_scheduled(predicated))
			continue;

		/* remap remaining instructions using current pred
		 * to new pred:
		 *
		 * TODO is there ever a case when pred isn't first
		 * (and only) src?
		 */
		if (ssa(predicated->regs[1]) == ctx->pred) {
			if (!new_pred) {
				new_pred = ir3_instr_clone(ctx->pred);
				/* original pred is scheduled, but new one isn't: */
				new_pred->flags &= ~IR3_INSTR_MARK;
			}
			predicated->regs[1]->instr = new_pred;
		}
	}

	/* all remaining predicated remapped to new pred: */
	ctx->pred = NULL;

	return new_pred;
}

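/* Schedule a single block: greedily pick eligible instructions (stuffing
 * in nop's to satisfy delay slots), breaking addr/pred deadlocks by
 * cloning the register writer, and finally emitting the branch/jump
 * instructions that terminate the block:
 */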
static void
sched_block(struct ir3_sched_ctx *ctx, struct ir3_block *block)
{
	struct list_head unscheduled_list;

	ctx->block = block;

	/* addr/pred writes are per-block: */
	ctx->addr = NULL;
	ctx->pred = NULL;

	/* move all instructions to the unscheduled list, and
	 * empty the block's instruction list (to which we will
	 * be inserting).
	 */
	list_replace(&block->instr_list, &unscheduled_list);
	list_inithead(&block->instr_list);
	list_inithead(&ctx->depth_list);

	/* first a pre-pass to schedule all meta:input instructions
	 * (which need to appear first so that RA knows the register is
	 * occupied), and move remaining to depth sorted list:
	 */
	list_for_each_entry_safe (struct ir3_instruction, instr, &unscheduled_list, node) {
		if (instr->opc == OPC_META_INPUT) {
			schedule(ctx, instr);
		} else {
			ir3_insert_by_depth(instr, &ctx->depth_list);
		}
	}

	while (!list_empty(&ctx->depth_list)) {
		struct ir3_sched_notes notes = {0};
		struct ir3_instruction *instr;

		instr = find_eligible_instr(ctx, &notes, true);
		if (!instr)
			instr = find_eligible_instr(ctx, &notes, false);

		if (instr) {
			unsigned delay = delay_calc(ctx->block, instr, false, false);

			/* and if we run out of instructions that can be scheduled,
			 * then it is time for nop's:
			 */
			debug_assert(delay <= 6);
			while (delay > 0) {
				ir3_NOP(block);
				delay--;
			}

			schedule(ctx, instr);
		} else {
			struct ir3_instruction *new_instr = NULL;

			/* nothing available to schedule.. if we are blocked on
			 * address/predicate register conflict, then break the
			 * deadlock by cloning the instruction that wrote that
			 * reg:
			 */
			if (notes.addr_conflict) {
				new_instr = split_addr(ctx);
			} else if (notes.pred_conflict) {
				new_instr = split_pred(ctx);
			} else {
				debug_assert(0);
				ctx->error = true;
				return;
			}

			if (new_instr) {
				/* clearing current addr/pred can change what is
				 * available to schedule, so clear cache..
				 */
				clear_cache(ctx, NULL);

				ir3_insert_by_depth(new_instr, &ctx->depth_list);
				/* the original instr that wrote addr/pred may have
				 * originated from a different block:
				 */
				new_instr->block = block;
			}
		}
	}

	/* And lastly, insert branch/jump instructions to take us to
	 * the next block.  Later we'll strip back out the branches
	 * that simply jump to next instruction.
	 */
	if (block->successors[1]) {
		/* if/else, conditional branches to "then" or "else": */
		struct ir3_instruction *br;
		unsigned delay = 6;

		debug_assert(ctx->pred);
		debug_assert(block->condition);

		delay -= distance(ctx->block, ctx->pred, delay, false);

		while (delay > 0) {
			ir3_NOP(block);
			delay--;
		}

		/* create "else" branch first (since "then" block should
		 * frequently/always end up being a fall-thru):
		 */
		br = ir3_BR(block);
		br->cat0.inv = true;
		br->cat0.target = block->successors[1];

		/* NOTE: we have to hard code delay of 6 above, since
		 * we want to insert the nop's before constructing the
		 * branch.  Throw in an assert so we notice if this
		 * ever breaks on future generation:
		 */
		debug_assert(ir3_delayslots(ctx->pred, br, 0) == 6);

		br = ir3_BR(block);
		br->cat0.target = block->successors[0];

	} else if (block->successors[0]) {
		/* otherwise unconditional jump to next block: */
		struct ir3_instruction *jmp;

		jmp = ir3_JUMP(block);
		jmp->cat0.target = block->successors[0];
	}

	/* NOTE: if we kept track of the predecessors, we could do a better
	 * job w/ (jp) flags.. every node w/ > 1 predecessor is a join point.
	 * Note that as we eliminate blocks which contain only an unconditional
	 * jump we probably need to propagate (jp) flag..
	 */
}

/* After scheduling individual blocks, we still could have cases where,
 * on one (or more) paths into a block, a value produced by a previous
 * block has too few delay slots to be legal.  We can't deal with this in
 * the first pass, because of loops (ie. we can't ensure all predecessor
 * blocks are already scheduled in the first pass).  All we can really do
 * at this point is stuff in extra nop's until things are legal.
 */
static void
sched_intra_block(struct ir3_sched_ctx *ctx, struct ir3_block *block)
{
	unsigned n = 0;

	ctx->block = block;

	list_for_each_entry_safe (struct ir3_instruction, instr, &block->instr_list, node) {
		unsigned delay = 0;

		for (unsigned i = 0; i < block->predecessors_count; i++) {
			unsigned d = delay_calc(block->predecessors[i], instr, false, true);
			delay = MAX2(d, delay);
		}

		while (delay > n) {
			struct ir3_instruction *nop = ir3_NOP(block);

			/* move to before instr: */
			list_delinit(&nop->node);
			list_addtail(&nop->node, &instr->node);

			n++;
		}

		/* we can bail once we hit worst case delay: */
		if (++n > 6)
			break;
	}
}

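/* Main entry point: schedule each block independently, then make a second
 * pass to pad out delay slots for values coming from predecessor blocks:
 */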
int ir3_sched(struct ir3 *ir)
{
	struct ir3_sched_ctx ctx = {0};

	ir3_clear_mark(ir);

	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		ctx.live_values = 0;
		update_use_count(block);
		sched_block(&ctx, block);
	}

	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		sched_intra_block(&ctx, block);
	}

	if (ctx.error)
		return -1;

	return 0;
}

static unsigned
get_array_id(struct ir3_instruction *instr)
{
	/* The expectation is that there is only a single array
	 * src or dst, ir3_cp should enforce this.
	 */

	for (unsigned i = 0; i < instr->regs_count; i++)
		if (instr->regs[i]->flags & IR3_REG_ARRAY)
			return instr->regs[i]->array.id;

	unreachable("this was unexpected");
}

/* does instruction 'prior' need to be scheduled before 'instr'? */
static bool
depends_on(struct ir3_instruction *instr, struct ir3_instruction *prior)
{
	/* TODO for dependencies that are related to a specific object, ie
	 * a specific SSBO/image/array, we could relax this constraint to
	 * make accesses to unrelated objects not depend on each other (at
	 * least as long as not declared coherent)
	 */
	if (((instr->barrier_class & IR3_BARRIER_EVERYTHING) && prior->barrier_class) ||
			((prior->barrier_class & IR3_BARRIER_EVERYTHING) && instr->barrier_class))
		return true;

	if (instr->barrier_class & prior->barrier_conflict) {
		if (!(instr->barrier_class & ~(IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W))) {
			/* if only array barrier, then we can further limit false-deps
			 * by considering the array-id, ie reads/writes to different
			 * arrays do not depend on each other (no aliasing)
			 */
			if (get_array_id(instr) != get_array_id(prior)) {
				return false;
			}
		}

		return true;
	}

	return false;
}

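/* Walk backward and forward from 'instr', adding false dependencies
 * against instructions in conflicting barrier classes, stopping in each
 * direction at the first instruction of the same class (which transitively
 * orders the rest):
 */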
static void
add_barrier_deps(struct ir3_block *block, struct ir3_instruction *instr)
{
	struct list_head *prev = instr->node.prev;
	struct list_head *next = instr->node.next;

	/* add dependencies on previous instructions that must be scheduled
	 * prior to the current instruction
	 */
	while (prev != &block->instr_list) {
		struct ir3_instruction *pi =
			LIST_ENTRY(struct ir3_instruction, prev, node);

		prev = prev->prev;

		if (is_meta(pi))
			continue;

		if (instr->barrier_class == pi->barrier_class) {
			ir3_instr_add_dep(instr, pi);
			break;
		}

		if (depends_on(instr, pi))
			ir3_instr_add_dep(instr, pi);
	}

	/* add dependencies on this instruction to following instructions
	 * that must be scheduled after the current instruction:
	 */
	while (next != &block->instr_list) {
		struct ir3_instruction *ni =
			LIST_ENTRY(struct ir3_instruction, next, node);

		next = next->next;

		if (is_meta(ni))
			continue;

		if (instr->barrier_class == ni->barrier_class) {
			ir3_instr_add_dep(ni, instr);
			break;
		}

		if (depends_on(ni, instr))
			ir3_instr_add_dep(ni, instr);
	}
}

/* before scheduling a block, we need to add any necessary false-dependencies
 * to ensure that:
 *
 *  (1) barriers are scheduled in the right order wrt instructions related
 *      to the barrier
 *
 *  (2) reads that come before a write actually get scheduled before the
 *      write
 */
static void
calculate_deps(struct ir3_block *block)
{
	list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
		if (instr->barrier_class) {
			add_barrier_deps(block, instr);
		}
	}
}

void
ir3_sched_add_deps(struct ir3 *ir)
{
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		calculate_deps(block);
	}
}