/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_math.h"

#include "ir3.h"

/*
 * Instruction Depth:
 *
 * Calculates weighted instruction depth, ie. the sum of # of needed
 * instructions plus delay slots back to original input (ie INPUT or
 * CONST). That is to say, an instruction's depth is:
 *
 *   depth(instr) {
 *     d = 0;
 *     // for each src register:
 *     foreach (src in instr->regs[1..n])
 *       d = max(d, delayslots(src->instr, n) + depth(src->instr));
 *     return d + 1;
 *   }
 *
 * After an instruction's depth is calculated, it is inserted into the
 * block's depth-sorted list, which is used by the scheduling pass.
 */
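
/*
 * Worked example (illustrative pseudo-IR, not actual ir3 syntax, using the
 * delay-slot values from ir3_delayslots() below): for a short chain of
 * dependent alu instructions fed by an input,
 *
 *   in  = INPUT          ; meta, depth 0
 *   a   = add in, c0.x   ; depth = 0 + 0 + 1 = 1  (meta src needs no delay)
 *   b   = mul a, a       ; depth = 3 + 1 + 1 = 5  (alu -> alu is 3 slots)
 *   out = mad b, b, a    ; depth = max(3+5, 1+1) + 1 = 9  (3rd cat3 src only needs 1)
 *
 * so "out" is the deepest instruction; the per-block depth-sorted list
 * built from these values is what the scheduling pass consumes.
 */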
/* generally don't count false dependencies, since this can just be
 * something like a barrier, or SSBO store. The exception is array
 * dependencies if the assigner is an array write and the consumer
 * reads the same array.
 */
static bool
ignore_dep(struct ir3_instruction *assigner,
		struct ir3_instruction *consumer, unsigned n)
{
	if (!__is_false_dep(consumer, n))
		return false;

	if (assigner->barrier_class & IR3_BARRIER_ARRAY_W) {
		struct ir3_register *dst = assigner->regs[0];
		struct ir3_register *src;

		debug_assert(dst->flags & IR3_REG_ARRAY);

		foreach_src(src, consumer) {
			if ((src->flags & IR3_REG_ARRAY) &&
					(dst->array.id == src->array.id)) {
				return false;
			}
		}
	}

	return true;
}

/* calculate required # of delay slots between the instruction that
 * assigns a value and the one that consumes it
 */
int ir3_delayslots(struct ir3_instruction *assigner,
		struct ir3_instruction *consumer, unsigned n)
{
	if (ignore_dep(assigner, consumer, n))
		return 0;

	/* worst case is cat1-3 (alu) -> cat4/5 needing 6 cycles, normal
	 * alu -> alu needs 3 cycles, cat4 -> alu and texture fetch
	 * handled with sync bits
	 */

	if (is_meta(assigner) || is_meta(consumer))
		return 0;

	if (writes_addr(assigner))
		return 6;

	/* handled via sync flags: */
	if (is_sfu(assigner) || is_tex(assigner) || is_mem(assigner))
		return 0;

	/* assigner must be alu: */
	if (is_flow(consumer) || is_sfu(consumer) || is_tex(consumer) ||
			is_mem(consumer)) {
		return 6;
	} else if ((is_mad(consumer->opc) || is_madsh(consumer->opc)) &&
			(n == 3)) {
		/* special case, 3rd src to cat3 not required on first cycle */
		return 1;
	} else {
		return 3;
	}
}
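
/*
 * Sketch of how the delay values above are typically consumed (assumed,
 * not part of this file): once two instructions end up at a fixed distance
 * in the emitted program, the gap between them has to cover the required
 * delay slots, conceptually something like:
 *
 *   unsigned delay = ir3_delayslots(assigner, consumer, n);
 *   while (distance_in_cycles(assigner, consumer) < delay)
 *       insert_nop(consumer->block);
 *
 * distance_in_cycles()/insert_nop() are hypothetical helpers used only
 * for illustration; a scheduling/legalization pass would do the
 * equivalent bookkeeping.
 */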
void
ir3_insert_by_depth(struct ir3_instruction *instr, struct list_head *list)
{
	/* remove from existing spot in list: */
	list_delinit(&instr->node);

	/* find where to re-insert instruction: */
	list_for_each_entry (struct ir3_instruction, pos, list, node) {
		if (pos->depth > instr->depth) {
			list_add(&instr->node, &pos->node);
			return;
		}
	}
	/* if we get here, we didn't find an insertion spot: */
	list_addtail(&instr->node, list);
}

static void
ir3_instr_depth(struct ir3_instruction *instr, unsigned boost, bool falsedep)
{
	struct ir3_instruction *src;

	/* don't mark falsedep's as used, but otherwise process them normally: */
	if (!falsedep)
		instr->flags &= ~IR3_INSTR_UNUSED;

	if (ir3_instr_check_mark(instr))
		return;

	instr->depth = 0;

	foreach_ssa_src_n(src, i, instr) {
		unsigned sd;

		/* visit child to compute its depth: */
		ir3_instr_depth(src, boost, __is_false_dep(instr, i));

		/* for array writes, no need to delay on previous write: */
		if (i == 0)
			continue;

		sd = ir3_delayslots(src, instr, i) + src->depth;
		sd += boost;

		instr->depth = MAX2(instr->depth, sd);
	}

	if (!is_meta(instr))
		instr->depth++;

	ir3_insert_by_depth(instr, &instr->block->instr_list);
}
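
/* Dead-code elimination: compute_depth_and_remove_unused() below first
 * flags every instruction as IR3_INSTR_UNUSED, then the depth walk above
 * clears the flag on everything reachable from the outputs, keeps, and
 * block conditions. remove_unused_by_block() drops whatever is still
 * flagged, ie. instructions that were never reached.
 */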
static bool
remove_unused_by_block(struct ir3_block *block)
{
	bool progress = false;
	list_for_each_entry_safe (struct ir3_instruction, instr, &block->instr_list, node) {
		if (instr->opc == OPC_END)
			continue;
		if (instr->flags & IR3_INSTR_UNUSED) {
			if (instr->opc == OPC_META_FO) {
				struct ir3_instruction *src = ssa(instr->regs[1]);
				/* leave inputs alone.. we can't optimize out components of
				 * an input, since the hw is still going to be writing all
				 * of the components, and we could end up in a situation
				 * where multiple inputs overlap.
				 */
				if ((src->opc != OPC_META_INPUT) &&
						(src->regs[0]->wrmask > 1)) {
					src->regs[0]->wrmask &= ~(1 << instr->fo.off);

					/* prune no-longer needed right-neighbors. We could
					 * probably do the same for left-neighbors (ie. tex
					 * fetch that only needs .yw components), but that
					 * makes RA a bit more confusing than it already is.
					 */
					struct ir3_instruction *n = instr;
					while (n && n->cp.right)
						n = n->cp.right;
					while (n->flags & IR3_INSTR_UNUSED) {
						n = n->cp.left;
						if (!n)
							break;
						n->cp.right = NULL;
					}
				}
			}
			list_delinit(&instr->node);
			progress = true;
		}
	}
	return progress;
}

static bool
compute_depth_and_remove_unused(struct ir3 *ir)
{
	unsigned i;
	bool progress = false;

	ir3_clear_mark(ir);

	/* initially mark everything as unused, we'll clear the flag as we
	 * visit the instructions:
	 */
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
			instr->flags |= IR3_INSTR_UNUSED;
		}
	}

	for (i = 0; i < ir->noutputs; i++)
		if (ir->outputs[i])
			ir3_instr_depth(ir->outputs[i], 0, false);

	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		for (i = 0; i < block->keeps_count; i++)
			ir3_instr_depth(block->keeps[i], 0, false);

		/* We also need to account for if-condition: */
		if (block->condition)
			ir3_instr_depth(block->condition, 6, false);
	}

	/* remove un-used instructions: */
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		progress |= remove_unused_by_block(block);
	}

	/* note that we can end up with unused indirects, but we should
	 * not end up with unused predicates.
	 */
	for (i = 0; i < ir->indirects_count; i++) {
		struct ir3_instruction *instr = ir->indirects[i];
		if (instr && (instr->flags & IR3_INSTR_UNUSED))
			ir->indirects[i] = NULL;
	}

	/* cleanup unused inputs: */
	for (i = 0; i < ir->ninputs; i++) {
		struct ir3_instruction *in = ir->inputs[i];
		if (in && (in->flags & IR3_INSTR_UNUSED))
			ir->inputs[i] = NULL;
	}

	return progress;
}

void
ir3_depth(struct ir3 *ir)
{
	bool progress;
	do {
		progress = compute_depth_and_remove_unused(ir);
	} while (progress);
}
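
/*
 * Usage sketch (the scheduler entry point named here is an assumption,
 * not taken from this file): a compile pipeline would run this pass
 * before scheduling, since the scheduler consumes the per-block
 * depth-sorted instruction lists built above:
 *
 *   ir3_depth(ir);      // compute depths + drop dead instructions
 *   ir3_sched(ir);      // assumed scheduler entry point
 */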