/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "brw_nir.h"
#include "compiler/nir/nir_builder.h"

/*
 * Implements a small peephole optimization that looks for a multiply that
 * is only ever used in an add and replaces both with an fma.
 */

static inline bool
are_all_uses_fadd(nir_ssa_def *def)
{
   if (!list_is_empty(&def->if_uses))
      return false;

   nir_foreach_use(use_src, def) {
      nir_instr *use_instr = use_src->parent_instr;

      if (use_instr->type != nir_instr_type_alu)
         return false;

      nir_alu_instr *use_alu = nir_instr_as_alu(use_instr);
      switch (use_alu->op) {
      case nir_op_fadd:
         break; /* This one's ok */

      case nir_op_mov:
      case nir_op_fneg:
      case nir_op_fabs:
         assert(use_alu->dest.dest.is_ssa);
         if (!are_all_uses_fadd(&use_alu->dest.dest.ssa))
            return false;
         break;

      default:
         return false;
      }
   }

   return true;
}

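/**
 * Walks up from an fadd source through any mov/fneg/fabs instructions and
 * returns the fmul that ultimately feeds it, or NULL if there is no such
 * fmul or if fusing it would be unsafe.  Negate and absolute-value
 * modifiers encountered along the way are accumulated in *negate and *abs,
 * and the source swizzles are composed into swizzle[].
 */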
static nir_alu_instr *
get_mul_for_src(nir_alu_src *src, unsigned num_components,
                uint8_t swizzle[4], bool *negate, bool *abs)
{
   uint8_t swizzle_tmp[4];
   assert(src->src.is_ssa && !src->abs && !src->negate);

   nir_instr *instr = src->src.ssa->parent_instr;
   if (instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(instr);

   /* We want to bail if any of the other ALU operations involved is labeled
    * exact.  One reason for this is that, while the value that is changing
    * is actually the result of the add and not the multiply, the intention
    * of the user when they specify an exact multiply is that they want
    * *that* value and what they don't care about is the add.  Another
    * reason is that SPIR-V explicitly requires this behaviour.
    */
   if (alu->exact)
      return NULL;

   switch (alu->op) {
   case nir_op_mov:
      alu = get_mul_for_src(&alu->src[0], alu->dest.dest.ssa.num_components,
                            swizzle, negate, abs);
      break;

   case nir_op_fneg:
      alu = get_mul_for_src(&alu->src[0], alu->dest.dest.ssa.num_components,
                            swizzle, negate, abs);
      *negate = !*negate;
      break;

   case nir_op_fabs:
      alu = get_mul_for_src(&alu->src[0], alu->dest.dest.ssa.num_components,
                            swizzle, negate, abs);
      *negate = false;
      *abs = true;
      break;

   case nir_op_fmul:
      /* Only absorb an fmul into an ffma if the fmul is only used in fadd
       * operations.  This prevents us from being too aggressive with our
       * fusing, which can actually lead to more instructions.
       */
      if (!are_all_uses_fadd(&alu->dest.dest.ssa))
         return NULL;
      break;

   default:
      return NULL;
   }

   if (!alu)
      return NULL;

   /* Copy swizzle data before overwriting it to avoid setting a wrong swizzle.
    *
    * Example:
    *   Former swizzle[] = xyzw
    *   src->swizzle[] = zyxx
    *
    *   Expected output swizzle = zyxx
    *   If we reuse swizzle in the loop, then output swizzle would be zyzz.
    */
   memcpy(swizzle_tmp, swizzle, 4 * sizeof(uint8_t));
   for (int i = 0; i < num_components; i++)
      swizzle[i] = swizzle_tmp[src->swizzle[i]];

   return alu;
}

/**
 * Given a list of (at least two) nir_alu_src's, returns true if any of them
 * is a constant value that is used only once.
 */
static bool
any_alu_src_is_a_constant(nir_alu_src srcs[])
{
   for (unsigned i = 0; i < 2; i++) {
      if (srcs[i].src.ssa->parent_instr->type == nir_instr_type_load_const) {
         nir_load_const_instr *load_const =
            nir_instr_as_load_const(srcs[i].src.ssa->parent_instr);

         if (list_is_singular(&load_const->def.uses) &&
             list_is_empty(&load_const->def.if_uses)) {
            return true;
         }
      }
   }

   return false;
}

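/*
 * Per-instruction callback for nir_shader_instructions_pass.  If the
 * instruction is a non-exact fadd whose source comes (possibly through a
 * chain of mov/fneg/fabs) from a suitable fmul, the fadd is replaced with
 * an ffma.  Returns true if the instruction was rewritten.
 */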
static bool
brw_nir_opt_peephole_ffma_instr(nir_builder *b,
                                nir_instr *instr,
                                UNUSED void *cb_data)
{
   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *add = nir_instr_as_alu(instr);
   if (add->op != nir_op_fadd)
      return false;

   assert(add->dest.dest.is_ssa);
   if (add->exact)
      return false;

   assert(add->src[0].src.is_ssa && add->src[1].src.is_ssa);

   /* This is the case a + a.  We would rather handle this with an
    * algebraic reduction than fuse it.  Also, we want to only fuse
    * things where the multiply is used only once and, in this case,
    * it would be used twice by the same instruction.
    */
   if (add->src[0].src.ssa == add->src[1].src.ssa)
      return false;

   nir_alu_instr *mul;
   uint8_t add_mul_src, swizzle[4];
   bool negate, abs;
   for (add_mul_src = 0; add_mul_src < 2; add_mul_src++) {
      for (unsigned i = 0; i < 4; i++)
         swizzle[i] = i;

      negate = false;
      abs = false;

      mul = get_mul_for_src(&add->src[add_mul_src],
                            add->dest.dest.ssa.num_components,
                            swizzle, &negate, &abs);

      if (mul != NULL)
         break;
   }

   if (mul == NULL)
      return false;

   unsigned bit_size = add->dest.dest.ssa.bit_size;

   nir_ssa_def *mul_src[2];
   mul_src[0] = mul->src[0].src.ssa;
   mul_src[1] = mul->src[1].src.ssa;

   /* If any of the operands of the fmul and any of the operands of the fadd
    * is a constant, we bypass because it will be more efficient as the
    * constants will be propagated as operands, potentially saving two
    * load_const instructions.
    */
   if (any_alu_src_is_a_constant(mul->src) &&
       any_alu_src_is_a_constant(add->src)) {
      return false;
   }

   b->cursor = nir_before_instr(&add->instr);

   if (abs) {
      for (unsigned i = 0; i < 2; i++)
         mul_src[i] = nir_fabs(b, mul_src[i]);
   }

   if (negate)
      mul_src[0] = nir_fneg(b, mul_src[0]);

   nir_alu_instr *ffma = nir_alu_instr_create(b->shader, nir_op_ffma);
   ffma->dest.saturate = add->dest.saturate;
   ffma->dest.write_mask = add->dest.write_mask;

   for (unsigned i = 0; i < 2; i++) {
      ffma->src[i].src = nir_src_for_ssa(mul_src[i]);
      for (unsigned j = 0; j < add->dest.dest.ssa.num_components; j++)
         ffma->src[i].swizzle[j] = mul->src[i].swizzle[swizzle[j]];
   }
   nir_alu_src_copy(&ffma->src[2], &add->src[1 - add_mul_src]);

   assert(add->dest.dest.is_ssa);

   nir_ssa_dest_init(&ffma->instr, &ffma->dest.dest,
                     add->dest.dest.ssa.num_components,
                     bit_size, NULL);
   nir_ssa_def_rewrite_uses(&add->dest.dest.ssa, &ffma->dest.dest.ssa);

   nir_builder_instr_insert(b, &ffma->instr);
   assert(list_is_empty(&add->dest.dest.ssa.uses));
   nir_instr_remove(&add->instr);

   return true;
}

bool
brw_nir_opt_peephole_ffma(nir_shader *shader)
{
   return nir_shader_instructions_pass(shader, brw_nir_opt_peephole_ffma_instr,
                                       nir_metadata_block_index |
                                       nir_metadata_dominance,
                                       NULL);
}