/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"
#include "brw_fs.h"
#include "brw_cfg.h"
#include "brw_nir.h"
#include "brw_vec4_builder.h"
#include "brw_vec4_vs.h"
#include "brw_dead_control_flow.h"
#include "dev/intel_debug.h"
#include "program/prog_parameter.h"
#include "util/u_math.h"

#define MAX_INSTRUCTION (1 << 30)

using namespace brw;

namespace brw {

void
src_reg::init()
{
   memset((void*)this, 0, sizeof(*this));
   this->file = BAD_FILE;
   this->type = BRW_REGISTER_TYPE_UD;
}

src_reg::src_reg(enum brw_reg_file file, int nr, const glsl_type *type)
{
   init();

   this->file = file;
   this->nr = nr;
   if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
      this->swizzle = brw_swizzle_for_size(type->vector_elements);
   else
      this->swizzle = BRW_SWIZZLE_XYZW;
   if (type)
      this->type = brw_type_for_base_type(type);
}

/** Generic unset register constructor. */
src_reg::src_reg()
{
   init();
}

src_reg::src_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->offset = 0;
   this->reladdr = NULL;
}

src_reg::src_reg(const dst_reg &reg) :
   backend_reg(reg)
{
   this->reladdr = reg.reladdr;
   this->swizzle = brw_swizzle_for_mask(reg.writemask);
}

void
dst_reg::init()
{
   memset((void*)this, 0, sizeof(*this));
   this->file = BAD_FILE;
   this->type = BRW_REGISTER_TYPE_UD;
   this->writemask = WRITEMASK_XYZW;
}

dst_reg::dst_reg()
{
   init();
}

dst_reg::dst_reg(enum brw_reg_file file, int nr)
{
   init();

   this->file = file;
   this->nr = nr;
}

dst_reg::dst_reg(enum brw_reg_file file, int nr, const glsl_type *type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->nr = nr;
   this->type = brw_type_for_base_type(type);
   this->writemask = writemask;
}

dst_reg::dst_reg(enum brw_reg_file file, int nr, brw_reg_type type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->nr = nr;
   this->type = type;
   this->writemask = writemask;
}

dst_reg::dst_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->offset = 0;
   this->reladdr = NULL;
}

dst_reg::dst_reg(const src_reg &reg) :
   backend_reg(reg)
{
   this->writemask = brw_mask_for_swizzle(reg.swizzle);
   this->reladdr = reg.reladdr;
}

bool
dst_reg::equals(const dst_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           (reladdr == r.reladdr ||
            (reladdr && r.reladdr && reladdr->equals(*r.reladdr))));
}

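/**
 * Returns true if this opcode's message payload is sourced from the GRF
 * rather than from MRFs, so no implied MRF writes need to be set up for it.
 */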
bool
vec4_instruction::is_send_from_grf() const
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GFX7:
   case VEC4_OPCODE_UNTYPED_ATOMIC:
   case VEC4_OPCODE_UNTYPED_SURFACE_READ:
   case VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
   case VEC4_OPCODE_URB_READ:
   case TCS_OPCODE_URB_WRITE:
   case TCS_OPCODE_RELEASE_INPUT:
   case SHADER_OPCODE_BARRIER:
      return true;
   default:
      return false;
   }
}

/**
 * Returns true if this instruction's sources and destinations cannot
 * safely be the same register.
 *
 * In most cases, a register can be written over safely by the same
 * instruction that is its last use.  For a single instruction, the
 * sources are dereferenced before writing of the destination starts
 * (naturally).
 *
 * However, there are a few cases where this can be problematic:
 *
 * - Virtual opcodes that translate to multiple instructions in the
 *   code generator: if src == dst and one instruction writes the
 *   destination before a later instruction reads the source, then
 *   src will have been clobbered.
 *
 * The register allocator uses this information to set up conflicts between
 * GRF sources and the destination.
 */
bool
vec4_instruction::has_source_and_destination_hazard() const
{
   switch (opcode) {
   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
      return true;
   default:
      /* 8-wide compressed DF operations are executed as two 4-wide operations,
       * so we have a src/dst hazard if the first half of the instruction
       * overwrites the source of the second half. Prevent this by marking
       * compressed instructions as having src/dst hazards, so the register
       * allocator assigns safe register regions for dst and srcs.
       */
      return size_written > REG_SIZE;
   }
}

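/**
 * Returns the number of bytes read by source \p arg of this instruction.
 *
 * Send-like opcodes read \c mlen full registers for their payload source;
 * IMM and UNIFORM sources read a single vec4 of their type, and everything
 * else is estimated from the execution size.
 */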
unsigned
vec4_instruction::size_read(unsigned arg) const
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case VEC4_OPCODE_UNTYPED_ATOMIC:
   case VEC4_OPCODE_UNTYPED_SURFACE_READ:
   case VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
   case TCS_OPCODE_URB_WRITE:
      if (arg == 0)
         return mlen * REG_SIZE;
      break;
   case VS_OPCODE_PULL_CONSTANT_LOAD_GFX7:
      if (arg == 1)
         return mlen * REG_SIZE;
      break;
   default:
      break;
   }

   switch (src[arg].file) {
   case BAD_FILE:
      return 0;
   case IMM:
   case UNIFORM:
      return 4 * type_sz(src[arg].type);
   default:
      /* XXX - Represent actual vertical stride. */
      return exec_size * type_sz(src[arg].type);
   }
}

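/**
 * Returns true if this instruction may take source modifiers (abs/negate):
 * math on Gfx6 and sends with a GRF payload may not.
 */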
bool
vec4_instruction::can_do_source_mods(const struct intel_device_info *devinfo)
{
   if (devinfo->ver == 6 && is_math())
      return false;

   if (is_send_from_grf())
      return false;

   if (!backend_instruction::can_do_source_mods())
      return false;

   return true;
}

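/**
 * Returns true if a conditional modifier may be added to this instruction.
 */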
bool
vec4_instruction::can_do_cmod()
{
   if (!backend_instruction::can_do_cmod())
      return false;

   /* The accumulator result appears to get used for the conditional modifier
    * generation.  When negating a UD value, there is a 33rd bit generated for
    * the sign in the accumulator value, so now you can't check, for example,
    * equality with a 32-bit value.  See piglit fs-op-neg-uvec4.
    */
   for (unsigned i = 0; i < 3; i++) {
      if (src[i].file != BAD_FILE &&
          brw_reg_type_is_unsigned_integer(src[i].type) && src[i].negate)
         return false;
   }

   return true;
}

bool
vec4_instruction::can_do_writemask(const struct intel_device_info *devinfo)
{
   switch (opcode) {
   case SHADER_OPCODE_GFX4_SCRATCH_READ:
   case VEC4_OPCODE_DOUBLE_TO_F32:
   case VEC4_OPCODE_DOUBLE_TO_D32:
   case VEC4_OPCODE_DOUBLE_TO_U32:
   case VEC4_OPCODE_TO_DOUBLE:
   case VEC4_OPCODE_PICK_LOW_32BIT:
   case VEC4_OPCODE_PICK_HIGH_32BIT:
   case VEC4_OPCODE_SET_LOW_32BIT:
   case VEC4_OPCODE_SET_HIGH_32BIT:
   case VS_OPCODE_PULL_CONSTANT_LOAD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GFX7:
   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
   case TES_OPCODE_CREATE_INPUT_READ_HEADER:
   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
   case VEC4_OPCODE_URB_READ:
   case SHADER_OPCODE_MOV_INDIRECT:
      return false;
   default:
      /* The MATH instruction on Gfx6 only executes in align1 mode, which does
       * not support writemasking.
       */
      if (devinfo->ver == 6 && is_math())
         return false;

      if (is_tex())
         return false;

      return true;
   }
}

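/**
 * Returns true if the destination and source types of this instruction can
 * be changed in lockstep without affecting the result: only plain MOVs and
 * predicated SELs without source modifiers or saturate qualify.
 */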
bool
vec4_instruction::can_change_types() const
{
   return dst.type == src[0].type &&
          !src[0].abs && !src[0].negate && !saturate &&
          (opcode == BRW_OPCODE_MOV ||
           (opcode == BRW_OPCODE_SEL &&
            dst.type == src[1].type &&
            predicate != BRW_PREDICATE_NONE &&
            !src[1].abs && !src[1].negate));
}

/**
 * Returns how many MRFs an opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the generate_* functions generate additional MOVs
 * for setup.
 */
unsigned
vec4_instruction::implied_mrf_writes() const
{
   if (mlen == 0 || is_send_from_grf())
      return 0;

   switch (opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_POW:
   case TCS_OPCODE_THREAD_END:
      return 2;
   case VS_OPCODE_URB_WRITE:
      return 1;
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return 2;
   case SHADER_OPCODE_GFX4_SCRATCH_READ:
      return 2;
   case SHADER_OPCODE_GFX4_SCRATCH_WRITE:
      return 3;
   case GS_OPCODE_URB_WRITE:
   case GS_OPCODE_URB_WRITE_ALLOCATE:
   case GS_OPCODE_THREAD_END:
      return 0;
   case GS_OPCODE_FF_SYNC:
      return 1;
   case TCS_OPCODE_URB_WRITE:
      return 0;
   case SHADER_OPCODE_SHADER_TIME_ADD:
      return 0;
   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_CMS_W:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
   case SHADER_OPCODE_SAMPLEINFO:
   case SHADER_OPCODE_GET_BUFFER_SIZE:
      return header_size;
   default:
      unreachable("not reached");
   }
}

bool
src_reg::equals(const src_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           !reladdr && !r.reladdr);
}

bool
src_reg::negative_equals(const src_reg &r) const
{
   return this->backend_reg::negative_equals(r) &&
          !reladdr && !r.reladdr;
}

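/**
 * Merges runs of partial-writemask MOVs of VF-representable immediates into
 * the same destination vec4 into a single MOV of a packed vector-float
 * immediate, e.g.
 *
 *    mov vgrf4.x:F, 0.0F
 *    mov vgrf4.yz:F, 1.0F
 *
 * becomes a single mov of vgrf4.xyz from the VF immediate [0F, 1F, 1F, 0F].
 */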
bool
vec4_visitor::opt_vector_float()
{
   bool progress = false;

   foreach_block(block, cfg) {
      unsigned last_reg = ~0u, last_offset = ~0u;
      enum brw_reg_file last_reg_file = BAD_FILE;

      uint8_t imm[4] = { 0 };
      int inst_count = 0;
      vec4_instruction *imm_inst[4];
      unsigned writemask = 0;
      enum brw_reg_type dest_type = BRW_REGISTER_TYPE_F;

      foreach_inst_in_block_safe(vec4_instruction, inst, block) {
         int vf = -1;
         enum brw_reg_type need_type = BRW_REGISTER_TYPE_LAST;

         /* Look for unconditional MOVs from an immediate with a partial
          * writemask.  Skip type-conversion MOVs other than integer 0,
          * where the type doesn't matter.  See if the immediate can be
          * represented as a VF.
          */
         if (inst->opcode == BRW_OPCODE_MOV &&
             inst->src[0].file == IMM &&
             inst->predicate == BRW_PREDICATE_NONE &&
             inst->dst.writemask != WRITEMASK_XYZW &&
             type_sz(inst->src[0].type) < 8 &&
             (inst->src[0].type == inst->dst.type || inst->src[0].d == 0)) {

            vf = brw_float_to_vf(inst->src[0].d);
            need_type = BRW_REGISTER_TYPE_D;

            if (vf == -1) {
               vf = brw_float_to_vf(inst->src[0].f);
               need_type = BRW_REGISTER_TYPE_F;
            }
         } else {
            last_reg = ~0u;
         }

         /* If this wasn't a MOV, or the destination register doesn't match,
          * or we have to switch destination types, then this breaks our
          * sequence.  Combine anything we've accumulated so far.
          */
         if (last_reg != inst->dst.nr ||
             last_offset != inst->dst.offset ||
             last_reg_file != inst->dst.file ||
             (vf > 0 && dest_type != need_type)) {

            if (inst_count > 1) {
               unsigned vf;
               memcpy(&vf, imm, sizeof(vf));
               vec4_instruction *mov = MOV(imm_inst[0]->dst, brw_imm_vf(vf));
               mov->dst.type = dest_type;
               mov->dst.writemask = writemask;
               inst->insert_before(block, mov);

               for (int i = 0; i < inst_count; i++) {
                  imm_inst[i]->remove(block);
               }

               progress = true;
            }

            inst_count = 0;
            last_reg = ~0u;
            writemask = 0;
            dest_type = BRW_REGISTER_TYPE_F;

            for (int i = 0; i < 4; i++) {
               imm[i] = 0;
            }
         }

         /* Record this instruction's value (if it was representable). */
         if (vf != -1) {
            if ((inst->dst.writemask & WRITEMASK_X) != 0)
               imm[0] = vf;
            if ((inst->dst.writemask & WRITEMASK_Y) != 0)
               imm[1] = vf;
            if ((inst->dst.writemask & WRITEMASK_Z) != 0)
               imm[2] = vf;
            if ((inst->dst.writemask & WRITEMASK_W) != 0)
               imm[3] = vf;

            writemask |= inst->dst.writemask;
            imm_inst[inst_count++] = inst;

            last_reg = inst->dst.nr;
            last_offset = inst->dst.offset;
            last_reg_file = inst->dst.file;
            if (vf > 0)
               dest_type = need_type;
         }
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTIONS);

   return progress;
}

/* Replaces unused channels of a swizzle with channels that are used.
 *
 * For instance, this pass transforms
 *
 *    mov vgrf4.yz, vgrf5.wxzy
 *
 * into
 *
 *    mov vgrf4.yz, vgrf5.xxzx
 *
 * This eliminates false uses of some channels, letting dead code elimination
 * remove the instructions that wrote them.
 */
bool
vec4_visitor::opt_reduce_swizzle()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == BAD_FILE ||
          inst->dst.file == ARF ||
          inst->dst.file == FIXED_GRF ||
          inst->is_send_from_grf())
         continue;

      unsigned swizzle;

      /* Determine which channels of the sources are read. */
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH: /* FINISHME: DPH reads only three channels of src0,
                            *           but all four of src1.
                            */
         swizzle = brw_swizzle_for_size(4);
         break;
      case BRW_OPCODE_DP3:
         swizzle = brw_swizzle_for_size(3);
         break;
      case BRW_OPCODE_DP2:
         swizzle = brw_swizzle_for_size(2);
         break;

      case VEC4_OPCODE_TO_DOUBLE:
      case VEC4_OPCODE_DOUBLE_TO_F32:
      case VEC4_OPCODE_DOUBLE_TO_D32:
      case VEC4_OPCODE_DOUBLE_TO_U32:
      case VEC4_OPCODE_PICK_LOW_32BIT:
      case VEC4_OPCODE_PICK_HIGH_32BIT:
      case VEC4_OPCODE_SET_LOW_32BIT:
      case VEC4_OPCODE_SET_HIGH_32BIT:
         swizzle = brw_swizzle_for_size(4);
         break;

      default:
         swizzle = brw_swizzle_for_mask(inst->dst.writemask);
         break;
      }

      /* Update sources' swizzles. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != VGRF &&
             inst->src[i].file != ATTR &&
             inst->src[i].file != UNIFORM)
            continue;

         const unsigned new_swizzle =
            brw_compose_swizzle(swizzle, inst->src[i].swizzle);
         if (inst->src[i].swizzle != new_swizzle) {
            inst->src[i].swizzle = new_swizzle;
            progress = true;
         }
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}

void
vec4_visitor::split_uniform_registers()
{
   /* Prior to this, uniforms have been in an array sized according to
    * the number of vector uniforms present, sparsely filled (so an
    * aggregate results in reg indices being skipped over).  Now we're
    * going to cut those aggregates up so each .nr index is one
    * vector.  The goal is to make elimination of unused uniform
    * components easier later.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM || inst->src[i].nr >= UBO_START)
            continue;

         assert(!inst->src[i].reladdr);

         inst->src[i].nr += inst->src[i].offset / 16;
         inst->src[i].offset %= 16;
      }
   }
}

/* This function returns the register number where we placed the uniform */
static int
set_push_constant_loc(const int nr_uniforms, int *new_uniform_count,
                      const int src, const int size, const int channel_size,
                      int *new_loc, int *new_chan,
                      int *new_chans_used)
{
   int dst;
   /* Find the lowest place we can slot this uniform in. */
   for (dst = 0; dst < nr_uniforms; dst++) {
      if (ALIGN(new_chans_used[dst], channel_size) + size <= 4)
         break;
   }

   assert(dst < nr_uniforms);

   new_loc[src] = dst;
   new_chan[src] = ALIGN(new_chans_used[dst], channel_size);
   new_chans_used[dst] = ALIGN(new_chans_used[dst], channel_size) + size;

   *new_uniform_count = MAX2(*new_uniform_count, dst + 1);
   return dst;
}

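/**
 * Repacks the live uniform channels found by scanning the program, so that
 * unused vectors and channels are dropped, updating param[] and rewriting
 * every UNIFORM source to its new location.  dvec3/dvec4 uniforms are
 * placed first because they must stay aligned to dvec4 (two vec4) slots.
 */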
void
vec4_visitor::pack_uniform_registers()
{
   if (!compiler->compact_params)
      return;

   uint8_t chans_used[this->uniforms];
   int new_loc[this->uniforms];
   int new_chan[this->uniforms];
   bool is_aligned_to_dvec4[this->uniforms];
   int new_chans_used[this->uniforms];
   int channel_sizes[this->uniforms];

   memset(chans_used, 0, sizeof(chans_used));
   memset(new_loc, 0, sizeof(new_loc));
   memset(new_chan, 0, sizeof(new_chan));
   memset(new_chans_used, 0, sizeof(new_chans_used));
   memset(is_aligned_to_dvec4, 0, sizeof(is_aligned_to_dvec4));
   memset(channel_sizes, 0, sizeof(channel_sizes));

   /* Find which uniform vectors are actually used by the program.  We
    * expect unused vector elements when we've moved array access out
    * to pull constants, and from some GLSL code generators like wine.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      unsigned readmask;
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH:
         readmask = 0xf;
         break;
      case BRW_OPCODE_DP3:
         readmask = 0x7;
         break;
      case BRW_OPCODE_DP2:
         readmask = 0x3;
         break;
      default:
         readmask = inst->dst.writemask;
         break;
      }

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM || inst->src[i].nr >= UBO_START)
            continue;

         assert(type_sz(inst->src[i].type) % 4 == 0);
         int channel_size = type_sz(inst->src[i].type) / 4;

         int reg = inst->src[i].nr;
         for (int c = 0; c < 4; c++) {
            if (!(readmask & (1 << c)))
               continue;

            unsigned channel = BRW_GET_SWZ(inst->src[i].swizzle, c) + 1;
            unsigned used = MAX2(chans_used[reg], channel * channel_size);
            if (used <= 4) {
               chans_used[reg] = used;
               channel_sizes[reg] = MAX2(channel_sizes[reg], channel_size);
            } else {
               is_aligned_to_dvec4[reg] = true;
               is_aligned_to_dvec4[reg + 1] = true;
               chans_used[reg + 1] = used - 4;
               channel_sizes[reg + 1] = MAX2(channel_sizes[reg + 1], channel_size);
            }
         }
      }

      if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
          inst->src[0].file == UNIFORM) {
         assert(inst->src[2].file == BRW_IMMEDIATE_VALUE);
         assert(inst->src[0].subnr == 0);

         unsigned bytes_read = inst->src[2].ud;
         assert(bytes_read % 4 == 0);
         unsigned vec4s_read = DIV_ROUND_UP(bytes_read, 16);

         /* We just mark every register touched by a MOV_INDIRECT as being
          * fully used.  This ensures that it doesn't get broken up piecewise
          * by the next part of our packing algorithm.
          */
         int reg = inst->src[0].nr;
         int channel_size = type_sz(inst->src[0].type) / 4;
         for (unsigned i = 0; i < vec4s_read; i++) {
            chans_used[reg + i] = 4;
            channel_sizes[reg + i] = MAX2(channel_sizes[reg + i], channel_size);
         }
      }
   }

   int new_uniform_count = 0;

   /* As the uniforms are going to be reordered, take the data from a temporary
    * copy of the original param[].
    */
   uint32_t *param = ralloc_array(NULL, uint32_t, stage_prog_data->nr_params);
   memcpy(param, stage_prog_data->param,
          sizeof(uint32_t) * stage_prog_data->nr_params);

   /* Now, figure out a packing of the live uniform vectors into our
    * push constants. Start with dvec{3,4} because they are aligned to
    * dvec4 size (2 vec4).
    */
   for (int src = 0; src < uniforms; src++) {
      int size = chans_used[src];

      if (size == 0 || !is_aligned_to_dvec4[src])
         continue;

      /* dvec3 is aligned to dvec4 size, so align the size up to 4 to avoid
       * moving the last component of a dvec3 into the free slot at the end
       * of a previous dvec3.  Those free slots can instead be filled by
       * smaller variables in the next loop.
       */
      size = ALIGN(size, 4);
      int dst = set_push_constant_loc(uniforms, &new_uniform_count,
                                      src, size, channel_sizes[src],
                                      new_loc, new_chan,
                                      new_chans_used);
      /* Move the references to the data */
      for (int j = 0; j < size; j++) {
         stage_prog_data->param[dst * 4 + new_chan[src] + j] =
            param[src * 4 + j];
      }
   }

   /* Continue with the rest of data, which is aligned to vec4. */
   for (int src = 0; src < uniforms; src++) {
      int size = chans_used[src];

      if (size == 0 || is_aligned_to_dvec4[src])
         continue;

      int dst = set_push_constant_loc(uniforms, &new_uniform_count,
                                      src, size, channel_sizes[src],
                                      new_loc, new_chan,
                                      new_chans_used);
      /* Move the references to the data */
      for (int j = 0; j < size; j++) {
         stage_prog_data->param[dst * 4 + new_chan[src] + j] =
            param[src * 4 + j];
      }
   }

   ralloc_free(param);
   this->uniforms = new_uniform_count;
   stage_prog_data->nr_params = new_uniform_count * 4;

   /* Now, update the instructions for our repacked uniforms. */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         int src = inst->src[i].nr;

         if (inst->src[i].file != UNIFORM || inst->src[i].nr >= UBO_START)
            continue;

         int chan = new_chan[src] / channel_sizes[src];
         inst->src[i].nr = new_loc[src];
         inst->src[i].swizzle += BRW_SWIZZLE4(chan, chan, chan, chan);
      }
   }
}

/**
 * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
 *
 * While GLSL IR also performs this optimization, we end up with it in
 * our instruction stream for a couple of reasons.  One is that we
 * sometimes generate silly instructions, for example in array access
 * where we'll generate "ADD offset, index, base" even if base is 0.
 * The other is that GLSL IR's constant propagation doesn't track the
 * components of aggregates, so some VS patterns (initialize matrix to
 * 0, accumulate in vertex blending factors) end up breaking down to
 * instructions involving 0.
 */
bool
vec4_visitor::opt_algebraic()
{
   bool progress = false;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         if (inst->src[0].file != IMM)
            break;

         if (inst->saturate) {
            /* Full mixed-type saturates don't happen.  However, we can end up
             * with things like:
             *
             *    mov.sat(8) g21<1>DF       -1F
             *
             * Other mixed-size-but-same-base-type cases may also be possible.
             */
            if (inst->dst.type != inst->src[0].type &&
                inst->dst.type != BRW_REGISTER_TYPE_DF &&
                inst->src[0].type != BRW_REGISTER_TYPE_F)
               assert(!"unimplemented: saturate mixed types");

            if (brw_saturate_immediate(inst->src[0].type,
                                       &inst->src[0].as_brw_reg())) {
               inst->saturate = false;
               progress = true;
            }
         }
         break;

      case BRW_OPCODE_OR:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case VEC4_OPCODE_UNPACK_UNIFORM:
         if (inst->src[0].file != UNIFORM) {
            inst->opcode = BRW_OPCODE_MOV;
            progress = true;
         }
         break;

      case BRW_OPCODE_ADD:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case BRW_OPCODE_MUL:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            switch (inst->src[0].type) {
            case BRW_REGISTER_TYPE_F:
               inst->src[0] = brw_imm_f(0.0f);
               break;
            case BRW_REGISTER_TYPE_D:
               inst->src[0] = brw_imm_d(0);
               break;
            case BRW_REGISTER_TYPE_UD:
               inst->src[0] = brw_imm_ud(0u);
               break;
            default:
               unreachable("not reached");
            }
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_negative_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0].negate = !inst->src[0].negate;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;
      case SHADER_OPCODE_BROADCAST:
         if (is_uniform(inst->src[0]) ||
             inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DATA_FLOW |
                          DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}

/**
 * Only a limited number of hardware registers may be used for push
 * constants, so this turns access to the overflowed constants into
 * pull constants.
 */
void
vec4_visitor::move_push_constants_to_pull_constants()
{
   int pull_constant_loc[this->uniforms];

   const int max_uniform_components = push_length * 8;

   if (this->uniforms * 4 <= max_uniform_components)
      return;

   assert(compiler->supports_pull_constants);
   assert(compiler->compact_params);

   /* If we got here, we also can't have any push ranges */
   for (unsigned i = 0; i < 4; i++)
      assert(prog_data->base.ubo_ranges[i].length == 0);

   /* Make some sort of choice as to which uniforms get sent to pull
    * constants.  We could potentially do something clever here like
    * look for the most infrequently used uniform vec4s, but leave
    * that for later.
    */
   for (int i = 0; i < this->uniforms * 4; i += 4) {
      pull_constant_loc[i / 4] = -1;

      if (i >= max_uniform_components) {
         uint32_t *values = &stage_prog_data->param[i];

         /* Try to find an existing copy of this uniform in the pull
          * constants if it was part of an array access already.
          */
         for (unsigned int j = 0; j < stage_prog_data->nr_pull_params; j += 4) {
            int matches;

            for (matches = 0; matches < 4; matches++) {
               if (stage_prog_data->pull_param[j + matches] != values[matches])
                  break;
            }

            if (matches == 4) {
               pull_constant_loc[i / 4] = j / 4;
               break;
            }
         }

         if (pull_constant_loc[i / 4] == -1) {
            assert(stage_prog_data->nr_pull_params % 4 == 0);
            pull_constant_loc[i / 4] = stage_prog_data->nr_pull_params / 4;

            for (int j = 0; j < 4; j++) {
               stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
                  values[j];
            }
         }
      }
   }

   /* Now actually rewrite usage of the things we've moved to pull
    * constants.
    */
   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM || inst->src[i].nr >= UBO_START ||
             pull_constant_loc[inst->src[i].nr] == -1)
            continue;

         int uniform = inst->src[i].nr;

         const glsl_type *temp_type = type_sz(inst->src[i].type) == 8 ?
            glsl_type::dvec4_type : glsl_type::vec4_type;
         dst_reg temp = dst_reg(this, temp_type);

         emit_pull_constant_load(block, inst, temp, inst->src[i],
                                 pull_constant_loc[uniform], src_reg());

         inst->src[i].file = temp.file;
         inst->src[i].nr = temp.nr;
         inst->src[i].offset %= 16;
         inst->src[i].reladdr = NULL;
      }
   }

   /* Repack push constants to remove the now-unused ones. */
   pack_uniform_registers();
}

/* Conditions for which we want to avoid setting the dependency control bits */
bool
vec4_visitor::is_dep_ctrl_unsafe(const vec4_instruction *inst)
{
#define IS_DWORD(reg) \
   (reg.type == BRW_REGISTER_TYPE_UD || \
    reg.type == BRW_REGISTER_TYPE_D)

#define IS_64BIT(reg) (reg.file != BAD_FILE && type_sz(reg.type) == 8)

   if (devinfo->ver >= 7) {
      if (IS_64BIT(inst->dst) || IS_64BIT(inst->src[0]) ||
          IS_64BIT(inst->src[1]) || IS_64BIT(inst->src[2]))
         return true;
   }

#undef IS_64BIT
#undef IS_DWORD

   /*
    * mlen:
    * In the presence of send messages, totally interrupt dependency
    * control. They're long enough that the chance of dependency
    * control around them just doesn't matter.
    *
    * predicate:
    * From the Ivy Bridge PRM, volume 4 part 3.7, page 80:
    * When a sequence of NoDDChk and NoDDClr are used, the last instruction that
    * completes the scoreboard clear must have a non-zero execution mask. This
    * means, if any kind of predication can change the execution mask or channel
    * enable of the last instruction, the optimization must be avoided. This is
    * to avoid instructions being shot down the pipeline when no writes are
    * required.
    *
    * math:
    * Dependency control does not work well over math instructions.
    * NB: Discovered empirically
    */
   return (inst->mlen || inst->predicate || inst->is_math());
}

/**
 * Sets the dependency control fields on instructions after register
 * allocation and before the generator is run.
 *
 * When you have a sequence of instructions like:
 *
 * DP4 temp.x vertex uniform[0]
 * DP4 temp.y vertex uniform[0]
 * DP4 temp.z vertex uniform[0]
 * DP4 temp.w vertex uniform[0]
 *
 * The hardware doesn't know that it can actually run the later instructions
 * while the previous ones are in flight, producing stalls.  However, we have
 * manual fields we can set in the instructions that let it do so.
 */
void
vec4_visitor::opt_set_dependency_control()
{
   vec4_instruction *last_grf_write[BRW_MAX_GRF];
   uint8_t grf_channels_written[BRW_MAX_GRF];
   vec4_instruction *last_mrf_write[BRW_MAX_GRF];
   uint8_t mrf_channels_written[BRW_MAX_GRF];

   assert(prog_data->total_grf ||
          !"Must be called after register allocation");

   foreach_block (block, cfg) {
      memset(last_grf_write, 0, sizeof(last_grf_write));
      memset(last_mrf_write, 0, sizeof(last_mrf_write));

      foreach_inst_in_block (vec4_instruction, inst, block) {
         /* If we read from a register that we were doing dependency control
          * on, don't do dependency control across the read.
          */
         for (int i = 0; i < 3; i++) {
            int reg = inst->src[i].nr + inst->src[i].offset / REG_SIZE;
            if (inst->src[i].file == VGRF) {
               last_grf_write[reg] = NULL;
            } else if (inst->src[i].file == FIXED_GRF) {
               memset(last_grf_write, 0, sizeof(last_grf_write));
               break;
            }
            assert(inst->src[i].file != MRF);
         }

         if (is_dep_ctrl_unsafe(inst)) {
            memset(last_grf_write, 0, sizeof(last_grf_write));
            memset(last_mrf_write, 0, sizeof(last_mrf_write));
            continue;
         }

         /* Now, see if we can do dependency control for this instruction
          * against a previous one writing to its destination.
          */
         int reg = inst->dst.nr + inst->dst.offset / REG_SIZE;
         if (inst->dst.file == VGRF || inst->dst.file == FIXED_GRF) {
            if (last_grf_write[reg] &&
                last_grf_write[reg]->dst.offset == inst->dst.offset &&
                !(inst->dst.writemask & grf_channels_written[reg])) {
               last_grf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               grf_channels_written[reg] = 0;
            }

            last_grf_write[reg] = inst;
            grf_channels_written[reg] |= inst->dst.writemask;
         } else if (inst->dst.file == MRF) {
            if (last_mrf_write[reg] &&
                last_mrf_write[reg]->dst.offset == inst->dst.offset &&
                !(inst->dst.writemask & mrf_channels_written[reg])) {
               last_mrf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               mrf_channels_written[reg] = 0;
            }

            last_mrf_write[reg] = inst;
            mrf_channels_written[reg] |= inst->dst.writemask;
         }
      }
   }
}

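/**
 * Returns true if this instruction's sources can be composed with
 * \p swizzle so that its result lands directly in the reswizzled channels,
 * as register coalescing requires when it rewrites the producer of a
 * coalesced MOV.
 */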
bool
vec4_instruction::can_reswizzle(const struct intel_device_info *devinfo,
                                int dst_writemask,
                                int swizzle,
                                int swizzle_mask)
{
   /* Gfx6 MATH instructions cannot execute in align16 mode, so swizzles
    * are not allowed.
    */
   if (devinfo->ver == 6 && is_math() && swizzle != BRW_SWIZZLE_XYZW)
      return false;

   /* If we write to the flag register, changing the swizzle would change
    * which channels are written to the flag register.
    */
   if (writes_flag(devinfo))
      return false;

   /* We can't swizzle implicit accumulator access.  We'd have to
    * reswizzle the producer of the accumulator value in addition
    * to the consumer (i.e. both MUL and MACH).  Just skip this.
    */
   if (reads_accumulator_implicitly())
      return false;

   if (!can_do_writemask(devinfo) && dst_writemask != WRITEMASK_XYZW)
      return false;

   /* If this instruction sets anything not referenced by swizzle, then we'd
    * totally break it when we reswizzle.
    */
   if (dst.writemask & ~swizzle_mask)
      return false;

   if (mlen > 0)
      return false;

   for (int i = 0; i < 3; i++) {
      if (src[i].is_accumulator())
         return false;
   }

   return true;
}

/**
 * For any channels in the swizzle's source that were populated by this
 * instruction, rewrite the instruction to put the appropriate result directly
 * in those channels.
 *
 * e.g. for swizzle=yywx, MUL a.xy b c -> MUL a.yy_x b.yy c.yy_x
 */
118101e04c3fSmrgvoid
118201e04c3fSmrgvec4_instruction::reswizzle(int dst_writemask, int swizzle)
118301e04c3fSmrg{
118401e04c3fSmrg   /* Destination write mask doesn't correspond to source swizzle for the dot
118501e04c3fSmrg    * product and pack_bytes instructions.
118601e04c3fSmrg    */
118701e04c3fSmrg   if (opcode != BRW_OPCODE_DP4 && opcode != BRW_OPCODE_DPH &&
118801e04c3fSmrg       opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2 &&
118901e04c3fSmrg       opcode != VEC4_OPCODE_PACK_BYTES) {
119001e04c3fSmrg      for (int i = 0; i < 3; i++) {
11919f464c52Smaya         if (src[i].file == BAD_FILE)
119201e04c3fSmrg            continue;
119301e04c3fSmrg
11949f464c52Smaya         if (src[i].file == IMM) {
11959f464c52Smaya            assert(src[i].type != BRW_REGISTER_TYPE_V &&
11969f464c52Smaya                   src[i].type != BRW_REGISTER_TYPE_UV);
11979f464c52Smaya
11989f464c52Smaya            /* Vector immediate types need to be reswizzled. */
11999f464c52Smaya            if (src[i].type == BRW_REGISTER_TYPE_VF) {
12009f464c52Smaya               const unsigned imm[] = {
12019f464c52Smaya                  (src[i].ud >>  0) & 0x0ff,
12029f464c52Smaya                  (src[i].ud >>  8) & 0x0ff,
12039f464c52Smaya                  (src[i].ud >> 16) & 0x0ff,
12049f464c52Smaya                  (src[i].ud >> 24) & 0x0ff,
12059f464c52Smaya               };
12069f464c52Smaya
12079f464c52Smaya               src[i] = brw_imm_vf4(imm[BRW_GET_SWZ(swizzle, 0)],
12089f464c52Smaya                                    imm[BRW_GET_SWZ(swizzle, 1)],
12099f464c52Smaya                                    imm[BRW_GET_SWZ(swizzle, 2)],
12109f464c52Smaya                                    imm[BRW_GET_SWZ(swizzle, 3)]);
12119f464c52Smaya            }
12129f464c52Smaya
12139f464c52Smaya            continue;
12149f464c52Smaya         }
12159f464c52Smaya
121601e04c3fSmrg         src[i].swizzle = brw_compose_swizzle(swizzle, src[i].swizzle);
121701e04c3fSmrg      }
121801e04c3fSmrg   }
121901e04c3fSmrg
122001e04c3fSmrg   /* Apply the specified swizzle and writemask to the original mask of
122101e04c3fSmrg    * written components.
122201e04c3fSmrg    */
122301e04c3fSmrg   dst.writemask = dst_writemask &
122401e04c3fSmrg                   brw_apply_swizzle_to_mask(swizzle, dst.writemask);
122501e04c3fSmrg}
122601e04c3fSmrg
122701e04c3fSmrg/*
122801e04c3fSmrg * Tries to reduce extra MOV instructions by taking temporary GRFs that get
122901e04c3fSmrg * just written and then MOVed into another reg and making the original write
123001e04c3fSmrg * of the GRF write directly to the final destination instead.
123101e04c3fSmrg */
bool
vec4_visitor::opt_register_coalesce()
{
   bool progress = false;
   int next_ip = 0;
   const vec4_live_variables &live = live_analysis.require();

   foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          (inst->dst.file != VGRF && inst->dst.file != MRF) ||
          inst->predicate ||
          inst->src[0].file != VGRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].reladdr)
         continue;

      /* Remove no-op MOVs */
      if (inst->dst.file == inst->src[0].file &&
          inst->dst.nr == inst->src[0].nr &&
          inst->dst.offset == inst->src[0].offset) {
         bool is_nop_mov = true;

         for (unsigned c = 0; c < 4; c++) {
            if ((inst->dst.writemask & (1 << c)) == 0)
               continue;

            if (BRW_GET_SWZ(inst->src[0].swizzle, c) != c) {
               is_nop_mov = false;
               break;
            }
         }

         if (is_nop_mov) {
            inst->remove(block);
            progress = true;
            continue;
         }
      }

      bool to_mrf = (inst->dst.file == MRF);

      /* Can't coalesce this GRF if someone else was going to
       * read it later.
       */
      if (live.var_range_end(var_from_reg(alloc, dst_reg(inst->src[0])), 8) > ip)
         continue;

      /* We need to check interference with the final destination between this
       * instruction and the earliest instruction involved in writing the GRF
       * we're eliminating.  To do that, keep track of which of our source
       * channels we've seen initialized.
       */
      const unsigned chans_needed =
         brw_apply_inv_swizzle_to_mask(inst->src[0].swizzle,
                                       inst->dst.writemask);
      unsigned chans_remaining = chans_needed;

      /* Now walk up the instruction stream trying to see if we can rewrite
       * everything writing to the temporary to write into the destination
       * instead.
       */
      vec4_instruction *_scan_inst = (vec4_instruction *)inst->prev;
      foreach_inst_in_block_reverse_starting_from(vec4_instruction, scan_inst,
                                                  inst) {
         _scan_inst = scan_inst;

         if (regions_overlap(inst->src[0], inst->size_read(0),
                             scan_inst->dst, scan_inst->size_written)) {
            /* Found something writing to the reg we want to coalesce away. */
            if (to_mrf) {
               /* SEND instructions can't have MRF as a destination. */
               if (scan_inst->mlen)
                  break;

               if (devinfo->ver == 6) {
                  /* gfx6 math instructions must have the destination be
                   * VGRF, so no compute-to-MRF for them.
                   */
                  if (scan_inst->is_math()) {
                     break;
                  }
               }
            }

            /* VS_OPCODE_UNPACK_FLAGS_SIMD4X2 generates a bunch of mov(1)
             * instructions, and this optimization pass is not capable of
             * handling that.  Bail on these instructions and hope that some
             * later optimization pass can do the right thing after they are
             * expanded.
             */
            if (scan_inst->opcode == VS_OPCODE_UNPACK_FLAGS_SIMD4X2)
               break;

            /* This doesn't handle saturation on the instruction we
             * want to coalesce away if the register types do not match.
             * But if scan_inst is a non type-converting 'mov', we can fix
             * the types later.
             */
            if (inst->saturate &&
                inst->dst.type != scan_inst->dst.type &&
                !(scan_inst->opcode == BRW_OPCODE_MOV &&
                  scan_inst->dst.type == scan_inst->src[0].type))
               break;

            /* Only allow coalescing between registers of the same type size.
             * Otherwise we would need to make the pass aware of the fact that
             * channel sizes are different for single and double precision.
             */
            if (type_sz(inst->src[0].type) != type_sz(scan_inst->src[0].type))
               break;

            /* Check that scan_inst writes the same amount of data as the
             * instruction, otherwise coalescing would lead to writing a
             * different (larger or smaller) region of the destination
             */
            if (scan_inst->size_written != inst->size_written)
               break;

            /* If we can't handle the swizzle, bail. */
            if (!scan_inst->can_reswizzle(devinfo, inst->dst.writemask,
                                          inst->src[0].swizzle,
                                          chans_needed)) {
               break;
            }

            /* This only handles coalescing writes of 8 channels (1 register
             * for single-precision and 2 registers for double-precision)
             * starting at the source offset of the copy instruction.
             */
            if (DIV_ROUND_UP(scan_inst->size_written,
                             type_sz(scan_inst->dst.type)) > 8 ||
                scan_inst->dst.offset != inst->src[0].offset)
               break;

            /* Mark which channels we found unconditional writes for. */
            if (!scan_inst->predicate)
               chans_remaining &= ~scan_inst->dst.writemask;

            if (chans_remaining == 0)
               break;
         }

         /* You can't read from an MRF, so if someone else reads our MRF's
          * source GRF that we wanted to rewrite, that stops us.  If it's a
          * GRF we're trying to coalesce to, we don't actually handle
          * rewriting sources so bail in that case as well.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (regions_overlap(inst->src[0], inst->size_read(0),
                                scan_inst->src[i], scan_inst->size_read(i)))
               interfered = true;
         }
         if (interfered)
            break;

         /* If somebody else writes the same channels of our destination here,
          * we can't coalesce before that.
          */
         if (regions_overlap(inst->dst, inst->size_written,
                             scan_inst->dst, scan_inst->size_written) &&
             (inst->dst.writemask & scan_inst->dst.writemask) != 0) {
            break;
         }

         /* Check for reads of the register we're trying to coalesce into.  We
          * can't go rewriting instructions above that to put some other value
          * in the register instead.
          */
         if (to_mrf && scan_inst->mlen > 0) {
            unsigned start = scan_inst->base_mrf;
            unsigned end = scan_inst->base_mrf + scan_inst->mlen;

            if (inst->dst.nr >= start && inst->dst.nr < end) {
               break;
            }
         } else {
            for (int i = 0; i < 3; i++) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   scan_inst->src[i], scan_inst->size_read(i)))
                  interfered = true;
            }
            if (interfered)
               break;
         }
      }
      if (chans_remaining == 0) {
         /* If we've made it here, we have a MOV we want to coalesce out, and
          * a scan_inst pointing to the earliest instruction involved in
          * computing the value.  Now go rewrite the instruction stream
          * between the two.
          */
         vec4_instruction *scan_inst = _scan_inst;
         while (scan_inst != inst) {
            if (scan_inst->dst.file == VGRF &&
                scan_inst->dst.nr == inst->src[0].nr &&
                scan_inst->dst.offset == inst->src[0].offset) {
               scan_inst->reswizzle(inst->dst.writemask,
                                    inst->src[0].swizzle);
               scan_inst->dst.file = inst->dst.file;
               scan_inst->dst.nr = inst->dst.nr;
               scan_inst->dst.offset = inst->dst.offset;
               if (inst->saturate &&
                   inst->dst.type != scan_inst->dst.type) {
                  /* If we have reached this point, scan_inst is a non
                   * type-converting 'mov' and we can modify its register types
                   * to match the ones in inst. Otherwise, we could have an
                   * incorrect saturation result.
                   */
                  scan_inst->dst.type = inst->dst.type;
                  scan_inst->src[0].type = inst->src[0].type;
               }
               scan_inst->saturate |= inst->saturate;
            }
            scan_inst = (vec4_instruction *)scan_inst->next;
         }
         inst->remove(block);
         progress = true;
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTIONS);

   return progress;
}
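
/* A sketch of the transformation this pass performs, on hypothetical IR
 * (register numbers invented for illustration):
 *
 *    mul vgrf3.xy:F, vgrf1, vgrf2
 *    mov m4.xy:F, vgrf3.xyyy        <- copy to be coalesced away
 * becomes
 *    mul m4.xy:F, vgrf1, vgrf2      <- producer retargeted, MOV removed
 *
 * provided vgrf3 is not read again later and nothing between the two
 * instructions interferes with m4.
 */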

/**
 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
 * flow.  We could probably do better here with some form of divergence
 * analysis.
 */
bool
vec4_visitor::eliminate_find_live_channel()
{
   bool progress = false;
   unsigned depth = 0;

   if (!brw_stage_has_packed_dispatch(devinfo, stage, stage_prog_data)) {
      /* The optimization below assumes that channel zero is live on thread
       * dispatch, which may not be the case if the fixed function dispatches
       * threads sparsely.
       */
      return false;
   }

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_IF:
      case BRW_OPCODE_DO:
         depth++;
         break;

      case BRW_OPCODE_ENDIF:
      case BRW_OPCODE_WHILE:
         depth--;
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         if (depth == 0) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0] = brw_imm_d(0);
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}
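
/* Outside control flow every channel enabled at dispatch is still live, and
 * with packed dispatch channel 0 is guaranteed to be one of them, so the
 * rewrite above amounts to (illustrative IR):
 *
 *    find_live_channel vgrf7:D
 * => mov vgrf7:D, 0:D   NoMask
 */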

/**
 * Splits virtual GRFs requesting more than one contiguous physical register.
 *
 * We initially create large virtual GRFs for temporary structures, arrays,
 * and matrices, so that the visitor functions can add offsets to work their
 * way down to the actual member being accessed.  But when it comes to
 * optimization, we'd like to treat each register as individual storage if
 * possible.
 *
 * So far, the only thing that might prevent splitting is a send message from
 * a GRF on IVB.
 */
void
vec4_visitor::split_virtual_grfs()
{
   int num_vars = this->alloc.count;
   int new_virtual_grf[num_vars];
   bool split_grf[num_vars];

   memset(new_virtual_grf, 0, sizeof(new_virtual_grf));

   /* Try to split anything > 0 sized. */
   for (int i = 0; i < num_vars; i++) {
      split_grf[i] = this->alloc.sizes[i] != 1;
   }

   /* Check that the instructions are compatible with the registers we're trying
    * to split.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && regs_written(inst) > 1)
         split_grf[inst->dst.nr] = false;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && regs_read(inst, i) > 1)
            split_grf[inst->src[i].nr] = false;
      }
   }

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (!split_grf[i])
         continue;

      new_virtual_grf[i] = alloc.allocate(1);
      for (unsigned j = 2; j < this->alloc.sizes[i]; j++) {
         unsigned reg = alloc.allocate(1);
         assert(reg == new_virtual_grf[i] + j - 1);
         (void) reg;
      }
      this->alloc.sizes[i] = 1;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && split_grf[inst->dst.nr] &&
          inst->dst.offset / REG_SIZE != 0) {
         inst->dst.nr = (new_virtual_grf[inst->dst.nr] +
                         inst->dst.offset / REG_SIZE - 1);
         inst->dst.offset %= REG_SIZE;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && split_grf[inst->src[i].nr] &&
             inst->src[i].offset / REG_SIZE != 0) {
            inst->src[i].nr = (new_virtual_grf[inst->src[i].nr] +
                                inst->src[i].offset / REG_SIZE - 1);
            inst->src[i].offset %= REG_SIZE;
         }
      }
   }
   invalidate_analysis(DEPENDENCY_INSTRUCTION_DETAIL | DEPENDENCY_VARIABLES);
}
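
/* Worked example (register numbers hypothetical): a size-3 vgrf2 keeps its
 * number for offset 0, and two fresh size-1 registers are allocated for the
 * remaining REG_SIZE slices, so
 *
 *    mov vgrf5, vgrf2 +2.0       (offset of two full registers)
 * => mov vgrf5, vgrf9            (vgrf9 == new_virtual_grf[2] + 1)
 *
 * after which every slice can be tracked and allocated independently.
 */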

void
vec4_visitor::dump_instruction(const backend_instruction *be_inst) const
{
   dump_instruction(be_inst, stderr);
}

void
vec4_visitor::dump_instruction(const backend_instruction *be_inst, FILE *file) const
{
   const vec4_instruction *inst = (const vec4_instruction *)be_inst;

   if (inst->predicate) {
      fprintf(file, "(%cf%d.%d%s) ",
              inst->predicate_inverse ? '-' : '+',
              inst->flag_subreg / 2,
              inst->flag_subreg % 2,
              pred_ctrl_align16[inst->predicate]);
   }

   fprintf(file, "%s(%d)", brw_instruction_name(devinfo, inst->opcode),
           inst->exec_size);
   if (inst->saturate)
      fprintf(file, ".sat");
   if (inst->conditional_mod) {
      fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
      if (!inst->predicate &&
          (devinfo->ver < 5 || (inst->opcode != BRW_OPCODE_SEL &&
                                inst->opcode != BRW_OPCODE_CSEL &&
                                inst->opcode != BRW_OPCODE_IF &&
                                inst->opcode != BRW_OPCODE_WHILE))) {
         fprintf(file, ".f%d.%d", inst->flag_subreg / 2, inst->flag_subreg % 2);
      }
   }
   fprintf(file, " ");

   switch (inst->dst.file) {
   case VGRF:
      fprintf(file, "vgrf%d", inst->dst.nr);
      break;
   case FIXED_GRF:
      fprintf(file, "g%d", inst->dst.nr);
      break;
   case MRF:
      fprintf(file, "m%d", inst->dst.nr);
      break;
   case ARF:
      switch (inst->dst.nr) {
      case BRW_ARF_NULL:
         fprintf(file, "null");
         break;
      case BRW_ARF_ADDRESS:
         fprintf(file, "a0.%d", inst->dst.subnr);
         break;
      case BRW_ARF_ACCUMULATOR:
         fprintf(file, "acc%d", inst->dst.subnr);
         break;
      case BRW_ARF_FLAG:
         fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
         break;
      default:
         fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
         break;
      }
      break;
   case BAD_FILE:
      fprintf(file, "(null)");
      break;
   case IMM:
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }
   if (inst->dst.offset ||
       (inst->dst.file == VGRF &&
        alloc.sizes[inst->dst.nr] * REG_SIZE != inst->size_written)) {
      const unsigned reg_size = (inst->dst.file == UNIFORM ? 16 : REG_SIZE);
      fprintf(file, "+%d.%d", inst->dst.offset / reg_size,
              inst->dst.offset % reg_size);
   }
   if (inst->dst.writemask != WRITEMASK_XYZW) {
      fprintf(file, ".");
      if (inst->dst.writemask & 1)
         fprintf(file, "x");
      if (inst->dst.writemask & 2)
         fprintf(file, "y");
      if (inst->dst.writemask & 4)
         fprintf(file, "z");
      if (inst->dst.writemask & 8)
         fprintf(file, "w");
   }
   fprintf(file, ":%s", brw_reg_type_to_letters(inst->dst.type));

   if (inst->src[0].file != BAD_FILE)
      fprintf(file, ", ");

   for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
      if (inst->src[i].negate)
         fprintf(file, "-");
      if (inst->src[i].abs)
         fprintf(file, "|");
      switch (inst->src[i].file) {
      case VGRF:
         fprintf(file, "vgrf%d", inst->src[i].nr);
         break;
      case FIXED_GRF:
         fprintf(file, "g%d.%d", inst->src[i].nr, inst->src[i].subnr);
         break;
      case ATTR:
         fprintf(file, "attr%d", inst->src[i].nr);
         break;
      case UNIFORM:
         fprintf(file, "u%d", inst->src[i].nr);
         break;
      case IMM:
         switch (inst->src[i].type) {
         case BRW_REGISTER_TYPE_F:
            fprintf(file, "%fF", inst->src[i].f);
            break;
         case BRW_REGISTER_TYPE_DF:
            fprintf(file, "%fDF", inst->src[i].df);
            break;
         case BRW_REGISTER_TYPE_D:
            fprintf(file, "%dD", inst->src[i].d);
            break;
         case BRW_REGISTER_TYPE_UD:
            fprintf(file, "%uU", inst->src[i].ud);
            break;
         case BRW_REGISTER_TYPE_VF:
            fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
                    brw_vf_to_float((inst->src[i].ud >>  0) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >>  8) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
            break;
         default:
            fprintf(file, "???");
            break;
         }
         break;
      case ARF:
         switch (inst->src[i].nr) {
         case BRW_ARF_NULL:
            fprintf(file, "null");
            break;
         case BRW_ARF_ADDRESS:
            fprintf(file, "a0.%d", inst->src[i].subnr);
            break;
         case BRW_ARF_ACCUMULATOR:
            fprintf(file, "acc%d", inst->src[i].subnr);
            break;
         case BRW_ARF_FLAG:
            fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
            break;
         default:
            fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
            break;
         }
         break;
      case BAD_FILE:
         fprintf(file, "(null)");
         break;
      case MRF:
         unreachable("not reached");
      }

      if (inst->src[i].offset ||
          (inst->src[i].file == VGRF &&
           alloc.sizes[inst->src[i].nr] * REG_SIZE != inst->size_read(i))) {
         const unsigned reg_size = (inst->src[i].file == UNIFORM ? 16 : REG_SIZE);
         fprintf(file, "+%d.%d", inst->src[i].offset / reg_size,
                 inst->src[i].offset % reg_size);
      }

      if (inst->src[i].file != IMM) {
         static const char *chans[4] = {"x", "y", "z", "w"};
         fprintf(file, ".");
         for (int c = 0; c < 4; c++) {
            fprintf(file, "%s", chans[BRW_GET_SWZ(inst->src[i].swizzle, c)]);
         }
      }

      if (inst->src[i].abs)
         fprintf(file, "|");

      if (inst->src[i].file != IMM) {
         fprintf(file, ":%s", brw_reg_type_to_letters(inst->src[i].type));
      }

      if (i < 2 && inst->src[i + 1].file != BAD_FILE)
         fprintf(file, ", ");
   }

   if (inst->force_writemask_all)
      fprintf(file, " NoMask");

   if (inst->exec_size != 8)
      fprintf(file, " group%d", inst->group);

   fprintf(file, "\n");
}
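
/* A hypothetical line of output from the dumper above, for a predicated,
 * saturating MUL whose destination only writes .xy (all numbers invented):
 *
 *    (+f0.0) mul(8).sat vgrf5.xy:F, vgrf3.xyyy:F, u2.wwww:F
 */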

int
vec4_vs_visitor::setup_attributes(int payload_reg)
{
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == ATTR) {
            assert(inst->src[i].offset % REG_SIZE == 0);
            int grf = payload_reg + inst->src[i].nr +
                      inst->src[i].offset / REG_SIZE;

            struct brw_reg reg = brw_vec8_grf(grf, 0);
            reg.swizzle = inst->src[i].swizzle;
            reg.type = inst->src[i].type;
            reg.abs = inst->src[i].abs;
            reg.negate = inst->src[i].negate;
            inst->src[i] = reg;
         }
      }
   }

   return payload_reg + vs_prog_data->nr_attribute_slots;
}

void
vec4_visitor::setup_push_ranges()
{
   /* Only allow 32 registers (256 uniform components) as push constants,
    * which is the limit on gfx6.
    *
    * If changing this value, note the limitation about total_regs in
    * brw_curbe.c.
    */
   const unsigned max_push_length = 32;

   push_length = DIV_ROUND_UP(prog_data->base.nr_params, 8);
   push_length = MIN2(push_length, max_push_length);

   /* Shrink UBO push ranges so it all fits in max_push_length */
   for (unsigned i = 0; i < 4; i++) {
      struct brw_ubo_range *range = &prog_data->base.ubo_ranges[i];

      if (push_length + range->length > max_push_length)
         range->length = max_push_length - push_length;

      push_length += range->length;
   }
   assert(push_length <= max_push_length);
}
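
/* Worked example with invented numbers: nr_params == 20 gives
 * DIV_ROUND_UP(20, 8) == 3 registers of plain uniforms.  UBO ranges of
 * length 24, 16, 0, 0 would then be clamped: the first fits (3 + 24 == 27),
 * the second exceeds the 32-register budget and shrinks from 16 to
 * 32 - 27 == 5, and push_length ends up exactly 32.
 */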

int
vec4_visitor::setup_uniforms(int reg)
{
   /* It's possible that uniform compaction will shrink further than expected
    * so we re-compute the layout and set up our UBO push starts.
    */
   const unsigned old_push_length = push_length;
   push_length = DIV_ROUND_UP(prog_data->base.nr_params, 8);
   for (unsigned i = 0; i < 4; i++) {
      ubo_push_start[i] = push_length;
      push_length += stage_prog_data->ubo_ranges[i].length;
   }
   assert(push_length <= old_push_length);
   if (push_length < old_push_length)
      assert(compiler->compact_params);

   /* The pre-gfx6 VS requires that some push constants get loaded no
    * matter what, or the GPU would hang.
    */
   if (devinfo->ver < 6 && push_length == 0) {
      brw_stage_prog_data_add_params(stage_prog_data, 4);
      for (unsigned int i = 0; i < 4; i++) {
         unsigned int slot = this->uniforms * 4 + i;
         stage_prog_data->param[slot] = BRW_PARAM_BUILTIN_ZERO;
      }
      push_length = 1;
   }

   prog_data->base.dispatch_grf_start_reg = reg;
   prog_data->base.curb_read_length = push_length;

   return reg + push_length;
}

void
vec4_vs_visitor::setup_payload(void)
{
   int reg = 0;

   /* The payload always contains important data in g0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.  So, we always start push constants at g1.
    */
   reg++;

   reg = setup_uniforms(reg);

   reg = setup_attributes(reg);

   this->first_non_payload_grf = reg;
}
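
/* Resulting VS payload layout, assuming (hypothetically) push_length == 2
 * and two attribute slots:
 *
 *    g0        fixed-function header (URB handles)
 *    g1 - g2   push constants / UBO ranges
 *    g3 - g4   vertex attributes
 *    g5 ...    first_non_payload_grf, free for allocation
 */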

bool
vec4_visitor::lower_minmax()
{
   assert(devinfo->ver < 6);

   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      const vec4_builder ibld(this, block, inst);

      if (inst->opcode == BRW_OPCODE_SEL &&
          inst->predicate == BRW_PREDICATE_NONE) {
         /* If src1 is an immediate that is known not to be NaN, the
          * comparison can never involve a NaN on that side, so emit CMP
          * because it is much better for cmod propagation.  Likewise if src1
          * is not float.  Gfx4 and Gfx5 don't support HF or DF, so it is not
          * necessary to check for those.
          */
         if (inst->src[1].type != BRW_REGISTER_TYPE_F ||
             (inst->src[1].file == IMM && !isnan(inst->src[1].f))) {
            ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
                     inst->conditional_mod);
         } else {
            ibld.CMPN(ibld.null_reg_d(), inst->src[0], inst->src[1],
                      inst->conditional_mod);
         }
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst->conditional_mod = BRW_CONDITIONAL_NONE;

         progress = true;
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTIONS);

   return progress;
}
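
/* Illustrative lowering of a MIN on pre-gfx6 hardware (IR invented):
 *
 *    sel.l vgrf4:F, vgrf1:F, vgrf2:F
 * becomes
 *    cmp.l.f0.0 null:D, vgrf1:F, vgrf2:F    (or cmpn if src1 may be NaN)
 *    (+f0.0) sel vgrf4:F, vgrf1:F, vgrf2:F
 */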

src_reg
vec4_visitor::get_timestamp()
{
   assert(devinfo->ver == 7);

   src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                BRW_ARF_TIMESTAMP,
                                0,
                                0,
                                0,
                                BRW_REGISTER_TYPE_UD,
                                BRW_VERTICAL_STRIDE_0,
                                BRW_WIDTH_4,
                                BRW_HORIZONTAL_STRIDE_4,
                                BRW_SWIZZLE_XYZW,
                                WRITEMASK_XYZW));

   dst_reg dst = dst_reg(this, glsl_type::uvec4_type);

   vec4_instruction *mov = emit(MOV(dst, ts));
   /* We want to read the 3 fields we care about (mostly field 0, but also 2)
    * even if it's not enabled in the dispatch.
    */
   mov->force_writemask_all = true;

   return src_reg(dst);
}

void
vec4_visitor::emit_shader_time_begin()
{
   current_annotation = "shader time start";
   shader_start_time = get_timestamp();
}

void
vec4_visitor::emit_shader_time_end()
{
   current_annotation = "shader time end";
   src_reg shader_end_time = get_timestamp();

   /* Check that there weren't any timestamp reset events (assuming these
    * were the only two timestamp reads that happened).
    */
   src_reg reset_end = shader_end_time;
   reset_end.swizzle = BRW_SWIZZLE_ZZZZ;
   vec4_instruction *test = emit(AND(dst_null_ud(), reset_end, brw_imm_ud(1u)));
   test->conditional_mod = BRW_CONDITIONAL_Z;

   emit(IF(BRW_PREDICATE_NORMAL));

   /* Take the current timestamp and get the delta. */
   shader_start_time.negate = true;
   dst_reg diff = dst_reg(this, glsl_type::uint_type);
   emit(ADD(diff, shader_start_time, shader_end_time));

   /* If there were no instructions between the two timestamp gets, the diff
    * is 2 cycles.  Remove that overhead, so I can forget about that when
    * trying to determine the time taken for single instructions.
    */
   emit(ADD(diff, src_reg(diff), brw_imm_ud(-2u)));

   emit_shader_time_write(0, src_reg(diff));
   emit_shader_time_write(1, brw_imm_ud(1u));
   emit(BRW_OPCODE_ELSE);
   emit_shader_time_write(2, brw_imm_ud(1u));
   emit(BRW_OPCODE_ENDIF);
}

void
vec4_visitor::emit_shader_time_write(int shader_time_subindex, src_reg value)
{
   dst_reg dst =
      dst_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type, 2));

   dst_reg offset = dst;
   dst_reg time = dst;
   time.offset += REG_SIZE;

   offset.type = BRW_REGISTER_TYPE_UD;
   int index = shader_time_index * 3 + shader_time_subindex;
   emit(MOV(offset, brw_imm_d(index * BRW_SHADER_TIME_STRIDE)));

   time.type = BRW_REGISTER_TYPE_UD;
   emit(MOV(time, value));

   vec4_instruction *inst =
      emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst));
   inst->mlen = 2;
}

static bool
is_align1_df(vec4_instruction *inst)
{
   switch (inst->opcode) {
   case VEC4_OPCODE_DOUBLE_TO_F32:
   case VEC4_OPCODE_DOUBLE_TO_D32:
   case VEC4_OPCODE_DOUBLE_TO_U32:
   case VEC4_OPCODE_TO_DOUBLE:
   case VEC4_OPCODE_PICK_LOW_32BIT:
   case VEC4_OPCODE_PICK_HIGH_32BIT:
   case VEC4_OPCODE_SET_LOW_32BIT:
   case VEC4_OPCODE_SET_HIGH_32BIT:
      return true;
   default:
      return false;
   }
}

/**
 * Three-source instructions must have a GRF/MRF destination register.
 * ARF NULL is not allowed.  Fix that up by allocating a temporary GRF.
 */
void
vec4_visitor::fixup_3src_null_dest()
{
   bool progress = false;

   foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
      if (inst->is_3src(devinfo) && inst->dst.is_null()) {
         const unsigned size_written = type_sz(inst->dst.type);
         const unsigned num_regs = DIV_ROUND_UP(size_written, REG_SIZE);

         inst->dst = retype(dst_reg(VGRF, alloc.allocate(num_regs)),
                            inst->dst.type);
         progress = true;
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DETAIL |
                          DEPENDENCY_VARIABLES);
}
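
/* For example (hypothetical IR), a MAD used only for its conditional
 * modifier would be rewritten from
 *
 *    mad.g.f0.0 null:F, vgrf1:F, vgrf2:F, vgrf3:F
 * to
 *    mad.g.f0.0 vgrf9:F, vgrf1:F, vgrf2:F, vgrf3:F
 *
 * with vgrf9 freshly allocated and otherwise unused.
 */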

void
vec4_visitor::convert_to_hw_regs()
{
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         class src_reg &src = inst->src[i];
         struct brw_reg reg;
         switch (src.file) {
         case VGRF: {
            reg = byte_offset(brw_vecn_grf(4, src.nr, 0), src.offset);
            reg.type = src.type;
            reg.abs = src.abs;
            reg.negate = src.negate;
            break;
         }

         case UNIFORM: {
            if (src.nr >= UBO_START) {
               reg = byte_offset(brw_vec4_grf(
                                    prog_data->base.dispatch_grf_start_reg +
                                    ubo_push_start[src.nr - UBO_START] +
                                    src.offset / 32, 0),
                                 src.offset % 32);
            } else {
               reg = byte_offset(brw_vec4_grf(
                                    prog_data->base.dispatch_grf_start_reg +
                                    src.nr / 2, src.nr % 2 * 4),
                                 src.offset);
            }
            reg = stride(reg, 0, 4, 1);
            reg.type = src.type;
            reg.abs = src.abs;
            reg.negate = src.negate;

            /* This should have been moved to pull constants. */
            assert(!src.reladdr);
            break;
         }

         case FIXED_GRF:
            if (type_sz(src.type) == 8) {
               reg = src.as_brw_reg();
               break;
            }
            FALLTHROUGH;
         case ARF:
         case IMM:
            continue;

         case BAD_FILE:
            /* Probably unused. */
            reg = brw_null_reg();
            reg = retype(reg, src.type);
            break;

         case MRF:
         case ATTR:
            unreachable("not reached");
         }

         apply_logical_swizzle(&reg, inst, i);
         src = reg;

         /* From IVB PRM, vol4, part3, "General Restrictions on Regioning
          * Parameters":
          *
          *   "If ExecSize = Width and HorzStride ≠ 0, VertStride must be set
          *    to Width * HorzStride."
          *
          * We can break this rule with DF sources on DF align1
          * instructions, because the exec_size would be 4 and width is 4.
          * As we know we are not accessing the next GRF, it is safe to
          * set vstride to the formula given by the rule itself.  (The
          * regioning fields hold logarithmic encodings, which is why the
          * multiplication in the rule becomes an addition of encoded values.)
          */
         if (is_align1_df(inst) && (cvt(inst->exec_size) - 1) == src.width)
            src.vstride = src.width + src.hstride;
      }

      if (inst->is_3src(devinfo)) {
         /* 3-src instructions with scalar sources support arbitrary subnr,
          * but don't actually use swizzles.  Convert swizzle into subnr.
          * Skip this for double-precision instructions: RepCtrl=1 is not
          * allowed for them and needs special handling.
          */
         for (int i = 0; i < 3; i++) {
            if (inst->src[i].vstride == BRW_VERTICAL_STRIDE_0 &&
                type_sz(inst->src[i].type) < 8) {
               assert(brw_is_single_value_swizzle(inst->src[i].swizzle));
               inst->src[i].subnr += 4 * BRW_GET_SWZ(inst->src[i].swizzle, 0);
            }
         }
      }

      dst_reg &dst = inst->dst;
      struct brw_reg reg;

      switch (inst->dst.file) {
      case VGRF:
         reg = byte_offset(brw_vec8_grf(dst.nr, 0), dst.offset);
         reg.type = dst.type;
         reg.writemask = dst.writemask;
         break;

      case MRF:
         reg = byte_offset(brw_message_reg(dst.nr), dst.offset);
         assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->ver));
         reg.type = dst.type;
         reg.writemask = dst.writemask;
         break;

      case ARF:
      case FIXED_GRF:
         reg = dst.as_brw_reg();
         break;

      case BAD_FILE:
         reg = brw_null_reg();
         reg = retype(reg, dst.type);
         break;

      case IMM:
      case ATTR:
      case UNIFORM:
         unreachable("not reached");
      }

      dst = reg;
   }
}
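
/* Example of the UNIFORM mapping above, with invented numbers: with
 * dispatch_grf_start_reg == 1, plain uniform u5 lands in the push-constant
 * payload at g3.4 (register 1 + 5/2, second vec4 half of the register), and
 * the <0;4,1> region makes every channel read that same vec4.
 */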

static bool
stage_uses_interleaved_attributes(unsigned stage,
                                  enum shader_dispatch_mode dispatch_mode)
{
   switch (stage) {
   case MESA_SHADER_TESS_EVAL:
      return true;
   case MESA_SHADER_GEOMETRY:
      return dispatch_mode != DISPATCH_MODE_4X2_DUAL_OBJECT;
   default:
      return false;
   }
}

/**
 * Get the closest native SIMD width supported by the hardware for instruction
 * \p inst.  The instruction will be left untouched by
 * vec4_visitor::lower_simd_width() if the returned value matches the
 * instruction's original execution size.
 */
static unsigned
get_lowered_simd_width(const struct intel_device_info *devinfo,
                       enum shader_dispatch_mode dispatch_mode,
                       unsigned stage, const vec4_instruction *inst)
{
   /* Do not split some instructions that require special handling */
   switch (inst->opcode) {
   case SHADER_OPCODE_GFX4_SCRATCH_READ:
   case SHADER_OPCODE_GFX4_SCRATCH_WRITE:
      return inst->exec_size;
   default:
      break;
   }

   unsigned lowered_width = MIN2(16, inst->exec_size);

   /* We need to split some cases of double-precision instructions that write
    * 2 registers. We only need to care about this in gfx7 because that is the
    * only hardware that implements fp64 in Align16.
    */
   if (devinfo->ver == 7 && inst->size_written > REG_SIZE) {
      /* Align16 8-wide double-precision SEL does not work well. Verified
       * empirically.
       */
      if (inst->opcode == BRW_OPCODE_SEL && type_sz(inst->dst.type) == 8)
         lowered_width = MIN2(lowered_width, 4);

      /* HSW PRM, 3D Media GPGPU Engine, Region Alignment Rules for Direct
       * Register Addressing:
       *
       *    "When destination spans two registers, the source MUST span two
       *     registers."
       */
      for (unsigned i = 0; i < 3; i++) {
         if (inst->src[i].file == BAD_FILE)
            continue;
         if (inst->size_read(i) <= REG_SIZE)
            lowered_width = MIN2(lowered_width, 4);

         /* Interleaved attribute setups use a vertical stride of 0, which
          * makes them hit the associated instruction decompression bug in gfx7.
          * Split them to prevent this.
          */
         if (inst->src[i].file == ATTR &&
             stage_uses_interleaved_attributes(stage, dispatch_mode))
            lowered_width = MIN2(lowered_width, 4);
      }
   }

   /* IvyBridge can manage a maximum of 4 DFs per SIMD4x2 instruction, since
    * it doesn't support compression in Align16 mode, no matter if it has
    * force_writemask_all enabled or disabled (the latter is affected by the
    * compressed instruction bug in gfx7, which is another reason to enforce
    * this limit).
    */
   if (devinfo->verx10 == 70 &&
       (get_exec_type_size(inst) == 8 || type_sz(inst->dst.type) == 8))
      lowered_width = MIN2(lowered_width, 4);

   return lowered_width;
}
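
/* Example, assuming gfx7 and invented IR: an 8-wide double-precision SEL
 * writing a full dvec4 reports a lowered width of 4, so lower_simd_width()
 * below will split it into two 4-wide halves, while a plain 8-wide
 * single-precision ADD reports 8 and is left untouched.
 */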

static bool
dst_src_regions_overlap(vec4_instruction *inst)
{
   if (inst->size_written == 0)
      return false;

   unsigned dst_start = inst->dst.offset;
   unsigned dst_end = dst_start + inst->size_written - 1;
   for (int i = 0; i < 3; i++) {
      if (inst->src[i].file == BAD_FILE)
         continue;

      if (inst->dst.file != inst->src[i].file ||
          inst->dst.nr != inst->src[i].nr)
         continue;

      unsigned src_start = inst->src[i].offset;
      unsigned src_end = src_start + inst->size_read(i) - 1;

      if ((dst_start >= src_start && dst_start <= src_end) ||
          (dst_end >= src_start && dst_end <= src_end) ||
          (dst_start <= src_start && dst_end >= src_end)) {
         return true;
      }
   }

   return false;
}
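
/* The three clauses above are the standard closed-interval overlap test:
 * either endpoint of [dst_start, dst_end] falls inside [src_start, src_end],
 * or the destination interval contains the source interval entirely.  E.g.
 * dst bytes [32, 63] and src bytes [0, 63] of the same register overlap via
 * the first clause.
 */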

bool
vec4_visitor::lower_simd_width()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      const unsigned lowered_width =
         get_lowered_simd_width(devinfo, prog_data->dispatch_mode, stage, inst);
      assert(lowered_width <= inst->exec_size);
      if (lowered_width == inst->exec_size)
         continue;

      /* We need to deal with source / destination overlaps when splitting.
       * The hardware supports reading from and writing to the same register
       * in the same instruction, but we need to be careful that each split
       * instruction we produce does not corrupt the source of the next.
       *
       * The easiest way to handle this is to make the split instructions write
       * to temporaries if there is a src/dst overlap and then move from the
       * temporaries to the original destination. We also need to consider
       * instructions that do partial writes via align1 opcodes, in which case
       * we need to make sure that we initialize the temporary with the
       * value of the instruction's dst.
       */
      bool needs_temp = dst_src_regions_overlap(inst);
      for (unsigned n = 0; n < inst->exec_size / lowered_width; n++) {
         unsigned channel_offset = lowered_width * n;

         unsigned size_written = lowered_width * type_sz(inst->dst.type);

         /* Create the split instruction from the original so that we copy all
          * relevant instruction fields, then set the width and calculate the
          * new dst/src regions.
          */
         vec4_instruction *linst = new(mem_ctx) vec4_instruction(*inst);
         linst->exec_size = lowered_width;
         linst->group = channel_offset;
         linst->size_written = size_written;

         /* Compute split dst region */
         dst_reg dst;
         if (needs_temp) {
            unsigned num_regs = DIV_ROUND_UP(size_written, REG_SIZE);
            dst = retype(dst_reg(VGRF, alloc.allocate(num_regs)),
                         inst->dst.type);
            if (inst->is_align1_partial_write()) {
               vec4_instruction *copy = MOV(dst, src_reg(inst->dst));
               copy->exec_size = lowered_width;
               copy->group = channel_offset;
               copy->size_written = size_written;
               inst->insert_before(block, copy);
            }
         } else {
            dst = horiz_offset(inst->dst, channel_offset);
         }
         linst->dst = dst;

         /* Compute split source regions */
         for (int i = 0; i < 3; i++) {
            if (linst->src[i].file == BAD_FILE)
               continue;

            bool is_interleaved_attr =
               linst->src[i].file == ATTR &&
               stage_uses_interleaved_attributes(stage,
                                                 prog_data->dispatch_mode);

            if (!is_uniform(linst->src[i]) && !is_interleaved_attr)
               linst->src[i] = horiz_offset(linst->src[i], channel_offset);
         }

         inst->insert_before(block, linst);

         /* If we used a temporary to store the result of the split
          * instruction, copy the result to the original destination
          */
         if (needs_temp) {
            vec4_instruction *mov =
               MOV(offset(inst->dst, lowered_width, n), src_reg(dst));
            mov->exec_size = lowered_width;
            mov->group = channel_offset;
            mov->size_written = size_written;
            mov->predicate = inst->predicate;
            inst->insert_before(block, mov);
         }
      }

      inst->remove(block);
      progress = true;
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);

   return progress;
}
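
/* Schematic example of a split that needs a temporary (IR invented; on gfx7
 * this typically happens for two-register double-precision writes).  The
 * pass conservatively uses temporaries whenever the regions overlap:
 *
 *    sel(8) vgrf2, vgrf2, vgrf3
 * becomes
 *    sel(4) vgrf7, vgrf2, vgrf3
 *    mov(4) vgrf2, vgrf7
 *    sel(4) vgrf8, vgrf2+1.0, vgrf3+1.0     group4
 *    mov(4) vgrf2+1.0, vgrf8                group4
 */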

static brw_predicate
scalarize_predicate(brw_predicate predicate, unsigned writemask)
{
   if (predicate != BRW_PREDICATE_NORMAL)
      return predicate;

   switch (writemask) {
   case WRITEMASK_X:
      return BRW_PREDICATE_ALIGN16_REPLICATE_X;
   case WRITEMASK_Y:
      return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
   case WRITEMASK_Z:
      return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
   case WRITEMASK_W:
      return BRW_PREDICATE_ALIGN16_REPLICATE_W;
   default:
      unreachable("invalid writemask");
   }
}
242101e04c3fSmrg
/* Gfx7 has a hardware decompression bug that we can exploit to represent a
 * handful of additional swizzles natively: with a vstride of 0 the second
 * half of the region repeats the first, so any 64-bit swizzle whose ZW half
 * repeats its XY half can be expressed.
 */
static bool
is_gfx7_supported_64bit_swizzle(vec4_instruction *inst, unsigned arg)
{
   switch (inst->src[arg].swizzle) {
   case BRW_SWIZZLE_XXXX:
   case BRW_SWIZZLE_YYYY:
   case BRW_SWIZZLE_ZZZZ:
   case BRW_SWIZZLE_WWWW:
   case BRW_SWIZZLE_XYXY:
   case BRW_SWIZZLE_YXYX:
   case BRW_SWIZZLE_ZWZW:
   case BRW_SWIZZLE_WZWZ:
      return true;
   default:
      return false;
   }
}

/* 64-bit sources use regions with a width of 2. These 2 elements in each row
 * can be addressed using 32-bit swizzles (which is what the hardware
 * supports), but it also means that the swizzle we apply to the first two
 * components of a dvec4 is coupled with the swizzle we use for the last 2.
 * In other words, only some specific swizzle combinations can be natively
 * supported.
 *
 * FIXME: we can go a step further and implement even more swizzle
 *        variations using only partial scalarization.
 *
 * For more details see:
 * https://bugs.freedesktop.org/show_bug.cgi?id=92760#c82
 */
bool
vec4_visitor::is_supported_64bit_region(vec4_instruction *inst, unsigned arg)
{
   const src_reg &src = inst->src[arg];
   assert(type_sz(src.type) == 8);

   /* Uniform regions have a vstride of 0. Since we use 2-wide rows for
    * 64-bit regions, that means we cannot access components Z/W, so return
    * false for any such case. Interleaved attributes will also be mapped to
    * GRF registers with a vstride of 0, so apply the same treatment.
    */
   if ((is_uniform(src) ||
        (stage_uses_interleaved_attributes(stage, prog_data->dispatch_mode) &&
         src.file == ATTR)) &&
       (brw_mask_for_swizzle(src.swizzle) & 12))
      return false;

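   /* The natively supported swizzles below are exactly those where the ZW
    * half selects the same channels as the XY half shifted up by one dvec2:
    * e.g. YXWZ expands to the 32-bit swizzle ZWXY applied to each 2-wide
    * row.
    */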
   switch (src.swizzle) {
   case BRW_SWIZZLE_XYZW:
   case BRW_SWIZZLE_XXZZ:
   case BRW_SWIZZLE_YYWW:
   case BRW_SWIZZLE_YXWZ:
      return true;
   default:
      return devinfo->ver == 7 && is_gfx7_supported_64bit_swizzle(inst, arg);
   }
}

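/* Fully scalarize any Align16 DF instruction whose regioning cannot be
 * represented natively: for every enabled channel of the destination
 * writemask we emit a copy of the instruction that writes just that channel
 * and replicates the corresponding swizzle channel on all sources.  For
 * example, a MOV with writemask XZ becomes one MOV with writemask X and one
 * with writemask Z.
 */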
bool
vec4_visitor::scalarize_df()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      /* Skip DF instructions that operate in Align1 mode */
      if (is_align1_df(inst))
         continue;

      /* Check if this is a double-precision instruction */
      bool is_double = type_sz(inst->dst.type) == 8;
      for (int arg = 0; !is_double && arg < 3; arg++) {
         is_double = inst->src[arg].file != BAD_FILE &&
                     type_sz(inst->src[arg].type) == 8;
      }

      if (!is_double)
         continue;

      /* Skip the lowering for specific regioning scenarios that we can
       * support natively.
       */
      bool skip_lowering = true;

      /* XY and ZW writemasks operate in 32-bit, which means that they don't
       * have a native 64-bit representation and they should always be split.
       */
      if (inst->dst.writemask == WRITEMASK_XY ||
          inst->dst.writemask == WRITEMASK_ZW) {
         skip_lowering = false;
      } else {
         for (unsigned i = 0; i < 3; i++) {
            if (inst->src[i].file == BAD_FILE || type_sz(inst->src[i].type) < 8)
               continue;
            skip_lowering = skip_lowering && is_supported_64bit_region(inst, i);
         }
      }

      if (skip_lowering)
         continue;

      /* Generate scalar instructions for each enabled channel */
      for (unsigned chan = 0; chan < 4; chan++) {
         unsigned chan_mask = 1 << chan;
         if (!(inst->dst.writemask & chan_mask))
            continue;

         vec4_instruction *scalar_inst = new(mem_ctx) vec4_instruction(*inst);

         for (unsigned i = 0; i < 3; i++) {
            unsigned swz = BRW_GET_SWZ(inst->src[i].swizzle, chan);
            scalar_inst->src[i].swizzle = BRW_SWIZZLE4(swz, swz, swz, swz);
         }

         scalar_inst->dst.writemask = chan_mask;

         if (inst->predicate != BRW_PREDICATE_NONE) {
            scalar_inst->predicate =
               scalarize_predicate(inst->predicate, chan_mask);
         }

         inst->insert_before(block, scalar_inst);
      }

      inst->remove(block);
      progress = true;
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTIONS);

   return progress;
}

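/* Split 64-bit MAD instructions into a MUL into a temporary followed by an
 * ADD.  MAD computes dst = src[0] + src[1] * src[2], so the MUL takes
 * src[1] and src[2] and the ADD combines the product with src[0].
 */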
bool
vec4_visitor::lower_64bit_mad_to_mul_add()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (inst->opcode != BRW_OPCODE_MAD)
         continue;

      if (type_sz(inst->dst.type) != 8)
         continue;

      dst_reg mul_dst = dst_reg(this, glsl_type::dvec4_type);

      /* Use the copy constructor so we copy all relevant instruction fields
       * from the original mad into the add and mul instructions.
       */
      vec4_instruction *mul = new(mem_ctx) vec4_instruction(*inst);
      mul->opcode = BRW_OPCODE_MUL;
      mul->dst = mul_dst;
      mul->src[0] = inst->src[1];
      mul->src[1] = inst->src[2];
      mul->src[2].file = BAD_FILE;

      vec4_instruction *add = new(mem_ctx) vec4_instruction(*inst);
      add->opcode = BRW_OPCODE_ADD;
      add->src[0] = src_reg(mul_dst);
      add->src[1] = inst->src[0];
      add->src[2].file = BAD_FILE;

      inst->insert_before(block, mul);
      inst->insert_before(block, add);
      inst->remove(block);

      progress = true;
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);

   return progress;
}

/* The align16 hardware can only do 32-bit swizzle channels, so we need to
 * translate the logical 64-bit swizzle channels that we use in the Vec4 IR
 * to 32-bit swizzle channels in hardware registers.
 *
 * @inst and @arg identify the original vec4 IR source operand we need to
 * translate the swizzle for, and @hw_reg is the hardware register where we
 * will write the hardware swizzle to use.
 *
 * This pass assumes that Align16/DF instructions have been fully scalarized
 * previously, so there is just one 64-bit swizzle channel to deal with for
 * any given Vec4 IR source.
 */
void
vec4_visitor::apply_logical_swizzle(struct brw_reg *hw_reg,
                                    vec4_instruction *inst, int arg)
{
   src_reg reg = inst->src[arg];

   if (reg.file == BAD_FILE || reg.file == BRW_IMMEDIATE_VALUE)
      return;

   /* If this is not a 64-bit operand or this is an Align1 DF instruction we
    * don't need to do anything about the swizzles.
    */
   if (type_sz(reg.type) < 8 || is_align1_df(inst)) {
      hw_reg->swizzle = reg.swizzle;
      return;
   }

   /* Take the 64-bit logical swizzle channel and translate it to 32-bit */
   assert(brw_is_single_value_swizzle(reg.swizzle) ||
          is_supported_64bit_region(inst, arg));

   /* Apply the region <2, 2, 1> for GRF or <0, 2, 1> for uniforms, as align16
    * HW can only do 32-bit swizzle channels.
    */
   hw_reg->width = BRW_WIDTH_2;

   if (is_supported_64bit_region(inst, arg) &&
       !is_gfx7_supported_64bit_swizzle(inst, arg)) {
      /* Supported 64-bit swizzles are those such that their first two
       * components, when expanded to 32-bit swizzles, match the semantics
       * of the original 64-bit swizzle with 2-wide row regioning.
       */
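      /* For example, logical YXWZ has swizzle0 = 1 and swizzle1 = 0, which
       * expands to the 32-bit swizzle ZWXY = BRW_SWIZZLE4(2, 3, 0, 1).
       */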
      unsigned swizzle0 = BRW_GET_SWZ(reg.swizzle, 0);
      unsigned swizzle1 = BRW_GET_SWZ(reg.swizzle, 1);
      hw_reg->swizzle = BRW_SWIZZLE4(swizzle0 * 2, swizzle0 * 2 + 1,
                                     swizzle1 * 2, swizzle1 * 2 + 1);
   } else {
      /* If we got here then we have one of the following:
       *
       * 1. An unsupported swizzle, which should be single-value thanks to the
       *    scalarization pass.
       *
       * 2. A gfx7 supported swizzle. These can be single-value or double-value
       *    swizzles. If the latter, they are never cross-dvec2 channels. For
       *    these we always need to activate the gfx7 vstride=0 exploit.
       */
      unsigned swizzle0 = BRW_GET_SWZ(reg.swizzle, 0);
      unsigned swizzle1 = BRW_GET_SWZ(reg.swizzle, 1);
      assert((swizzle0 < 2) == (swizzle1 < 2));
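      /* For example, single-value ZZZZ has swizzle0 = swizzle1 = 2: we
       * select the second half of the register below and then read its
       * first 64-bit element with the 32-bit swizzle XYXY.
       */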

      /* To gain access to Z/W components we need to select the second half
       * of the register and then use an X/Y swizzle to select Z/W
       * respectively.
       */
      if (swizzle0 >= 2) {
         *hw_reg = suboffset(*hw_reg, 2);
         swizzle0 -= 2;
         swizzle1 -= 2;
      }

      /* All gfx7-specific supported swizzles require the vstride=0 exploit */
      if (devinfo->ver == 7 && is_gfx7_supported_64bit_swizzle(inst, arg))
         hw_reg->vstride = BRW_VERTICAL_STRIDE_0;

      /* Any 64-bit source with an offset at 16B is intended to address the
       * second half of a register and needs a vertical stride of 0 so that we:
       *
       * 1. Don't violate register region restrictions.
       * 2. Activate the gfx7 instruction decompression bug exploit when
       *    exec size > 4.
       */
      if (hw_reg->subnr % REG_SIZE == 16) {
         assert(devinfo->ver == 7);
         hw_reg->vstride = BRW_VERTICAL_STRIDE_0;
      }

      hw_reg->swizzle = BRW_SWIZZLE4(swizzle0 * 2, swizzle0 * 2 + 1,
                                     swizzle1 * 2, swizzle1 * 2 + 1);
   }
}

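/* Invalidate the analyses tracked by the generic backend as well as the
 * vec4-specific liveness information whenever a pass reports a change in
 * dependency class @c.
 */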
void
vec4_visitor::invalidate_analysis(brw::analysis_dependency_class c)
{
   backend_shader::invalidate_analysis(c);
   live_analysis.invalidate(c);
}

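/* Main entry point of the vec4 backend: emits code from NIR, runs the
 * optimization loop, lowers what the hardware cannot do natively (SIMD
 * width, 64-bit MAD, DF regioning), allocates registers with spilling if
 * required, schedules the result and converts it to hardware registers.
 * Returns false if compilation failed.
 */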
bool
vec4_visitor::run()
{
   if (shader_time_index >= 0)
      emit_shader_time_begin();

   setup_push_ranges();

   if (prog_data->base.zero_push_reg) {
      /* push_reg_mask_param is in uint32 params and UNIFORM is in vec4s */
      const unsigned mask_param = stage_prog_data->push_reg_mask_param;
      src_reg mask = src_reg(dst_reg(UNIFORM, mask_param / 4));
      assert(mask_param % 2 == 0); /* Should be 64-bit-aligned */
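      /* Select the two dwords holding the 64-bit mask within its vec4 and
       * replicate them into both halves of the register.
       */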
      mask.swizzle = BRW_SWIZZLE4((mask_param + 0) % 4,
                                  (mask_param + 1) % 4,
                                  (mask_param + 0) % 4,
                                  (mask_param + 1) % 4);

      emit(VEC4_OPCODE_ZERO_OOB_PUSH_REGS,
           dst_reg(VGRF, alloc.allocate(3)), mask);
   }

   emit_prolog();

   emit_nir_code();
   if (failed)
      return false;
   base_ir = NULL;

   emit_thread_end();

   calculate_cfg();

   /* Before any optimization, push array accesses out to scratch
    * space where we need them to be.  This pass may allocate new
    * virtual GRFs, so we want to do it early.  It also makes sure
    * that we have reladdr computations available for CSE, since we'll
    * often do repeated subexpressions for those.
    */
   move_grf_array_access_to_scratch();
   move_uniform_array_access_to_pull_constants();

   pack_uniform_registers();
   move_push_constants_to_pull_constants();
   split_virtual_grfs();

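/* Run an optimization pass, accumulate its progress and, when the optimizer
 * debug flag is set, dump the instruction list after every pass that made
 * progress.  The expression evaluates to whether this particular pass made
 * progress, so it can be used in conditionals.
 */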
#define OPT(pass, args...) ({                                          \
      pass_num++;                                                      \
      bool this_progress = pass(args);                                 \
                                                                       \
      if (INTEL_DEBUG(DEBUG_OPTIMIZER) && this_progress) {             \
         char filename[64];                                            \
         snprintf(filename, 64, "%s-%s-%02d-%02d-" #pass,              \
                  stage_abbrev, nir->info.name, iteration, pass_num);  \
                                                                       \
         backend_shader::dump_instructions(filename);                  \
      }                                                                \
                                                                       \
      progress = progress || this_progress;                            \
      this_progress;                                                   \
   })


   if (INTEL_DEBUG(DEBUG_OPTIMIZER)) {
      char filename[64];
      snprintf(filename, 64, "%s-%s-00-00-start",
               stage_abbrev, nir->info.name);

      backend_shader::dump_instructions(filename);
   }

   bool progress;
   int iteration = 0;
   int pass_num = 0;
   do {
      progress = false;
      pass_num = 0;
      iteration++;

      OPT(opt_predicated_break, this);
      OPT(opt_reduce_swizzle);
      OPT(dead_code_eliminate);
      OPT(dead_control_flow_eliminate, this);
      OPT(opt_copy_propagation);
      OPT(opt_cmod_propagation);
      OPT(opt_cse);
      OPT(opt_algebraic);
      OPT(opt_register_coalesce);
      OPT(eliminate_find_live_channel);
   } while (progress);

   pass_num = 0;

   if (OPT(opt_vector_float)) {
      OPT(opt_cse);
      OPT(opt_copy_propagation, false);
      OPT(opt_copy_propagation, true);
      OPT(dead_code_eliminate);
   }

   if (devinfo->ver <= 5 && OPT(lower_minmax)) {
      OPT(opt_cmod_propagation);
      OPT(opt_cse);
      OPT(opt_copy_propagation);
      OPT(dead_code_eliminate);
   }

   if (OPT(lower_simd_width)) {
      OPT(opt_copy_propagation);
      OPT(dead_code_eliminate);
   }

   if (failed)
      return false;

   OPT(lower_64bit_mad_to_mul_add);

   /* Run this before payload setup because tessellation shaders
    * rely on it to prevent cross-dvec2 regioning on DF attributes
    * that are set up so that XY are in the second half of a register
    * and ZW are in the first half of the next.
    */
   OPT(scalarize_df);

   setup_payload();

   if (INTEL_DEBUG(DEBUG_SPILL_VEC4)) {
      /* Debug of register spilling: Go spill everything. */
      const int grf_count = alloc.count;
      float spill_costs[alloc.count];
      bool no_spill[alloc.count];
      evaluate_spill_costs(spill_costs, no_spill);
      for (int i = 0; i < grf_count; i++) {
         if (no_spill[i])
            continue;
         spill_reg(i);
      }

      /* We want to run this after spilling because 64-bit (un)spills need to
       * emit code to shuffle 64-bit data for the 32-bit scratch read/write
       * messages that can produce unsupported 64-bit swizzle regions.
       */
      OPT(scalarize_df);
   }

   fixup_3src_null_dest();

   bool allocated_without_spills = reg_allocate();

   if (!allocated_without_spills) {
      brw_shader_perf_log(compiler, log_data,
                          "%s shader triggered register spilling.  "
                          "Try reducing the number of live vec4 values "
                          "to improve performance.\n",
                          stage_name);

      while (!reg_allocate()) {
         if (failed)
            return false;
      }

      /* We want to run this after spilling because 64-bit (un)spills need to
       * emit code to shuffle 64-bit data for the 32-bit scratch read/write
       * messages that can produce unsupported 64-bit swizzle regions.
       */
      OPT(scalarize_df);
   }

   opt_schedule_instructions();

   opt_set_dependency_control();

   convert_to_hw_regs();

   if (last_scratch > 0) {
      prog_data->base.total_scratch =
         brw_get_scratch_size(last_scratch * REG_SIZE);
   }

   return !failed;
}

} /* namespace brw */

extern "C" {

const unsigned *
brw_compile_vs(const struct brw_compiler *compiler,
               void *mem_ctx,
               struct brw_compile_vs_params *params)
{
   struct nir_shader *nir = params->nir;
   const struct brw_vs_prog_key *key = params->key;
   struct brw_vs_prog_data *prog_data = params->prog_data;
   const bool debug_enabled =
      INTEL_DEBUG(params->debug_flag ? params->debug_flag : DEBUG_VS);

   prog_data->base.base.stage = MESA_SHADER_VERTEX;

   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_VERTEX];
   brw_nir_apply_key(nir, compiler, &key->base, 8, is_scalar);

   const unsigned *assembly = NULL;

   prog_data->inputs_read = nir->info.inputs_read;
   prog_data->double_inputs_read = nir->info.vs.double_inputs;

   brw_nir_lower_vs_inputs(nir, params->edgeflag_is_last, key->gl_attrib_wa_flags);
   brw_nir_lower_vue_outputs(nir);
   brw_postprocess_nir(nir, compiler, is_scalar, debug_enabled,
                       key->base.robust_buffer_access);

   prog_data->base.clip_distance_mask =
      ((1 << nir->info.clip_distance_array_size) - 1);
   prog_data->base.cull_distance_mask =
      ((1 << nir->info.cull_distance_array_size) - 1) <<
      nir->info.clip_distance_array_size;

   unsigned nr_attribute_slots = util_bitcount64(prog_data->inputs_read);

   /* gl_VertexID and gl_InstanceID are system values, but arrive via an
    * incoming vertex attribute.  So, add an extra slot.
    */
   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) ||
       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) ||
       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID)) {
      nr_attribute_slots++;
   }

   /* gl_DrawID and IsIndexedDraw share their very own vec4 */
   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID) ||
       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_IS_INDEXED_DRAW)) {
      nr_attribute_slots++;
   }

   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_IS_INDEXED_DRAW))
      prog_data->uses_is_indexed_draw = true;

   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX))
      prog_data->uses_firstvertex = true;

   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE))
      prog_data->uses_baseinstance = true;

   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE))
      prog_data->uses_vertexid = true;

   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID))
      prog_data->uses_instanceid = true;

   if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID))
      prog_data->uses_drawid = true;

   /* The 3DSTATE_VS documentation lists the lower bound on "Vertex URB Entry
    * Read Length" as 1 in vec4 mode, and 0 in SIMD8 mode.  Empirically, in
    * vec4 mode, the hardware appears to wedge unless we read something.
    */
   if (is_scalar)
      prog_data->base.urb_read_length =
         DIV_ROUND_UP(nr_attribute_slots, 2);
   else
      prog_data->base.urb_read_length =
         DIV_ROUND_UP(MAX2(nr_attribute_slots, 1), 2);

   prog_data->nr_attribute_slots = nr_attribute_slots;

   /* Since vertex shaders reuse the same VUE entry for inputs and outputs
    * (overwriting the original contents), we need to make sure the size is
    * the larger of the two.
    */
   const unsigned vue_entries =
      MAX2(nr_attribute_slots, (unsigned)prog_data->base.vue_map.num_slots);

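   /* URB entry sizes are counted in multiples of 128 bytes (8 vec4 slots)
    * on Gfx6 and of 64 bytes (4 slots) on later generations, hence the
    * different divisors.
    */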
   if (compiler->devinfo->ver == 6) {
      prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 8);
   } else {
      prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 4);
   }

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "VS Output ");
      brw_print_vue_map(stderr, &prog_data->base.vue_map, MESA_SHADER_VERTEX);
   }

   if (is_scalar) {
      prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;

      fs_visitor v(compiler, params->log_data, mem_ctx, &key->base,
                   &prog_data->base.base, nir, 8,
                   params->shader_time ? params->shader_time_index : -1,
                   debug_enabled);
      if (!v.run_vs()) {
         params->error_str = ralloc_strdup(mem_ctx, v.fail_msg);
         return NULL;
      }

      prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;

      fs_generator g(compiler, params->log_data, mem_ctx,
                     &prog_data->base.base, v.runtime_check_aads_emit,
                     MESA_SHADER_VERTEX);
      if (unlikely(debug_enabled)) {
         const char *debug_name =
            ralloc_asprintf(mem_ctx, "%s vertex shader %s",
                            nir->info.label ? nir->info.label :
                               "unnamed",
                            nir->info.name);

         g.enable_debug(debug_name);
      }
      g.generate_code(v.cfg, 8, v.shader_stats,
                      v.performance_analysis.require(), params->stats);
      g.add_const_data(nir->constant_data, nir->constant_data_size);
      assembly = g.get_assembly();
   }

   if (!assembly) {
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;

      vec4_vs_visitor v(compiler, params->log_data, key, prog_data,
                        nir, mem_ctx,
                        params->shader_time ? params->shader_time_index : -1,
                        debug_enabled);
      if (!v.run()) {
         params->error_str = ralloc_strdup(mem_ctx, v.fail_msg);
         return NULL;
      }

      assembly = brw_vec4_generate_assembly(compiler, params->log_data, mem_ctx,
                                            nir, &prog_data->base,
                                            v.cfg,
                                            v.performance_analysis.require(),
                                            params->stats, debug_enabled);
   }

   return assembly;
}

} /* extern "C" */