      1 //===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
      2 //
      3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
      4 // See https://llvm.org/LICENSE.txt for license information.
      5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
      6 //
      7 /// \file
      8 //===----------------------------------------------------------------------===//
      9 //
     10 
     11 #include "AMDGPU.h"
     12 #include "GCNSubtarget.h"
     13 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
     14 #include "SIMachineFunctionInfo.h"
     15 #include "llvm/ADT/DepthFirstIterator.h"
     16 #include "llvm/CodeGen/MachineFunctionPass.h"
     17 
     18 #define DEBUG_TYPE "si-fold-operands"
     19 using namespace llvm;
     20 
     21 namespace {
     22 
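        // Describes one pending fold: the use instruction and operand number being
        // rewritten, the value being folded (register, immediate, frame index or
        // global address), whether the use had to be commuted to make the fold
        // legal, and, if required, the 32-bit opcode the use must be shrunk to.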
     23 struct FoldCandidate {
     24   MachineInstr *UseMI;
     25   union {
     26     MachineOperand *OpToFold;
     27     uint64_t ImmToFold;
     28     int FrameIndexToFold;
     29   };
     30   int ShrinkOpcode;
     31   unsigned UseOpNo;
     32   MachineOperand::MachineOperandType Kind;
     33   bool Commuted;
     34 
     35   FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
     36                 bool Commuted_ = false,
     37                 int ShrinkOp = -1) :
     38     UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
     39     Kind(FoldOp->getType()),
     40     Commuted(Commuted_) {
     41     if (FoldOp->isImm()) {
     42       ImmToFold = FoldOp->getImm();
     43     } else if (FoldOp->isFI()) {
     44       FrameIndexToFold = FoldOp->getIndex();
     45     } else {
     46       assert(FoldOp->isReg() || FoldOp->isGlobal());
     47       OpToFold = FoldOp;
     48     }
     49   }
     50 
     51   bool isFI() const {
     52     return Kind == MachineOperand::MO_FrameIndex;
     53   }
     54 
     55   bool isImm() const {
     56     return Kind == MachineOperand::MO_Immediate;
     57   }
     58 
     59   bool isReg() const {
     60     return Kind == MachineOperand::MO_Register;
     61   }
     62 
     63   bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }
     64 
     65   bool isCommuted() const {
     66     return Commuted;
     67   }
     68 
     69   bool needsShrink() const {
     70     return ShrinkOpcode != -1;
     71   }
     72 
     73   int getShrinkOpcode() const {
     74     return ShrinkOpcode;
     75   }
     76 };
     77 
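        // Pass that folds immediates, frame indexes, global addresses and registers
        // produced by foldable mov/copy instructions directly into their uses.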
     78 class SIFoldOperands : public MachineFunctionPass {
     79 public:
     80   static char ID;
     81   MachineRegisterInfo *MRI;
     82   const SIInstrInfo *TII;
     83   const SIRegisterInfo *TRI;
     84   const GCNSubtarget *ST;
     85   const SIMachineFunctionInfo *MFI;
     86 
     87   void foldOperand(MachineOperand &OpToFold,
     88                    MachineInstr *UseMI,
     89                    int UseOpIdx,
     90                    SmallVectorImpl<FoldCandidate> &FoldList,
     91                    SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;
     92 
     93   bool tryFoldCndMask(MachineInstr &MI) const;
     94   void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;
     95 
     96   const MachineOperand *isClamp(const MachineInstr &MI) const;
     97   bool tryFoldClamp(MachineInstr &MI);
     98 
     99   std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
    100   bool tryFoldOMod(MachineInstr &MI);
    101   bool tryFoldRegSequence(MachineInstr &MI);
    102   bool tryFoldLCSSAPhi(MachineInstr &MI);
    103   bool tryFoldLoad(MachineInstr &MI);
    104 
    105 public:
    106   SIFoldOperands() : MachineFunctionPass(ID) {
    107     initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
    108   }
    109 
    110   bool runOnMachineFunction(MachineFunction &MF) override;
    111 
    112   StringRef getPassName() const override { return "SI Fold Operands"; }
    113 
    114   void getAnalysisUsage(AnalysisUsage &AU) const override {
    115     AU.setPreservesCFG();
    116     MachineFunctionPass::getAnalysisUsage(AU);
    117   }
    118 };
    119 
    120 } // End anonymous namespace.
    121 
    122 INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
    123                 "SI Fold Operands", false, false)
    124 
    125 char SIFoldOperands::ID = 0;
    126 
    127 char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
    128 
    129 // Map multiply-accumulate opcode to corresponding multiply-add opcode if any.
    130 static unsigned macToMad(unsigned Opc) {
    131   switch (Opc) {
    132   case AMDGPU::V_MAC_F32_e64:
    133     return AMDGPU::V_MAD_F32_e64;
    134   case AMDGPU::V_MAC_F16_e64:
    135     return AMDGPU::V_MAD_F16_e64;
    136   case AMDGPU::V_FMAC_F32_e64:
    137     return AMDGPU::V_FMA_F32_e64;
    138   case AMDGPU::V_FMAC_F16_e64:
    139     return AMDGPU::V_FMA_F16_gfx9_e64;
    140   case AMDGPU::V_FMAC_LEGACY_F32_e64:
    141     return AMDGPU::V_FMA_LEGACY_F32_e64;
    142   case AMDGPU::V_FMAC_F64_e64:
    143     return AMDGPU::V_FMA_F64_e64;
    144   }
    145   return AMDGPU::INSTRUCTION_LIST_END;
    146 }
    147 
    148 // Wrapper around isInlineConstant that understands special cases when
    149 // instruction types are replaced during operand folding.
    150 static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
    151                                      const MachineInstr &UseMI,
    152                                      unsigned OpNo,
    153                                      const MachineOperand &OpToFold) {
    154   if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    155     return true;
    156 
    157   unsigned Opc = UseMI.getOpcode();
    158   unsigned NewOpc = macToMad(Opc);
    159   if (NewOpc != AMDGPU::INSTRUCTION_LIST_END) {
    160     // Special case for mac. Since this is replaced with mad when folded into
    161     // src2, we need to check the legality for the final instruction.
    162     int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    163     if (static_cast<int>(OpNo) == Src2Idx) {
    164       const MCInstrDesc &MadDesc = TII->get(NewOpc);
    165       return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    166     }
    167   }
    168 
    169   return false;
    170 }
    171 
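        // Returns true if OpToFold is a frame index that may be folded into the
        // vaddr or saddr operand OpNo of the MUBUF or FLAT scratch instruction
        // UseMI.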
    172 // TODO: Add a heuristic for when the frame index might not fit in the
    173 // addressing mode immediate offset, to avoid materializing it in loops.
    174 static bool frameIndexMayFold(const SIInstrInfo *TII,
    175                               const MachineInstr &UseMI,
    176                               int OpNo,
    177                               const MachineOperand &OpToFold) {
    178   if (!OpToFold.isFI())
    179     return false;
    180 
    181   if (TII->isMUBUF(UseMI))
    182     return OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
    183                                               AMDGPU::OpName::vaddr);
    184   if (!TII->isFLATScratch(UseMI))
    185     return false;
    186 
    187   int SIdx = AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
    188                                         AMDGPU::OpName::saddr);
    189   if (OpNo == SIdx)
    190     return true;
    191 
    192   int VIdx = AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
    193                                         AMDGPU::OpName::vaddr);
    194   return OpNo == VIdx && SIdx == -1;
    195 }
    196 
    197 FunctionPass *llvm::createSIFoldOperandsPass() {
    198   return new SIFoldOperands();
    199 }
    200 
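        // Apply a previously collected fold: rewrite the use operand described by
        // Fold. Packed f16/i16 immediates may require adjusting op_sel modifiers,
        // and carry-out VALU instructions may be shrunk to their 32-bit encoding
        // (only when VCC is dead at that point). Returns true if the operand was
        // updated.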
    201 static bool updateOperand(FoldCandidate &Fold,
    202                           const SIInstrInfo &TII,
    203                           const TargetRegisterInfo &TRI,
    204                           const GCNSubtarget &ST) {
    205   MachineInstr *MI = Fold.UseMI;
    206   MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
    207   assert(Old.isReg());
    208 
    209   if (Fold.isImm()) {
    210     if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
    211         !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
    212         AMDGPU::isFoldableLiteralV216(Fold.ImmToFold,
    213                                       ST.hasInv2PiInlineImm())) {
    214       // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
    215       // already set.
    216       unsigned Opcode = MI->getOpcode();
    217       int OpNo = MI->getOperandNo(&Old);
    218       int ModIdx = -1;
    219       if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
    220         ModIdx = AMDGPU::OpName::src0_modifiers;
    221       else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
    222         ModIdx = AMDGPU::OpName::src1_modifiers;
    223       else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
    224         ModIdx = AMDGPU::OpName::src2_modifiers;
    225       assert(ModIdx != -1);
    226       ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
    227       MachineOperand &Mod = MI->getOperand(ModIdx);
    228       unsigned Val = Mod.getImm();
    229       if (!(Val & SISrcMods::OP_SEL_0) && (Val & SISrcMods::OP_SEL_1)) {
    230         // Only apply the following transformation if that operand requires
    231         // a packed immediate.
    232         switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
    233         case AMDGPU::OPERAND_REG_IMM_V2FP16:
    234         case AMDGPU::OPERAND_REG_IMM_V2INT16:
    235         case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    236         case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    237           // If upper part is all zero we do not need op_sel_hi.
    238           if (!isUInt<16>(Fold.ImmToFold)) {
    239             if (!(Fold.ImmToFold & 0xffff)) {
    240               Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
    241               Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
    242               Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
    243               return true;
    244             }
    245             Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
    246             Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
    247             return true;
    248           }
    249           break;
    250         default:
    251           break;
    252         }
    253       }
    254     }
    255   }
    256 
    257   if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
    258     MachineBasicBlock *MBB = MI->getParent();
    259     auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16);
    260     if (Liveness != MachineBasicBlock::LQR_Dead) {
    261       LLVM_DEBUG(dbgs() << "Not shrinking " << *MI << " due to vcc liveness\n");
    262       return false;
    263     }
    264 
    265     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    266     int Op32 = Fold.getShrinkOpcode();
    267     MachineOperand &Dst0 = MI->getOperand(0);
    268     MachineOperand &Dst1 = MI->getOperand(1);
    269     assert(Dst0.isDef() && Dst1.isDef());
    270 
    271     bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());
    272 
    273     const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
    274     Register NewReg0 = MRI.createVirtualRegister(Dst0RC);
    275 
    276     MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);
    277 
    278     if (HaveNonDbgCarryUse) {
    279       BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
    280         .addReg(AMDGPU::VCC, RegState::Kill);
    281     }
    282 
    283     // Keep the old instruction around to avoid breaking iterators, but
    284     // replace it with a dummy instruction to remove uses.
    285     //
    286     // FIXME: To avoid this we should not invert how this pass looks at
    287     // operands: track the set of foldable movs instead of searching for uses
    288     // when visiting a use.
    289     Dst0.setReg(NewReg0);
    290     for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
    291       MI->RemoveOperand(I);
    292     MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));
    293 
    294     if (Fold.isCommuted())
    295       TII.commuteInstruction(*Inst32, false);
    296     return true;
    297   }
    298 
    299   assert(!Fold.needsShrink() && "not handled");
    300 
    301   if (Fold.isImm()) {
    302     Old.ChangeToImmediate(Fold.ImmToFold);
    303     return true;
    304   }
    305 
    306   if (Fold.isGlobal()) {
    307     Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
    308                    Fold.OpToFold->getTargetFlags());
    309     return true;
    310   }
    311 
    312   if (Fold.isFI()) {
    313     Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    314     return true;
    315   }
    316 
    317   MachineOperand *New = Fold.OpToFold;
    318   Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    319   Old.setIsUndef(New->isUndef());
    320   return true;
    321 }
    322 
    323 static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
    324                               const MachineInstr *MI) {
    325   for (auto Candidate : FoldList) {
    326     if (Candidate.UseMI == MI)
    327       return true;
    328   }
    329   return false;
    330 }
    331 
    332 static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
    333                                 MachineInstr *MI, unsigned OpNo,
    334                                 MachineOperand *FoldOp, bool Commuted = false,
    335                                 int ShrinkOp = -1) {
    336   // Skip additional folding on the same operand.
    337   for (FoldCandidate &Fold : FoldList)
    338     if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
    339       return;
    340   LLVM_DEBUG(dbgs() << "Append " << (Commuted ? "commuted" : "normal")
    341                     << " operand " << OpNo << "\n  " << *MI);
    342   FoldList.emplace_back(MI, OpNo, FoldOp, Commuted, ShrinkOp);
    343 }
    344 
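        // Try to record a fold of OpToFold into operand OpNo of MI. If the operand
        // is not legal as-is, this attempts to rewrite mac to mad, to use the
        // immediate form of s_setreg, or to commute the instruction before giving
        // up. Returns true if a FoldCandidate was appended to FoldList.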
    345 static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
    346                              MachineInstr *MI, unsigned OpNo,
    347                              MachineOperand *OpToFold,
    348                              const SIInstrInfo *TII) {
    349   if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    350     // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    351     unsigned Opc = MI->getOpcode();
    352     unsigned NewOpc = macToMad(Opc);
    353     if (NewOpc != AMDGPU::INSTRUCTION_LIST_END) {
    354       // Check if changing this to a v_mad_{f16, f32} instruction will allow us
    355       // to fold the operand.
    356       MI->setDesc(TII->get(NewOpc));
    357       bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
    358       if (FoldAsMAD) {
    359         MI->untieRegOperand(OpNo);
    360         return true;
    361       }
    362       MI->setDesc(TII->get(Opc));
    363     }
    364 
    365     // Special case for s_setreg_b32
    366     if (OpToFold->isImm()) {
    367       unsigned ImmOpc = 0;
    368       if (Opc == AMDGPU::S_SETREG_B32)
    369         ImmOpc = AMDGPU::S_SETREG_IMM32_B32;
    370       else if (Opc == AMDGPU::S_SETREG_B32_mode)
    371         ImmOpc = AMDGPU::S_SETREG_IMM32_B32_mode;
    372       if (ImmOpc) {
    373         MI->setDesc(TII->get(ImmOpc));
    374         appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
    375         return true;
    376       }
    377     }
    378 
    379     // If we are already folding into another operand of MI, then
    380     // we can't commute the instruction, otherwise we risk making the
    381     // other fold illegal.
    382     if (isUseMIInFoldList(FoldList, MI))
    383       return false;
    384 
    385     unsigned CommuteOpNo = OpNo;
    386 
    387     // Operand is not legal, so try to commute the instruction to
    388     // see if this makes it possible to fold.
    389     unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    390     unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    391     bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);
    392 
    393     if (CanCommute) {
    394       if (CommuteIdx0 == OpNo)
    395         CommuteOpNo = CommuteIdx1;
    396       else if (CommuteIdx1 == OpNo)
    397         CommuteOpNo = CommuteIdx0;
    398     }
    399 
    400 
    401     // One of the operands might be an immediate, and OpNo may refer to it
    402     // after the call to commuteInstruction() below. Such situations are
    403     // avoided here explicitly, as OpNo must be a register operand to be a
    404     // candidate for memory folding.
    405     if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
    406                        !MI->getOperand(CommuteIdx1).isReg()))
    407       return false;
    408 
    409     if (!CanCommute ||
    410         !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
    411       return false;
    412 
    413     if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
    414       if ((Opc == AMDGPU::V_ADD_CO_U32_e64 ||
    415            Opc == AMDGPU::V_SUB_CO_U32_e64 ||
    416            Opc == AMDGPU::V_SUBREV_CO_U32_e64) && // FIXME
    417           (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
    418         MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
    419 
    420         // Verify the other operand is a VGPR, otherwise we would violate the
    421         // constant bus restriction.
    422         unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
    423         MachineOperand &OtherOp = MI->getOperand(OtherIdx);
    424         if (!OtherOp.isReg() ||
    425             !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
    426           return false;
    427 
    428         assert(MI->getOperand(1).isDef());
    429 
    430         // Make sure to get the 32-bit version of the commuted opcode.
    431         unsigned MaybeCommutedOpc = MI->getOpcode();
    432         int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);
    433 
    434         appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true, Op32);
    435         return true;
    436       }
    437 
    438       TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
    439       return false;
    440     }
    441 
    442     appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true);
    443     return true;
    444   }
    445 
    446   // Check the case where we might introduce a second constant operand to a
    447   // scalar instruction
    448   if (TII->isSALU(MI->getOpcode())) {
    449     const MCInstrDesc &InstDesc = MI->getDesc();
    450     const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
    451     const SIRegisterInfo &SRI = TII->getRegisterInfo();
    452 
    453     // Fine if the operand can be encoded as an inline constant
    454     if (OpToFold->isImm()) {
    455       if (!SRI.opCanUseInlineConstant(OpInfo.OperandType) ||
    456           !TII->isInlineConstant(*OpToFold, OpInfo)) {
    457         // Otherwise check for another constant
    458         for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {
    459           auto &Op = MI->getOperand(i);
    460           if (OpNo != i &&
    461               TII->isLiteralConstantLike(Op, OpInfo)) {
    462             return false;
    463           }
    464         }
    465       }
    466     }
    467   }
    468 
    469   appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
    470   return true;
    471 }
    472 
    473 // If the use operand doesn't care about the value, this may be an operand only
    474 // used for register indexing, in which case it is unsafe to fold.
    475 static bool isUseSafeToFold(const SIInstrInfo *TII,
    476                             const MachineInstr &MI,
    477                             const MachineOperand &UseMO) {
    478   if (UseMO.isUndef() || TII->isSDWA(MI))
    479     return false;
    480 
    481   switch (MI.getOpcode()) {
    482   case AMDGPU::V_MOV_B32_e32:
    483   case AMDGPU::V_MOV_B32_e64:
    484   case AMDGPU::V_MOV_B64_PSEUDO:
    485     // Do not fold into an indirect mov.
    486     return !MI.hasRegisterImplicitUseOperand(AMDGPU::M0);
    487   }
    488 
    489   return true;
    490   //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
    491 }
    492 
    493 // Find the def of UseReg, check if it is a reg_sequence, and find the
    494 // initializer for each subreg, tracing each one back to a foldable inline
    495 // immediate where possible. Returns true on success.
    496 static bool getRegSeqInit(
    497     SmallVectorImpl<std::pair<MachineOperand*, unsigned>> &Defs,
    498     Register UseReg, uint8_t OpTy,
    499     const SIInstrInfo *TII, const MachineRegisterInfo &MRI) {
    500   MachineInstr *Def = MRI.getVRegDef(UseReg);
    501   if (!Def || !Def->isRegSequence())
    502     return false;
    503 
    504   for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
    505     MachineOperand *Sub = &Def->getOperand(I);
    506     assert(Sub->isReg());
    507 
    508     for (MachineInstr *SubDef = MRI.getVRegDef(Sub->getReg());
    509          SubDef && Sub->isReg() && Sub->getReg().isVirtual() &&
    510          !Sub->getSubReg() && TII->isFoldableCopy(*SubDef);
    511          SubDef = MRI.getVRegDef(Sub->getReg())) {
    512       MachineOperand *Op = &SubDef->getOperand(1);
    513       if (Op->isImm()) {
    514         if (TII->isInlineConstant(*Op, OpTy))
    515           Sub = Op;
    516         break;
    517       }
    518       if (!Op->isReg() || Op->getReg().isPhysical())
    519         break;
    520       Sub = Op;
    521     }
    522 
    523     Defs.emplace_back(Sub, Def->getOperand(I + 1).getImm());
    524   }
    525 
    526   return true;
    527 }
    528 
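        // Try to fold an inline immediate into an inline-constant (including AGPR)
        // operand of UseMI. The immediate may come directly from OpToFold, from a
        // foldable copy of an immediate, or from a reg_sequence whose initializers
        // are all the same inline constant (a splat).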
    529 static bool tryToFoldACImm(const SIInstrInfo *TII,
    530                            const MachineOperand &OpToFold,
    531                            MachineInstr *UseMI,
    532                            unsigned UseOpIdx,
    533                            SmallVectorImpl<FoldCandidate> &FoldList) {
    534   const MCInstrDesc &Desc = UseMI->getDesc();
    535   const MCOperandInfo *OpInfo = Desc.OpInfo;
    536   if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
    537     return false;
    538 
    539   uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
    540   if ((OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
    541        OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST) &&
    542       (OpTy < AMDGPU::OPERAND_REG_INLINE_C_FIRST ||
    543        OpTy > AMDGPU::OPERAND_REG_INLINE_C_LAST))
    544     return false;
    545 
    546   if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
    547       TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
    548     UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
    549     return true;
    550   }
    551 
    552   if (!OpToFold.isReg())
    553     return false;
    554 
    555   Register UseReg = OpToFold.getReg();
    556   if (!UseReg.isVirtual())
    557     return false;
    558 
    559   if (isUseMIInFoldList(FoldList, UseMI))
    560     return false;
    561 
    562   MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
    563 
    564   // Maybe it is just a COPY of an immediate itself.
    565   MachineInstr *Def = MRI.getVRegDef(UseReg);
    566   MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
    567   if (!UseOp.getSubReg() && Def && TII->isFoldableCopy(*Def)) {
    568     MachineOperand &DefOp = Def->getOperand(1);
    569     if (DefOp.isImm() && TII->isInlineConstant(DefOp, OpTy) &&
    570         TII->isOperandLegal(*UseMI, UseOpIdx, &DefOp)) {
    571       UseMI->getOperand(UseOpIdx).ChangeToImmediate(DefOp.getImm());
    572       return true;
    573     }
    574   }
    575 
    576   SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
    577   if (!getRegSeqInit(Defs, UseReg, OpTy, TII, MRI))
    578     return false;
    579 
    580   int32_t Imm;
    581   for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
    582     const MachineOperand *Op = Defs[I].first;
    583     if (!Op->isImm())
    584       return false;
    585 
    586     auto SubImm = Op->getImm();
    587     if (!I) {
    588       Imm = SubImm;
    589       if (!TII->isInlineConstant(*Op, OpTy) ||
    590           !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
    591         return false;
    592 
    593       continue;
    594     }
    595     if (Imm != SubImm)
    596       return false; // Can only fold splat constants
    597   }
    598 
    599   appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
    600   return true;
    601 }
    602 
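        // Try to fold OpToFold into operand UseOpIdx of UseMI. REG_SEQUENCE uses
        // are handled by recursing into the uses of the sequence, frame indexes are
        // folded into MUBUF/FLAT scratch addressing operands, and copies of
        // immediates are turned into movs (or v_accvgpr writes). Remaining legal
        // folds are queued in FoldList; movs that now need an implicit exec use are
        // recorded in CopiesToReplace.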
    603 void SIFoldOperands::foldOperand(
    604   MachineOperand &OpToFold,
    605   MachineInstr *UseMI,
    606   int UseOpIdx,
    607   SmallVectorImpl<FoldCandidate> &FoldList,
    608   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
    609   const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
    610 
    611   if (!isUseSafeToFold(TII, *UseMI, UseOp))
    612     return;
    613 
    614   // FIXME: Fold operands with subregs.
    615   if (UseOp.isReg() && OpToFold.isReg()) {
    616     if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
    617       return;
    618   }
    619 
    620   // Special case for REG_SEQUENCE: We can't fold literals into
    621   // REG_SEQUENCE instructions, so we have to fold them into the
    622   // uses of REG_SEQUENCE.
    623   if (UseMI->isRegSequence()) {
    624     Register RegSeqDstReg = UseMI->getOperand(0).getReg();
    625     unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();
    626 
    627     for (auto &RSUse : make_early_inc_range(MRI->use_nodbg_operands(RegSeqDstReg))) {
    628       MachineInstr *RSUseMI = RSUse.getParent();
    629 
    630       if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
    631                          RSUseMI->getOperandNo(&RSUse), FoldList))
    632         continue;
    633 
    634       if (RSUse.getSubReg() != RegSeqDstSubReg)
    635         continue;
    636 
    637       foldOperand(OpToFold, RSUseMI, RSUseMI->getOperandNo(&RSUse), FoldList,
    638                   CopiesToReplace);
    639     }
    640 
    641     return;
    642   }
    643 
    644   if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
    645     return;
    646 
    647   if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
    648     // Sanity check that this is a stack access.
    649     // FIXME: Should probably use stack pseudos before frame lowering.
    650 
    651     if (TII->isMUBUF(*UseMI)) {
    652       if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
    653           MFI->getScratchRSrcReg())
    654         return;
    655 
    656       // Ensure this is either relative to the current frame or the current
    657       // wave.
    658       MachineOperand &SOff =
    659           *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
    660       if (!SOff.isImm() || SOff.getImm() != 0)
    661         return;
    662     }
    663 
    664     // A frame index will resolve to a positive constant, so it should always be
    665     // safe to fold the addressing mode, even pre-GFX9.
    666     UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
    667 
    668     if (TII->isFLATScratch(*UseMI) &&
    669         AMDGPU::getNamedOperandIdx(UseMI->getOpcode(),
    670                                    AMDGPU::OpName::vaddr) != -1) {
    671       unsigned NewOpc = AMDGPU::getFlatScratchInstSSfromSV(UseMI->getOpcode());
    672       UseMI->setDesc(TII->get(NewOpc));
    673     }
    674 
    675     return;
    676   }
    677 
    678   bool FoldingImmLike =
    679       OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
    680 
    681   if (FoldingImmLike && UseMI->isCopy()) {
    682     Register DestReg = UseMI->getOperand(0).getReg();
    683     Register SrcReg = UseMI->getOperand(1).getReg();
    684     assert(SrcReg.isVirtual());
    685 
    686     const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
    687 
    688     // Don't fold into a copy to a physical register with the same class. Doing
    689     // so would interfere with the register coalescer's logic which would avoid
    690     // redundant initializations.
    691     if (DestReg.isPhysical() && SrcRC->contains(DestReg))
    692       return;
    693 
    694     const TargetRegisterClass *DestRC = TRI->getRegClassForReg(*MRI, DestReg);
    695     if (!DestReg.isPhysical()) {
    696       if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
    697         SmallVector<FoldCandidate, 4> CopyUses;
    698         for (auto &Use : MRI->use_nodbg_operands(DestReg)) {
    699           // There's no point trying to fold into an implicit operand.
    700           if (Use.isImplicit())
    701             continue;
    702 
    703           CopyUses.emplace_back(Use.getParent(),
    704                                 Use.getParent()->getOperandNo(&Use),
    705                                 &UseMI->getOperand(1));
    706         }
    707         for (auto &F : CopyUses) {
    708           foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo, FoldList, CopiesToReplace);
    709         }
    710       }
    711 
    712       if (DestRC == &AMDGPU::AGPR_32RegClass &&
    713           TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
    714         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
    715         UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
    716         CopiesToReplace.push_back(UseMI);
    717         return;
    718       }
    719     }
    720 
    721     // In order to fold immediates into copies, we need to change the
    722     // copy to a MOV.
    723 
    724     unsigned MovOp = TII->getMovOpcode(DestRC);
    725     if (MovOp == AMDGPU::COPY)
    726       return;
    727 
    728     UseMI->setDesc(TII->get(MovOp));
    729     MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
    730     MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
    731     while (ImpOpI != ImpOpE) {
    732       MachineInstr::mop_iterator Tmp = ImpOpI;
    733       ImpOpI++;
    734       UseMI->RemoveOperand(UseMI->getOperandNo(Tmp));
    735     }
    736     CopiesToReplace.push_back(UseMI);
    737   } else {
    738     if (UseMI->isCopy() && OpToFold.isReg() &&
    739         UseMI->getOperand(0).getReg().isVirtual() &&
    740         !UseMI->getOperand(1).getSubReg()) {
    741       LLVM_DEBUG(dbgs() << "Folding " << OpToFold << "\n into " << *UseMI);
    742       unsigned Size = TII->getOpSize(*UseMI, 1);
    743       Register UseReg = OpToFold.getReg();
    744       UseMI->getOperand(1).setReg(UseReg);
    745       UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
    746       UseMI->getOperand(1).setIsKill(false);
    747       CopiesToReplace.push_back(UseMI);
    748       OpToFold.setIsKill(false);
    749 
    750       // Storing a value into an AGPR is tricky: v_accvgpr_write_b32 can only
    751       // accept a VGPR or an inline immediate. Recreate the reg_sequence with
    752       // its initializers right here so that immediates are rematerialized and
    753       // copies through different register classes are avoided.
    754       SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
    755       if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
    756           getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII,
    757                         *MRI)) {
    758         const DebugLoc &DL = UseMI->getDebugLoc();
    759         MachineBasicBlock &MBB = *UseMI->getParent();
    760 
    761         UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
    762         for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
    763           UseMI->RemoveOperand(I);
    764 
    765         MachineInstrBuilder B(*MBB.getParent(), UseMI);
    766         DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
    767         SmallSetVector<TargetInstrInfo::RegSubRegPair, 32> SeenAGPRs;
    768         for (unsigned I = 0; I < Size / 4; ++I) {
    769           MachineOperand *Def = Defs[I].first;
    770           TargetInstrInfo::RegSubRegPair CopyToVGPR;
    771           if (Def->isImm() &&
    772               TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
    773             int64_t Imm = Def->getImm();
    774 
    775             auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
    776             BuildMI(MBB, UseMI, DL,
    777                     TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addImm(Imm);
    778             B.addReg(Tmp);
    779           } else if (Def->isReg() && TRI->isAGPR(*MRI, Def->getReg())) {
    780             auto Src = getRegSubRegPair(*Def);
    781             Def->setIsKill(false);
    782             if (!SeenAGPRs.insert(Src)) {
    783               // We cannot build a reg_sequence out of the same registers; they
    784               // must be copied. Better to do it here, before copyPhysReg()
    785               // creates several reads to do the AGPR->VGPR->AGPR copy.
    786               CopyToVGPR = Src;
    787             } else {
    788               B.addReg(Src.Reg, Def->isUndef() ? RegState::Undef : 0,
    789                        Src.SubReg);
    790             }
    791           } else {
    792             assert(Def->isReg());
    793             Def->setIsKill(false);
    794             auto Src = getRegSubRegPair(*Def);
    795 
    796             // A direct copy from an SGPR to an AGPR is not possible. To avoid
    797             // copyPhysReg() later exploding it into SGPR->VGPR->AGPR copies,
    798             // create the copy here and track whether we already have one.
    799             if (TRI->isSGPRReg(*MRI, Src.Reg)) {
    800               CopyToVGPR = Src;
    801             } else {
    802               auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
    803               BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
    804               B.addReg(Tmp);
    805             }
    806           }
    807 
    808           if (CopyToVGPR.Reg) {
    809             Register Vgpr;
    810             if (VGPRCopies.count(CopyToVGPR)) {
    811               Vgpr = VGPRCopies[CopyToVGPR];
    812             } else {
    813               Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    814               BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
    815               VGPRCopies[CopyToVGPR] = Vgpr;
    816             }
    817             auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
    818             BuildMI(MBB, UseMI, DL,
    819                     TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addReg(Vgpr);
    820             B.addReg(Tmp);
    821           }
    822 
    823           B.addImm(Defs[I].second);
    824         }
    825         LLVM_DEBUG(dbgs() << "Folded " << *UseMI);
    826         return;
    827       }
    828 
    829       if (Size != 4)
    830         return;
    831       if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
    832           TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
    833         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
    834       else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
    835                TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
    836         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64));
    837       else if (ST->hasGFX90AInsts() &&
    838                TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
    839                TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
    840         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_MOV_B32));
    841       return;
    842     }
    843 
    844     unsigned UseOpc = UseMI->getOpcode();
    845     if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
    846         (UseOpc == AMDGPU::V_READLANE_B32 &&
    847          (int)UseOpIdx ==
    848          AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
    849       // %vgpr = V_MOV_B32 imm
    850       // %sgpr = V_READFIRSTLANE_B32 %vgpr
    851       // =>
    852       // %sgpr = S_MOV_B32 imm
    853       if (FoldingImmLike) {
    854         if (execMayBeModifiedBeforeUse(*MRI,
    855                                        UseMI->getOperand(UseOpIdx).getReg(),
    856                                        *OpToFold.getParent(),
    857                                        *UseMI))
    858           return;
    859 
    860         UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));
    861 
    862         if (OpToFold.isImm())
    863           UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
    864         else
    865           UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
    866         UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
    867         return;
    868       }
    869 
    870       if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
    871         if (execMayBeModifiedBeforeUse(*MRI,
    872                                        UseMI->getOperand(UseOpIdx).getReg(),
    873                                        *OpToFold.getParent(),
    874                                        *UseMI))
    875           return;
    876 
    877         // %vgpr = COPY %sgpr0
    878         // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
    879         // =>
    880         // %sgpr1 = COPY %sgpr0
    881         UseMI->setDesc(TII->get(AMDGPU::COPY));
    882         UseMI->getOperand(1).setReg(OpToFold.getReg());
    883         UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
    884         UseMI->getOperand(1).setIsKill(false);
    885         UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
    886         return;
    887       }
    888     }
    889 
    890     const MCInstrDesc &UseDesc = UseMI->getDesc();
    891 
    892     // Don't fold into target independent nodes.  Target independent opcodes
    893     // don't have defined register classes.
    894     if (UseDesc.isVariadic() ||
    895         UseOp.isImplicit() ||
    896         UseDesc.OpInfo[UseOpIdx].RegClass == -1)
    897       return;
    898   }
    899 
    900   if (!FoldingImmLike) {
    901     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
    902 
    903     // FIXME: We could try to change the instruction from 64-bit to 32-bit
    904     // to enable more folding opportunities.  The shrink operands pass
    905     // already does this.
    906     return;
    907   }
    908 
    909 
    910   const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
    911   const TargetRegisterClass *FoldRC =
    912     TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
    913 
    914   // Split 64-bit constants into 32-bits for folding.
    915   if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    916     Register UseReg = UseOp.getReg();
    917     const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);
    918 
    919     if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
    920       return;
    921 
    922     APInt Imm(64, OpToFold.getImm());
    923     if (UseOp.getSubReg() == AMDGPU::sub0) {
    924       Imm = Imm.getLoBits(32);
    925     } else {
    926       assert(UseOp.getSubReg() == AMDGPU::sub1);
    927       Imm = Imm.getHiBits(32);
    928     }
    929 
    930     MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    931     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    932     return;
    933   }
    934 
    935 
    936 
    937   tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
    938 }
    939 
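        // Constant fold a 32-bit bitwise or shift operation with both source
        // operands known. Returns true and sets Result if the opcode is handled.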
    940 static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
    941                                   uint32_t LHS, uint32_t RHS) {
    942   switch (Opcode) {
    943   case AMDGPU::V_AND_B32_e64:
    944   case AMDGPU::V_AND_B32_e32:
    945   case AMDGPU::S_AND_B32:
    946     Result = LHS & RHS;
    947     return true;
    948   case AMDGPU::V_OR_B32_e64:
    949   case AMDGPU::V_OR_B32_e32:
    950   case AMDGPU::S_OR_B32:
    951     Result = LHS | RHS;
    952     return true;
    953   case AMDGPU::V_XOR_B32_e64:
    954   case AMDGPU::V_XOR_B32_e32:
    955   case AMDGPU::S_XOR_B32:
    956     Result = LHS ^ RHS;
    957     return true;
    958   case AMDGPU::S_XNOR_B32:
    959     Result = ~(LHS ^ RHS);
    960     return true;
    961   case AMDGPU::S_NAND_B32:
    962     Result = ~(LHS & RHS);
    963     return true;
    964   case AMDGPU::S_NOR_B32:
    965     Result = ~(LHS | RHS);
    966     return true;
    967   case AMDGPU::S_ANDN2_B32:
    968     Result = LHS & ~RHS;
    969     return true;
    970   case AMDGPU::S_ORN2_B32:
    971     Result = LHS | ~RHS;
    972     return true;
    973   case AMDGPU::V_LSHL_B32_e64:
    974   case AMDGPU::V_LSHL_B32_e32:
    975   case AMDGPU::S_LSHL_B32:
    976     // The instruction ignores the high bits for out of bounds shifts.
    977     Result = LHS << (RHS & 31);
    978     return true;
    979   case AMDGPU::V_LSHLREV_B32_e64:
    980   case AMDGPU::V_LSHLREV_B32_e32:
    981     Result = RHS << (LHS & 31);
    982     return true;
    983   case AMDGPU::V_LSHR_B32_e64:
    984   case AMDGPU::V_LSHR_B32_e32:
    985   case AMDGPU::S_LSHR_B32:
    986     Result = LHS >> (RHS & 31);
    987     return true;
    988   case AMDGPU::V_LSHRREV_B32_e64:
    989   case AMDGPU::V_LSHRREV_B32_e32:
    990     Result = RHS >> (LHS & 31);
    991     return true;
    992   case AMDGPU::V_ASHR_I32_e64:
    993   case AMDGPU::V_ASHR_I32_e32:
    994   case AMDGPU::S_ASHR_I32:
    995     Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    996     return true;
    997   case AMDGPU::V_ASHRREV_I32_e64:
    998   case AMDGPU::V_ASHRREV_I32_e32:
    999     Result = static_cast<int32_t>(RHS) >> (LHS & 31);
   1000     return true;
   1001   default:
   1002     return false;
   1003   }
   1004 }
   1005 
   1006 static unsigned getMovOpc(bool IsScalar) {
   1007   return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
   1008 }
   1009 
   1010 /// Remove any leftover implicit operands from mutating the instruction. e.g.
   1011 /// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
   1012 /// anymore.
   1013 static void stripExtraCopyOperands(MachineInstr &MI) {
   1014   const MCInstrDesc &Desc = MI.getDesc();
   1015   unsigned NumOps = Desc.getNumOperands() +
   1016                     Desc.getNumImplicitUses() +
   1017                     Desc.getNumImplicitDefs();
   1018 
   1019   for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
   1020     MI.RemoveOperand(I);
   1021 }
   1022 
   1023 static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
   1024   MI.setDesc(NewDesc);
   1025   stripExtraCopyOperands(MI);
   1026 }
   1027 
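        // If Op is a virtual register (with no subregister) whose def is a
        // move-immediate, return that immediate operand; otherwise return Op itself.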
   1028 static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
   1029                                                MachineOperand &Op) {
   1030   if (Op.isReg()) {
   1031     // If this has a subregister, it obviously is a register source.
   1032     if (Op.getSubReg() != AMDGPU::NoSubRegister || !Op.getReg().isVirtual())
   1033       return &Op;
   1034 
   1035     MachineInstr *Def = MRI.getVRegDef(Op.getReg());
   1036     if (Def && Def->isMoveImmediate()) {
   1037       MachineOperand &ImmSrc = Def->getOperand(1);
   1038       if (ImmSrc.isImm())
   1039         return &ImmSrc;
   1040     }
   1041   }
   1042 
   1043   return &Op;
   1044 }
   1045 
   1046 // Try to simplify operations with a constant that may appear after instruction
   1047 // selection.
   1048 // TODO: See if a frame index with a fixed offset can fold.
   1049 static bool tryConstantFoldOp(MachineRegisterInfo &MRI, const SIInstrInfo *TII,
   1050                               MachineInstr *MI) {
   1051   unsigned Opc = MI->getOpcode();
   1052 
   1053   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
   1054   if (Src0Idx == -1)
   1055     return false;
   1056   MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
   1057 
   1058   if ((Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
   1059        Opc == AMDGPU::S_NOT_B32) &&
   1060       Src0->isImm()) {
   1061     MI->getOperand(1).ChangeToImmediate(~Src0->getImm());
   1062     mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
   1063     return true;
   1064   }
   1065 
   1066   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
   1067   if (Src1Idx == -1)
   1068     return false;
   1069   MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));
   1070 
   1071   if (!Src0->isImm() && !Src1->isImm())
   1072     return false;
   1073 
   1074   // and k0, k1 -> v_mov_b32 (k0 & k1)
   1075   // or k0, k1 -> v_mov_b32 (k0 | k1)
   1076   // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
   1077   if (Src0->isImm() && Src1->isImm()) {
   1078     int32_t NewImm;
   1079     if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
   1080       return false;
   1081 
   1082     const SIRegisterInfo &TRI = TII->getRegisterInfo();
   1083     bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());
   1084 
   1085     // Be careful to change the right operand; src0 may belong to a different
   1086     // instruction.
   1087     MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
   1088     MI->RemoveOperand(Src1Idx);
   1089     mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
   1090     return true;
   1091   }
   1092 
   1093   if (!MI->isCommutable())
   1094     return false;
   1095 
   1096   if (Src0->isImm() && !Src1->isImm()) {
   1097     std::swap(Src0, Src1);
   1098     std::swap(Src0Idx, Src1Idx);
   1099   }
   1100 
   1101   int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
   1102   if (Opc == AMDGPU::V_OR_B32_e64 ||
   1103       Opc == AMDGPU::V_OR_B32_e32 ||
   1104       Opc == AMDGPU::S_OR_B32) {
   1105     if (Src1Val == 0) {
   1106       // y = or x, 0 => y = copy x
   1107       MI->RemoveOperand(Src1Idx);
   1108       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
   1109     } else if (Src1Val == -1) {
   1110       // y = or x, -1 => y = v_mov_b32 -1
   1111       MI->RemoveOperand(Src1Idx);
   1112       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
   1113     } else
   1114       return false;
   1115 
   1116     return true;
   1117   }
   1118 
   1119   if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
   1120       MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
   1121       MI->getOpcode() == AMDGPU::S_AND_B32) {
   1122     if (Src1Val == 0) {
   1123       // y = and x, 0 => y = v_mov_b32 0
   1124       MI->RemoveOperand(Src0Idx);
   1125       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
   1126     } else if (Src1Val == -1) {
   1127       // y = and x, -1 => y = copy x
   1128       MI->RemoveOperand(Src1Idx);
   1129       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
   1130       stripExtraCopyOperands(*MI);
   1131     } else
   1132       return false;
   1133 
   1134     return true;
   1135   }
   1136 
   1137   if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
   1138       MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
   1139       MI->getOpcode() == AMDGPU::S_XOR_B32) {
   1140     if (Src1Val == 0) {
   1141       // y = xor x, 0 => y = copy x
   1142       MI->RemoveOperand(Src1Idx);
   1143       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
   1144       return true;
   1145     }
   1146   }
   1147 
   1148   return false;
   1149 }
   1150 
   1151 // Try to fold an instruction into a simpler one
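        // e.g. a V_CNDMASK_B32 whose two data sources are identical (and which has
        // no source modifiers set) is rewritten into a COPY or V_MOV_B32 of that
        // source.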
   1152 bool SIFoldOperands::tryFoldCndMask(MachineInstr &MI) const {
   1153   unsigned Opc = MI.getOpcode();
   1154   if (Opc != AMDGPU::V_CNDMASK_B32_e32 && Opc != AMDGPU::V_CNDMASK_B32_e64 &&
   1155       Opc != AMDGPU::V_CNDMASK_B64_PSEUDO)
   1156     return false;
   1157 
   1158   MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
   1159   MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
   1160   if (!Src1->isIdenticalTo(*Src0)) {
   1161     auto *Src0Imm = getImmOrMaterializedImm(*MRI, *Src0);
   1162     auto *Src1Imm = getImmOrMaterializedImm(*MRI, *Src1);
   1163     if (!Src1Imm->isIdenticalTo(*Src0Imm))
   1164       return false;
   1165   }
   1166 
   1167   int Src1ModIdx =
   1168       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
   1169   int Src0ModIdx =
   1170       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
   1171   if ((Src1ModIdx != -1 && MI.getOperand(Src1ModIdx).getImm() != 0) ||
   1172       (Src0ModIdx != -1 && MI.getOperand(Src0ModIdx).getImm() != 0))
   1173     return false;
   1174 
   1175   LLVM_DEBUG(dbgs() << "Folded " << MI << " into ");
   1176   auto &NewDesc =
   1177       TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
   1178   int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
   1179   if (Src2Idx != -1)
   1180     MI.RemoveOperand(Src2Idx);
   1181   MI.RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
   1182   if (Src1ModIdx != -1)
   1183     MI.RemoveOperand(Src1ModIdx);
   1184   if (Src0ModIdx != -1)
   1185     MI.RemoveOperand(Src0ModIdx);
   1186   mutateCopyOp(MI, NewDesc);
   1187   LLVM_DEBUG(dbgs() << MI);
   1188   return true;
   1189 }
   1190 
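        // Fold OpToFold, the source of the foldable mov/copy MI, into all non-debug
        // uses of MI's destination register: constant fold uses where possible,
        // collect the remaining fold candidates, and then apply them.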
   1191 void SIFoldOperands::foldInstOperand(MachineInstr &MI,
   1192                                      MachineOperand &OpToFold) const {
   1193   // We need to mutate the operands of new mov instructions to add implicit
   1194   // uses of EXEC, but adding them invalidates the use_iterator, so defer
   1195   // this.
   1196   SmallVector<MachineInstr *, 4> CopiesToReplace;
   1197   SmallVector<FoldCandidate, 4> FoldList;
   1198   MachineOperand &Dst = MI.getOperand(0);
   1199 
   1200   if (OpToFold.isImm()) {
   1201     for (auto &UseMI :
   1202          make_early_inc_range(MRI->use_nodbg_instructions(Dst.getReg()))) {
   1203       // Folding the immediate may reveal operations that can be constant
   1204       // folded or replaced with a copy. This can happen for example after
   1205       // frame indices are lowered to constants or from splitting 64-bit
   1206       // constants.
   1207       //
   1208       // We may also encounter cases where one or both operands are
   1209       // immediates materialized into a register, which would ordinarily not
   1210       // be folded due to multiple uses or operand constraints.
   1211       if (tryConstantFoldOp(*MRI, TII, &UseMI))
   1212         LLVM_DEBUG(dbgs() << "Constant folded " << UseMI);
   1213     }
   1214   }
   1215 
   1216   bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
   1217   if (FoldingImm) {
   1218     unsigned NumLiteralUses = 0;
   1219     MachineOperand *NonInlineUse = nullptr;
   1220     int NonInlineUseOpNo = -1;
   1221 
   1222     for (auto &Use :
   1223          make_early_inc_range(MRI->use_nodbg_operands(Dst.getReg()))) {
   1224       MachineInstr *UseMI = Use.getParent();
   1225       unsigned OpNo = UseMI->getOperandNo(&Use);
   1226 
   1227       // Try to fold any inline immediate uses, and then only fold other
   1228       // constants if they have one use.
   1229       //
   1230       // The legality of the inline immediate must be checked based on the use
   1231       // operand, not the defining instruction, because 32-bit instructions
   1232       // with 32-bit inline immediate sources may be used to materialize
   1233       // constants used in 16-bit operands.
   1234       //
   1235       // e.g. it is unsafe to fold:
   1236       //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
   1237       //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00
   1238 
   1239       // Folding immediates with more than one use will increase program size.
   1240       // FIXME: This will also reduce register usage, which may be better
   1241       // in some cases. A better heuristic is needed.
   1242       if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
   1243         foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
   1244       } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
   1245         foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
   1246       } else {
   1247         if (++NumLiteralUses == 1) {
   1248           NonInlineUse = &Use;
   1249           NonInlineUseOpNo = OpNo;
   1250         }
   1251       }
   1252     }
   1253 
   1254     if (NumLiteralUses == 1) {
   1255       MachineInstr *UseMI = NonInlineUse->getParent();
   1256       foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
   1257     }
   1258   } else {
   1259     // Folding register.
   1260     SmallVector <MachineOperand *, 4> UsesToProcess;
   1261     for (auto &Use : MRI->use_nodbg_operands(Dst.getReg()))
   1262       UsesToProcess.push_back(&Use);
   1263     for (auto U : UsesToProcess) {
   1264       MachineInstr *UseMI = U->getParent();
   1265 
   1266       foldOperand(OpToFold, UseMI, UseMI->getOperandNo(U),
   1267         FoldList, CopiesToReplace);
   1268     }
   1269   }
   1270 
   1271   MachineFunction *MF = MI.getParent()->getParent();
   1272   // Make sure we add EXEC uses to any new v_mov instructions created.
   1273   for (MachineInstr *Copy : CopiesToReplace)
   1274     Copy->addImplicitDefUseOperands(*MF);
   1275 
   1276   for (FoldCandidate &Fold : FoldList) {
   1277     assert(!Fold.isReg() || Fold.OpToFold);
   1278     if (Fold.isReg() && Fold.OpToFold->getReg().isVirtual()) {
   1279       Register Reg = Fold.OpToFold->getReg();
   1280       MachineInstr *DefMI = Fold.OpToFold->getParent();
   1281       if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
   1282           execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
   1283         continue;
   1284     }
   1285     if (updateOperand(Fold, *TII, *TRI, *ST)) {
   1286       // Clear kill flags.
   1287       if (Fold.isReg()) {
   1288         assert(Fold.OpToFold && Fold.OpToFold->isReg());
   1289         // FIXME: Probably shouldn't bother trying to fold if not an
   1290         // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
   1291         // copies.
   1292         MRI->clearKillFlags(Fold.OpToFold->getReg());
   1293       }
   1294       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
   1295                         << static_cast<int>(Fold.UseOpNo) << " of "
   1296                         << *Fold.UseMI);
   1297     } else if (Fold.isCommuted()) {
   1298       // Restore the instruction's original operand order if the fold failed.
   1299       TII->commuteInstruction(*Fold.UseMI, false);
   1300     }
   1301   }
   1302 }
   1303 
   1304 // Clamp patterns are canonically selected to v_max_* instructions, so only
   1305 // handle them.
   1306 const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
   1307   unsigned Op = MI.getOpcode();
   1308   switch (Op) {
   1309   case AMDGPU::V_MAX_F32_e64:
   1310   case AMDGPU::V_MAX_F16_e64:
   1311   case AMDGPU::V_MAX_F64_e64:
   1312   case AMDGPU::V_PK_MAX_F16: {
   1313     if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
   1314       return nullptr;
   1315 
   1316     // Make sure sources are identical.
   1317     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
   1318     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
   1319     if (!Src0->isReg() || !Src1->isReg() ||
   1320         Src0->getReg() != Src1->getReg() ||
   1321         Src0->getSubReg() != Src1->getSubReg() ||
   1322         Src0->getSubReg() != AMDGPU::NoSubRegister)
   1323       return nullptr;
   1324 
   1325     // Can't fold up if we have modifiers.
   1326     if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
   1327       return nullptr;
   1328 
   1329     unsigned Src0Mods
   1330       = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
   1331     unsigned Src1Mods
   1332       = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();
   1333 
   1334     // Having a 0 op_sel_hi would require swizzling the output in the source
   1335     // instruction, which we can't do.
   1336     unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
   1337                                                       : 0u;
   1338     if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
   1339       return nullptr;
   1340     return Src0;
   1341   }
   1342   default:
   1343     return nullptr;
   1344   }
   1345 }
   1346 
   1347 // FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
   1348 bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
   1349   const MachineOperand *ClampSrc = isClamp(MI);
   1350   if (!ClampSrc || !MRI->hasOneNonDBGUser(ClampSrc->getReg()))
   1351     return false;
   1352 
   1353   MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
   1354 
   1355   // The type of clamp must be compatible.
   1356   if (TII->getClampMask(*Def) != TII->getClampMask(MI))
   1357     return false;
   1358 
   1359   MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
   1360   if (!DefClamp)
   1361     return false;
   1362 
   1363   LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def);
   1364 
   1365   // Clamp is applied after omod, so it is OK if omod is set.
   1366   DefClamp->setImm(1);
   1367   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
   1368   MI.eraseFromParent();
   1369   return true;
   1370 }
   1371 
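        // Map the bit pattern of the multiply's constant operand (0.5, 2.0 or 4.0
        // in the multiply's floating-point type) to the corresponding output
        // modifier value, or SIOutMods::NONE if the constant is none of those.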
   1372 static int getOModValue(unsigned Opc, int64_t Val) {
   1373   switch (Opc) {
   1374   case AMDGPU::V_MUL_F64_e64: {
   1375     switch (Val) {
   1376     case 0x3fe0000000000000: // 0.5
   1377       return SIOutMods::DIV2;
   1378     case 0x4000000000000000: // 2.0
   1379       return SIOutMods::MUL2;
   1380     case 0x4010000000000000: // 4.0
   1381       return SIOutMods::MUL4;
   1382     default:
   1383       return SIOutMods::NONE;
   1384     }
   1385   }
   1386   case AMDGPU::V_MUL_F32_e64: {
   1387     switch (static_cast<uint32_t>(Val)) {
   1388     case 0x3f000000: // 0.5
   1389       return SIOutMods::DIV2;
   1390     case 0x40000000: // 2.0
   1391       return SIOutMods::MUL2;
   1392     case 0x40800000: // 4.0
   1393       return SIOutMods::MUL4;
   1394     default:
   1395       return SIOutMods::NONE;
   1396     }
   1397   }
   1398   case AMDGPU::V_MUL_F16_e64: {
   1399     switch (static_cast<uint16_t>(Val)) {
   1400     case 0x3800: // 0.5
   1401       return SIOutMods::DIV2;
   1402     case 0x4000: // 2.0
   1403       return SIOutMods::MUL2;
   1404     case 0x4400: // 4.0
   1405       return SIOutMods::MUL4;
   1406     default:
   1407       return SIOutMods::NONE;
   1408     }
   1409   }
   1410   default:
   1411     llvm_unreachable("invalid mul opcode");
   1412   }
   1413 }
   1414 
   1415 // FIXME: Does this really not support denormals with f16?
   1416 // FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
   1417 // handled, so will anything other than that break?
   1418 std::pair<const MachineOperand *, int>
   1419 SIFoldOperands::isOMod(const MachineInstr &MI) const {
   1420   unsigned Op = MI.getOpcode();
   1421   switch (Op) {
   1422   case AMDGPU::V_MUL_F64_e64:
   1423   case AMDGPU::V_MUL_F32_e64:
   1424   case AMDGPU::V_MUL_F16_e64: {
   1425     // If output denormals are enabled, omod is ignored.
   1426     if ((Op == AMDGPU::V_MUL_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
   1427         ((Op == AMDGPU::V_MUL_F64_e64 || Op == AMDGPU::V_MUL_F16_e64) &&
   1428          MFI->getMode().FP64FP16OutputDenormals))
   1429       return std::make_pair(nullptr, SIOutMods::NONE);
   1430 
   1431     const MachineOperand *RegOp = nullptr;
   1432     const MachineOperand *ImmOp = nullptr;
   1433     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
   1434     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
   1435     if (Src0->isImm()) {
   1436       ImmOp = Src0;
   1437       RegOp = Src1;
   1438     } else if (Src1->isImm()) {
   1439       ImmOp = Src1;
   1440       RegOp = Src0;
   1441     } else
   1442       return std::make_pair(nullptr, SIOutMods::NONE);
   1443 
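            // Only fold if the constant encodes exactly 0.5, 2.0 or 4.0 and neither
            // source modifiers, clamp, nor an existing omod are present.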
   1444     int OMod = getOModValue(Op, ImmOp->getImm());
   1445     if (OMod == SIOutMods::NONE ||
   1446         TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
   1447         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
   1448         TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
   1449         TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
   1450       return std::make_pair(nullptr, SIOutMods::NONE);
   1451 
   1452     return std::make_pair(RegOp, OMod);
   1453   }
   1454   case AMDGPU::V_ADD_F64_e64:
   1455   case AMDGPU::V_ADD_F32_e64:
   1456   case AMDGPU::V_ADD_F16_e64: {
   1457     // If output denormals are enabled, omod is ignored.
   1458     if ((Op == AMDGPU::V_ADD_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
   1459         ((Op == AMDGPU::V_ADD_F64_e64 || Op == AMDGPU::V_ADD_F16_e64) &&
   1460          MFI->getMode().FP64FP16OutputDenormals))
   1461       return std::make_pair(nullptr, SIOutMods::NONE);
   1462 
   1463     // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
   1464     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
   1465     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
   1466 
   1467     if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
   1468         Src0->getSubReg() == Src1->getSubReg() &&
   1469         !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
   1470         !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
   1471         !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
   1472         !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
   1473       return std::make_pair(Src0, SIOutMods::MUL2);
   1474 
   1475     return std::make_pair(nullptr, SIOutMods::NONE);
   1476   }
   1477   default:
   1478     return std::make_pair(nullptr, SIOutMods::NONE);
   1479   }
   1480 }
   1481 
   1482 // FIXME: Does this need to check IEEE bit on function?
   1483 bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
   1484   const MachineOperand *RegOp;
   1485   int OMod;
   1486   std::tie(RegOp, OMod) = isOMod(MI);
   1487   if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
   1488       RegOp->getSubReg() != AMDGPU::NoSubRegister ||
   1489       !MRI->hasOneNonDBGUser(RegOp->getReg()))
   1490     return false;
   1491 
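          // The omod can only be folded into the defining instruction if that
          // instruction has an omod operand which is still unset.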
   1492   MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
   1493   MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
   1494   if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
   1495     return false;
   1496 
   1497   // Clamp is applied after omod. If the source already has clamp set, don't
   1498   // fold it.
   1499   if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
   1500     return false;
   1501 
   1502   LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def);
   1503 
   1504   DefOMod->setImm(OMod);
   1505   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
   1506   MI.eraseFromParent();
   1507   return true;
   1508 }
   1509 
   1510 // Try to fold a reg_sequence with vgpr output and agpr inputs into an
   1511 // instruction which can take an agpr. So far that means a store.
   1512 bool SIFoldOperands::tryFoldRegSequence(MachineInstr &MI) {
   1513   assert(MI.isRegSequence());
   1514   auto Reg = MI.getOperand(0).getReg();
   1515 
   1516   if (!ST->hasGFX90AInsts() || !TRI->isVGPR(*MRI, Reg) ||
   1517       !MRI->hasOneNonDBGUse(Reg))
   1518     return false;
   1519 
   1520   SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
   1521   if (!getRegSeqInit(Defs, Reg, MCOI::OPERAND_REGISTER, TII, *MRI))
   1522     return false;
   1523 
   1524   for (auto &Def : Defs) {
   1525     const auto *Op = Def.first;
   1526     if (!Op->isReg())
   1527       return false;
   1528     if (TRI->isAGPR(*MRI, Op->getReg()))
   1529       continue;
   1530     // Otherwise the value must be a full-register COPY from an AGPR.
   1531     const MachineInstr *SubDef = MRI->getVRegDef(Op->getReg());
   1532     if (!SubDef || !SubDef->isCopy() || SubDef->getOperand(1).getSubReg())
   1533       return false;
   1534     if (!TRI->isAGPR(*MRI, SubDef->getOperand(1).getReg()))
   1535       return false;
   1536   }
   1537 
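          // Follow a chain of single-use, full-register copies to find the
          // instruction that actually consumes the reg_sequence result.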
   1538   MachineOperand *Op = &*MRI->use_nodbg_begin(Reg);
   1539   MachineInstr *UseMI = Op->getParent();
   1540   while (UseMI->isCopy() && !Op->getSubReg()) {
   1541     Reg = UseMI->getOperand(0).getReg();
   1542     if (!TRI->isVGPR(*MRI, Reg) || !MRI->hasOneNonDBGUse(Reg))
   1543       return false;
   1544     Op = &*MRI->use_nodbg_begin(Reg);
   1545     UseMI = Op->getParent();
   1546   }
   1547 
   1548   if (Op->getSubReg())
   1549     return false;
   1550 
   1551   unsigned OpIdx = Op - &UseMI->getOperand(0);
   1552   const MCInstrDesc &InstDesc = UseMI->getDesc();
   1553   const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
   1554   switch (OpInfo.RegClass) {
   1555   case AMDGPU::AV_32RegClassID:
   1556   case AMDGPU::AV_64RegClassID:
   1557   case AMDGPU::AV_96RegClassID:
   1558   case AMDGPU::AV_128RegClassID:
   1559   case AMDGPU::AV_160RegClassID:
   1560     break;
   1561   default:
   1562     return false;
   1563   }
   1564 
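          // The use operand is in an AV (AGPR-or-VGPR) register class, so rebuild
          // the reg_sequence with an AGPR destination, taking AGPR inputs directly
          // and looking through the VGPR copies identified above.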
   1565   const auto *NewDstRC = TRI->getEquivalentAGPRClass(MRI->getRegClass(Reg));
   1566   auto Dst = MRI->createVirtualRegister(NewDstRC);
   1567   auto RS = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
   1568                     TII->get(AMDGPU::REG_SEQUENCE), Dst);
   1569 
   1570   for (unsigned I = 0; I < Defs.size(); ++I) {
   1571     MachineOperand *Def = Defs[I].first;
   1572     Def->setIsKill(false);
   1573     if (TRI->isAGPR(*MRI, Def->getReg())) {
   1574       RS.add(*Def);
   1575     } else { // This is a copy
   1576       MachineInstr *SubDef = MRI->getVRegDef(Def->getReg());
   1577       SubDef->getOperand(1).setIsKill(false);
   1578       RS.addReg(SubDef->getOperand(1).getReg(), 0, Def->getSubReg());
   1579     }
   1580     RS.addImm(Defs[I].second);
   1581   }
   1582 
   1583   Op->setReg(Dst);
   1584   if (!TII->isOperandLegal(*UseMI, OpIdx, Op)) {
   1585     Op->setReg(Reg);
   1586     RS->eraseFromParent();
   1587     return false;
   1588   }
   1589 
   1590   LLVM_DEBUG(dbgs() << "Folded " << *RS << " into " << *UseMI);
   1591 
   1592   // Erase the REG_SEQUENCE eagerly, unless we followed a chain of COPY users,
   1593   // in which case we can erase them all later in runOnMachineFunction.
   1594   if (MRI->use_nodbg_empty(MI.getOperand(0).getReg()))
   1595     MI.eraseFromParentAndMarkDBGValuesForRemoval();
   1596   return true;
   1597 }
   1598 
   1599 // Try to hoist an AGPR to VGPR copy out of the loop across an LCSSA PHI.
   1600 // This should allow folding of an AGPR into a consumer which may support it.
   1601 // I.e.:
   1602 //
   1603 // loop:                             // loop:
   1604 //   %1:vreg = COPY %0:areg          // exit:
   1605 // exit:                          => //   %1:areg = PHI %0:areg, %loop
   1606 //   %2:vreg = PHI %1:vreg, %loop    //   %2:vreg = COPY %1:areg
   1607 bool SIFoldOperands::tryFoldLCSSAPhi(MachineInstr &PHI) {
   1608   assert(PHI.isPHI());
   1609 
   1610   if (PHI.getNumExplicitOperands() != 3) // Single input LCSSA PHI
   1611     return false;
   1612 
   1613   Register PhiIn = PHI.getOperand(1).getReg();
   1614   Register PhiOut = PHI.getOperand(0).getReg();
   1615   if (PHI.getOperand(1).getSubReg() ||
   1616       !TRI->isVGPR(*MRI, PhiIn) || !TRI->isVGPR(*MRI, PhiOut))
   1617     return false;
   1618 
   1619   // A single use should not matter for correctness, but if there is another
   1620   // use inside the loop we may end up performing the copy twice.
   1621   if (!MRI->hasOneNonDBGUse(PhiIn))
   1622     return false;
   1623 
   1624   MachineInstr *Copy = MRI->getVRegDef(PhiIn);
   1625   if (!Copy || !Copy->isCopy())
   1626     return false;
   1627 
   1628   Register CopyIn = Copy->getOperand(1).getReg();
   1629   if (!TRI->isAGPR(*MRI, CopyIn) || Copy->getOperand(1).getSubReg())
   1630     return false;
   1631 
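          // Rewrite the PHI to take and produce an AGPR, then copy the result back
          // to the original VGPR right after the PHIs so existing users are
          // unaffected.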
   1632   const TargetRegisterClass *ARC = MRI->getRegClass(CopyIn);
   1633   Register NewReg = MRI->createVirtualRegister(ARC);
   1634   PHI.getOperand(1).setReg(CopyIn);
   1635   PHI.getOperand(0).setReg(NewReg);
   1636 
   1637   MachineBasicBlock *MBB = PHI.getParent();
   1638   BuildMI(*MBB, MBB->getFirstNonPHI(), Copy->getDebugLoc(),
   1639           TII->get(AMDGPU::COPY), PhiOut)
   1640     .addReg(NewReg, RegState::Kill);
   1641   Copy->eraseFromParent(); // We know this copy had a single use.
   1642 
   1643   LLVM_DEBUG(dbgs() << "Folded " << PHI);
   1644 
   1645   return true;
   1646 }
   1647 
   1648 // Attempt to convert VGPR load to an AGPR load.
   1649 bool SIFoldOperands::tryFoldLoad(MachineInstr &MI) {
   1650   assert(MI.mayLoad());
   1651   if (!ST->hasGFX90AInsts() || MI.getNumExplicitDefs() != 1)
   1652     return false;
   1653 
   1654   MachineOperand &Def = MI.getOperand(0);
   1655   if (!Def.isDef())
   1656     return false;
   1657 
   1658   Register DefReg = Def.getReg();
   1659 
   1660   if (DefReg.isPhysical() || !TRI->isVGPR(*MRI, DefReg))
   1661     return false;
   1662 
   1663   SmallVector<const MachineInstr*, 8> Users;
   1664   SmallVector<Register, 8> MoveRegs;
   1665   for (const MachineInstr &I : MRI->use_nodbg_instructions(DefReg)) {
   1666     Users.push_back(&I);
   1667   }
   1668   if (Users.empty())
   1669     return false;
   1670 
   1671   // Check that every use is either a copy to an AGPR or a reg_sequence
          // producing an AGPR.
   1672   while (!Users.empty()) {
   1673     const MachineInstr *I = Users.pop_back_val();
   1674     if (!I->isCopy() && !I->isRegSequence())
   1675       return false;
   1676     Register DstReg = I->getOperand(0).getReg();
   1677     if (TRI->isAGPR(*MRI, DstReg))
   1678       continue;
   1679     MoveRegs.push_back(DstReg);
   1680     for (const MachineInstr &U : MRI->use_nodbg_instructions(DstReg)) {
   1681       Users.push_back(&U);
   1682     }
   1683   }
   1684 
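          // Tentatively switch the loaded value to the equivalent AGPR register
          // class, and revert if the load cannot legally define an AGPR directly.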
   1685   const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
   1686   MRI->setRegClass(DefReg, TRI->getEquivalentAGPRClass(RC));
   1687   if (!TII->isOperandLegal(MI, 0, &Def)) {
   1688     MRI->setRegClass(DefReg, RC);
   1689     return false;
   1690   }
   1691 
   1692   while (!MoveRegs.empty()) {
   1693     Register Reg = MoveRegs.pop_back_val();
   1694     MRI->setRegClass(Reg, TRI->getEquivalentAGPRClass(MRI->getRegClass(Reg)));
   1695   }
   1696 
   1697   LLVM_DEBUG(dbgs() << "Folded " << MI);
   1698 
   1699   return true;
   1700 }
   1701 
   1702 bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
   1703   if (skipFunction(MF.getFunction()))
   1704     return false;
   1705 
   1706   MRI = &MF.getRegInfo();
   1707   ST = &MF.getSubtarget<GCNSubtarget>();
   1708   TII = ST->getInstrInfo();
   1709   TRI = &TII->getRegisterInfo();
   1710   MFI = MF.getInfo<SIMachineFunctionInfo>();
   1711 
   1712   // omod is ignored by hardware if IEEE bit is enabled. omod also does not
   1713   // correctly handle signed zeros.
   1714   //
   1715   // FIXME: Also need to check strictfp
   1716   bool IsIEEEMode = MFI->getMode().IEEE;
   1717   bool HasNSZ = MFI->hasNoSignedZerosFPMath();
   1718 
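          // Visit blocks in depth-first order. Within each block, track the last
          // known value written to m0 so redundant m0 writes can be erased.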
   1719   for (MachineBasicBlock *MBB : depth_first(&MF)) {
   1720     MachineOperand *CurrentKnownM0Val = nullptr;
   1721     for (auto &MI : make_early_inc_range(*MBB)) {
   1722       tryFoldCndMask(MI);
   1723 
   1724       if (MI.isRegSequence() && tryFoldRegSequence(MI))
   1725         continue;
   1726 
   1727       if (MI.isPHI() && tryFoldLCSSAPhi(MI))
   1728         continue;
   1729 
   1730       if (MI.mayLoad() && tryFoldLoad(MI))
   1731         continue;
   1732 
   1733       if (!TII->isFoldableCopy(MI)) {
   1734         // Saw an unknown clobber of m0, so we no longer know what it is.
   1735         if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
   1736           CurrentKnownM0Val = nullptr;
   1737 
   1738         // TODO: Omod might be OK if there is NSZ only on the source
   1739         // instruction, and not the omod multiply.
   1740         if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
   1741             !tryFoldOMod(MI))
   1742           tryFoldClamp(MI);
   1743 
   1744         continue;
   1745       }
   1746 
   1747       // Specially track simple redefs of m0 to the same value in a block, so
   1748       // we can erase the later ones.
   1749       if (MI.getOperand(0).getReg() == AMDGPU::M0) {
   1750         MachineOperand &NewM0Val = MI.getOperand(1);
   1751         if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
   1752           MI.eraseFromParent();
   1753           continue;
   1754         }
   1755 
   1756         // We aren't tracking other physical registers.
   1757         CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical()) ?
   1758           nullptr : &NewM0Val;
   1759         continue;
   1760       }
   1761 
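              // MI is a foldable copy or an immediate materialization; try to fold
              // its source operand into the users of its destination register.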
   1762       MachineOperand &OpToFold = MI.getOperand(1);
   1763       bool FoldingImm =
   1764           OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
   1765 
   1766       // FIXME: We could also be folding things like TargetIndexes.
   1767       if (!FoldingImm && !OpToFold.isReg())
   1768         continue;
   1769 
   1770       if (OpToFold.isReg() && !OpToFold.getReg().isVirtual())
   1771         continue;
   1772 
   1773       // Prevent folding operands backwards in the function. For example,
   1774       // the COPY opcode must not be replaced by 1 in this example:
   1775       //
   1776       //    %3 = COPY %vgpr0; VGPR_32:%3
   1777       //    ...
   1778       //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
   1779       if (!MI.getOperand(0).getReg().isVirtual())
   1780         continue;
   1781 
   1782       foldInstOperand(MI, OpToFold);
   1783 
   1784       // If we managed to fold all uses of this copy then we might as well
   1785       // delete it now.
   1786       // The only reason we need to follow chains of copies here is that
   1787       // tryFoldRegSequence looks forward through copies before folding a
   1788       // REG_SEQUENCE into its eventual users.
   1789       auto *InstToErase = &MI;
   1790       while (MRI->use_nodbg_empty(InstToErase->getOperand(0).getReg())) {
   1791         auto &SrcOp = InstToErase->getOperand(1);
   1792         auto SrcReg = SrcOp.isReg() ? SrcOp.getReg() : Register();
   1793         InstToErase->eraseFromParentAndMarkDBGValuesForRemoval();
   1794         InstToErase = nullptr;
   1795         if (!SrcReg || SrcReg.isPhysical())
   1796           break;
   1797         InstToErase = MRI->getVRegDef(SrcReg);
   1798         if (!InstToErase || !TII->isFoldableCopy(*InstToErase))
   1799           break;
   1800       }
   1801       if (InstToErase && InstToErase->isRegSequence() &&
   1802           MRI->use_nodbg_empty(InstToErase->getOperand(0).getReg()))
   1803         InstToErase->eraseFromParentAndMarkDBGValuesForRemoval();
   1804     }
   1805   }
   1806   return true;
   1807 }
   1808