Home | History | Annotate | Line # | Download | only in IR
      1 //===-- Instruction.cpp - Implement the Instruction class -----------------===//
      2 //
      3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
      4 // See https://llvm.org/LICENSE.txt for license information.
      5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
      6 //
      7 //===----------------------------------------------------------------------===//
      8 //
      9 // This file implements the Instruction class for the IR library.
     10 //
     11 //===----------------------------------------------------------------------===//
     12 
     13 #include "llvm/IR/Instruction.h"
     14 #include "llvm/ADT/DenseSet.h"
     15 #include "llvm/IR/Constants.h"
     16 #include "llvm/IR/Instructions.h"
     17 #include "llvm/IR/IntrinsicInst.h"
     18 #include "llvm/IR/Intrinsics.h"
     19 #include "llvm/IR/MDBuilder.h"
     20 #include "llvm/IR/Operator.h"
     21 #include "llvm/IR/Type.h"
     22 using namespace llvm;
     23 
/// Construct an instruction with the given type, opcode tag, and operand list.
/// If \p InsertBefore is non-null, the new instruction is linked into
/// InsertBefore's basic block immediately before it; otherwise it is left
/// unlinked (Parent stays null).
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // If requested, insert this instruction into a basic block...
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    BB->getInstList().insert(InsertBefore->getIterator(), this);
  }
}
     35 
/// Construct an instruction and append it to the end of \p InsertAtEnd,
/// which must be non-null.
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // append this instruction into the basic block
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  InsertAtEnd->getInstList().push_back(this);
}
     44 
/// Destroy an instruction. The instruction must already have been unlinked
/// from its basic block (Parent == nullptr); any metadata still referring to
/// it is redirected to undef so debug info stays well-formed.
Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");

  // Replace any extant metadata uses of this instruction with undef to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));
}
     60 
     61 
/// Update the parent basic block pointer. Called by the ilist machinery when
/// the instruction is linked into / unlinked from a block.
void Instruction::setParent(BasicBlock *P) {
  Parent = P;
}
     65 
/// Return the module this instruction belongs to, via its parent block.
/// Requires the instruction to be linked into a block.
const Module *Instruction::getModule() const {
  return getParent()->getModule();
}
     69 
/// Return the function this instruction belongs to (the parent of its parent
/// block). Requires the instruction to be linked into a block.
const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}
     73 
/// Unlink this instruction from its basic block without deleting it.
void Instruction::removeFromParent() {
  getParent()->getInstList().remove(getIterator());
}
     77 
/// Unlink this instruction from its basic block and delete it. Returns an
/// iterator to the instruction that followed it.
iplist<Instruction>::iterator Instruction::eraseFromParent() {
  return getParent()->getInstList().erase(getIterator());
}
     81 
/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction. \p InsertPos must be linked into a block.
void Instruction::insertBefore(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insert(InsertPos->getIterator(), this);
}
     87 
/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction. \p InsertPos must be linked into a block.
void Instruction::insertAfter(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insertAfter(InsertPos->getIterator(),
                                                    this);
}
     94 
/// Unlink this instruction from its current basic block and insert it into the
/// basic block that MovePos lives in, right before MovePos.
void Instruction::moveBefore(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), MovePos->getIterator());
}
    100 
/// Unlink this instruction and insert it into MovePos's block right after
/// MovePos (implemented as a move-before the following position).
void Instruction::moveAfter(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
}
    104 
/// Unlink this instruction and splice it into \p BB directly before the
/// iterator position \p I (which may be BB.end() to move to the back).
void Instruction::moveBefore(BasicBlock &BB,
                             SymbolTableList<Instruction>::iterator I) {
  assert(I == BB.end() || I->getParent() == &BB);
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
}
    110 
/// Return true if this instruction appears before \p Other in their (shared)
/// basic block. Uses cached per-block ordering numbers, lazily renumbering
/// the block when the cache has been invalidated.
bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent && "cross-BB instruction order comparison");
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}
    119 
// Set/query the nuw ("no unsigned wrap") flag. The instruction must be an
// overflowing binary operator (add/sub/mul/shl); the cast asserts otherwise.
void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

// Set/query the nsw ("no signed wrap") flag; same precondition as above.
void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

// Set the exact flag. The instruction must be a possibly-exact operator
// (udiv/sdiv/lshr/ashr); the cast asserts otherwise.
void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}
    139 
/// Clear the flags on this instruction that can turn a well-defined result
/// into poison (nuw/nsw, exact, and GEP inbounds). Opcodes without such
/// flags are left untouched.
void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;
  }
  // TODO: FastMathFlags!
}
    163 

/// Query the exact flag; the instruction must be a possibly-exact operator.
bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}
    168 
// Fast-math flag setters. Each one requires this instruction to be an
// FPMathOperator; the assert gives a readable failure before the cast.

/// Set or clear all fast-math flags at once.
void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

/// Replace the full fast-math flag set with \p FMF.
void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

/// Copy the fast-math flag set \p FMF onto this instruction.
void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}
    218 
// Fast-math flag getters. Each one requires this instruction to be an
// FPMathOperator; the assert gives a readable failure before the cast.

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

/// Return the full fast-math flag set of this FP operation.
FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}
    263 
/// Copy I's fast-math flags onto this instruction. Both instructions must be
/// FPMathOperators (enforced by the delegated-to overloads).
void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}
    267 
    268 void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
    269   // Copy the wrapping flags.
    270   if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    271     if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    272       setHasNoSignedWrap(OB->hasNoSignedWrap());
    273       setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    274     }
    275   }
    276 
    277   // Copy the exact flag.
    278   if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    279     if (isa<PossiblyExactOperator>(this))
    280       setIsExact(PE->isExact());
    281 
    282   // Copy the fast-math flags.
    283   if (auto *FP = dyn_cast<FPMathOperator>(V))
    284     if (isa<FPMathOperator>(this))
    285       copyFastMathFlags(FP->getFastMathFlags());
    286 
    287   if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    288     if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
    289       DestGEP->setIsInBounds(SrcGEP->isInBounds() | DestGEP->isInBounds());
    290 }
    291 
    292 void Instruction::andIRFlags(const Value *V) {
    293   if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    294     if (isa<OverflowingBinaryOperator>(this)) {
    295       setHasNoSignedWrap(hasNoSignedWrap() & OB->hasNoSignedWrap());
    296       setHasNoUnsignedWrap(hasNoUnsignedWrap() & OB->hasNoUnsignedWrap());
    297     }
    298   }
    299 
    300   if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    301     if (isa<PossiblyExactOperator>(this))
    302       setIsExact(isExact() & PE->isExact());
    303 
    304   if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    305     if (isa<FPMathOperator>(this)) {
    306       FastMathFlags FM = getFastMathFlags();
    307       FM &= FP->getFastMathFlags();
    308       copyFastMathFlags(FM);
    309     }
    310   }
    311 
    312   if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    313     if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
    314       DestGEP->setIsInBounds(SrcGEP->isInBounds() & DestGEP->isInBounds());
    315 }
    316 
/// Map an opcode value to its textual IR mnemonic (e.g. Add -> "add").
/// Unknown opcode values yield the "<Invalid operator> " sentinel string.
const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:    return "ret";
  case Br:     return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}
    401 
/// Return true if both instructions have the same special state. This must be
/// kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
///
/// "Special state" is the per-opcode payload not captured by operands:
/// volatility, alignment (ignored when \p IgnoreAlignment), atomic orderings
/// and sync scopes, predicates, call attributes/bundles, indices, and shuffle
/// masks. Opcodes with no such payload compare equal by default.
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
                                 bool IgnoreAlignment = false) {
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlignment() == cast<AllocaInst>(I2)->getAlignment() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();

  return true;
}
    468 
/// Return true if the two instructions are fully identical, including the
/// optional optimization flags stored in SubclassOptionalData (nuw/nsw,
/// exact, fast-math, inbounds).
bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}
    473 
/// Return true if the two instructions are identical ignoring the optional
/// poison-generating flags: same opcode, type, operands, incoming blocks (for
/// PHIs), and per-opcode special state.
bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return haveSameSpecialState(this, I);

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return haveSameSpecialState(this, I);
}
    498 
// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
//
// Returns true if this instruction performs the same operation as I: same
// opcode, matching result and operand types, and matching special state.
// Operand *values* are not compared. The flags bitmask may relax the check:
// CompareIgnoringAlignment ignores load/store/alloca alignment and
// CompareUsingScalarTypes compares only the scalar element types.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes  = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return haveSameSpecialState(this, I, IgnoreAlignment);
}
    524 
    525 bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
    526   for (const Use &U : uses()) {
    527     // PHI nodes uses values in the corresponding predecessor block.  For other
    528     // instructions, just check to see whether the parent of the use matches up.
    529     const Instruction *I = cast<Instruction>(U.getUser());
    530     const PHINode *PN = dyn_cast<PHINode>(I);
    531     if (!PN) {
    532       if (I->getParent() != BB)
    533         return true;
    534       continue;
    535     }
    536 
    537     if (PN->getIncomingBlock(U) != BB)
    538       return true;
    539   }
    540   return false;
    541 }
    542 
/// Return true if this instruction may read memory. Calls defer to their
/// attributes; an unordered store does not read, but an atomic/volatile one
/// is conservatively treated as if it might.
bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->doesNotReadMemory();
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}
    562 
/// Return true if this instruction may write to memory. Calls defer to their
/// attributes; an unordered load does not write, but an atomic/volatile one
/// is conservatively treated as if it might.
bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    return !cast<LoadInst>(this)->isUnordered();
  }
}
    582 
/// Return true if this instruction has atomic semantics: cmpxchg, atomicrmw,
/// fence, or a load/store with an ordering other than NotAtomic.
bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}
    597 
/// Return true if this atomic instruction performs a load (cmpxchg and
/// atomicrmw read-modify-write, so they count). Precondition: isAtomic().
bool Instruction::hasAtomicLoad() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Load:
    return true;
  }
}
    609 
/// Return true if this atomic instruction performs a store (cmpxchg and
/// atomicrmw read-modify-write, so they count). Precondition: isAtomic().
bool Instruction::hasAtomicStore() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Store:
    return true;
  }
}
    621 
/// Return true if this instruction carries a volatile marker: volatile
/// load/store/cmpxchg/atomicrmw, volatile memory intrinsics, or the matrix
/// load/store intrinsics with their IsVolatile argument set.
bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      // The IsVolatile flag is argument 2 of column-major load and argument 3
      // of column-major store, per the intrinsic signatures.
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}
    651 
    652 bool Instruction::mayThrow() const {
    653   if (const CallInst *CI = dyn_cast<CallInst>(this))
    654     return !CI->doesNotThrow();
    655   if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
    656     return CRI->unwindsToCaller();
    657   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
    658     return CatchSwitch->unwindsToCaller();
    659   return isa<ResumeInst>(this);
    660 }
    661 
    662 bool Instruction::isSafeToRemove() const {
    663   return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
    664          !this->isTerminator();
    665 }
    666 
/// Return true if this instruction is guaranteed to transfer control back to
/// its successor (i.e. cannot loop forever or abort). Non-call instructions
/// always do; calls need the willreturn attribute or the intrinsic workaround
/// described below.
bool Instruction::willReturn() const {
  if (const auto *CB = dyn_cast<CallBase>(this))
    // FIXME: Temporarily assume that all side-effect free intrinsics will
    // return. Remove this workaround once all intrinsics are appropriately
    // annotated.
    return CB->hasFnAttr(Attribute::WillReturn) ||
           (isa<IntrinsicInst>(CB) && CB->onlyReadsMemory());
  return true;
}
    676 
    677 bool Instruction::isLifetimeStartOrEnd() const {
    678   auto *II = dyn_cast<IntrinsicInst>(this);
    679   if (!II)
    680     return false;
    681   Intrinsic::ID ID = II->getIntrinsicID();
    682   return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
    683 }
    684 
    685 bool Instruction::isLaunderOrStripInvariantGroup() const {
    686   auto *II = dyn_cast<IntrinsicInst>(this);
    687   if (!II)
    688     return false;
    689   Intrinsic::ID ID = II->getIntrinsicID();
    690   return ID == Intrinsic::launder_invariant_group ||
    691          ID == Intrinsic::strip_invariant_group;
    692 }
    693 
/// Return true if this is a debug-info intrinsic or a pseudo-probe intrinsic,
/// i.e. an instruction with no effect on program semantics.
bool Instruction::isDebugOrPseudoInst() const {
  return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
}
    697 
    698 const Instruction *
    699 Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
    700   for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
    701     if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
    702       return I;
    703   return nullptr;
    704 }
    705 
    706 const Instruction *
    707 Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
    708   for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
    709     if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
    710       return I;
    711   return nullptr;
    712 }
    713 
/// Return true if this instruction is associative. Integer opcodes are
/// decided by the static isAssociative(Opcode); fadd/fmul additionally
/// require the reassoc and nsz fast-math flags.
bool Instruction::isAssociative() const {
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}
    728 
/// Return true if this instruction is commutative. Intrinsic calls answer for
/// themselves; everything else is decided per opcode.
bool Instruction::isCommutative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isCommutative();
  // TODO: Should allow icmp/fcmp?
  return isCommutative(getOpcode());
}
    735 
/// Return the number of successors of this terminator. Dispatches on opcode
/// to the concrete terminator class via the Instruction.def X-macro; asserts
/// on non-terminators.
unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
    747 
// Return the idx'th successor block of this terminator instruction.
// Dispatches to the concrete terminator subclass via the HANDLE_TERM_INST
// case table generated from Instruction.def; asserts on non-terminators.
BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
    759 
// Replace the idx'th successor of this terminator instruction with block B.
// Dispatches to the concrete terminator subclass via the HANDLE_TERM_INST
// case table generated from Instruction.def; asserts on non-terminators.
void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
    771 
    772 void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
    773   for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
    774        Idx != NumSuccessors; ++Idx)
    775     if (getSuccessor(Idx) == OldBB)
    776       setSuccessor(Idx, NewBB);
    777 }
    778 
// Fallback cloneImpl: every concrete Instruction subclass must override this;
// reaching it indicates a subclass forgot to implement cloneImpl.
Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}
    782 
    783 void Instruction::swapProfMetadata() {
    784   MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
    785   if (!ProfileData || ProfileData->getNumOperands() != 3 ||
    786       !isa<MDString>(ProfileData->getOperand(0)))
    787     return;
    788 
    789   MDString *MDName = cast<MDString>(ProfileData->getOperand(0));
    790   if (MDName->getString() != "branch_weights")
    791     return;
    792 
    793   // The first operand is the name. Fetch them backwards and build a new one.
    794   Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
    795                      ProfileData->getOperand(1)};
    796   setMetadata(LLVMContext::MD_prof,
    797               MDNode::get(ProfileData->getContext(), Ops));
    798 }
    799 
    800 void Instruction::copyMetadata(const Instruction &SrcInst,
    801                                ArrayRef<unsigned> WL) {
    802   if (!SrcInst.hasMetadata())
    803     return;
    804 
    805   DenseSet<unsigned> WLS;
    806   for (unsigned M : WL)
    807     WLS.insert(M);
    808 
    809   // Otherwise, enumerate and copy over metadata from the old instruction to the
    810   // new one.
    811   SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
    812   SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
    813   for (const auto &MD : TheMDs) {
    814     if (WL.empty() || WLS.count(MD.first))
    815       setMetadata(MD.first, MD.second);
    816   }
    817   if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    818     setDebugLoc(SrcInst.getDebugLoc());
    819 }
    820 
// Create a copy of this instruction that is identical in all ways except:
// it has no parent and no name. Dispatches to the concrete subclass's
// cloneImpl via the HANDLE_INST case table generated from Instruction.def,
// then copies the optional flags and all metadata onto the new instruction.
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  // Preserve fast-math/wrap-style optional flags and all metadata
  // (copyMetadata with no whitelist copies everything, incl. debug loc).
  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}
    838