      1 //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
      2 //
      3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
      4 // See https://llvm.org/LICENSE.txt for license information.
      5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
      6 //
      7 //===----------------------------------------------------------------------===//
      8 //
      9 /// \file
     10 /// This file exposes an interface to building/using memory SSA to
     11 /// walk memory instructions using a use/def graph.
     12 ///
/// The MemorySSA class builds an SSA form that links together memory access
/// instructions such as loads, stores, atomics, and calls. Additionally, it
/// does a trivial form of "heap versioning": every time the memory state
/// changes in the program, we generate a new heap version. It generates
/// MemoryDef/Use/Phi nodes that are overlaid on top of the existing
/// instructions.
     18 ///
     19 /// As a trivial example,
     20 /// define i32 @main() #0 {
     21 /// entry:
     22 ///   %call = call noalias i8* @_Znwm(i64 4) #2
     23 ///   %0 = bitcast i8* %call to i32*
     24 ///   %call1 = call noalias i8* @_Znwm(i64 4) #2
     25 ///   %1 = bitcast i8* %call1 to i32*
     26 ///   store i32 5, i32* %0, align 4
     27 ///   store i32 7, i32* %1, align 4
     28 ///   %2 = load i32* %0, align 4
     29 ///   %3 = load i32* %1, align 4
     30 ///   %add = add nsw i32 %2, %3
     31 ///   ret i32 %add
     32 /// }
     33 ///
     34 /// Will become
     35 /// define i32 @main() #0 {
     36 /// entry:
     37 ///   ; 1 = MemoryDef(0)
     38 ///   %call = call noalias i8* @_Znwm(i64 4) #3
     39 ///   %2 = bitcast i8* %call to i32*
     40 ///   ; 2 = MemoryDef(1)
     41 ///   %call1 = call noalias i8* @_Znwm(i64 4) #3
     42 ///   %4 = bitcast i8* %call1 to i32*
     43 ///   ; 3 = MemoryDef(2)
     44 ///   store i32 5, i32* %2, align 4
     45 ///   ; 4 = MemoryDef(3)
     46 ///   store i32 7, i32* %4, align 4
     47 ///   ; MemoryUse(3)
     48 ///   %7 = load i32* %2, align 4
     49 ///   ; MemoryUse(4)
     50 ///   %8 = load i32* %4, align 4
     51 ///   %add = add nsw i32 %7, %8
     52 ///   ret i32 %add
     53 /// }
     54 ///
/// Given this form, all the stores that could ever affect the load at %8 can be
/// found by taking the MemoryUse associated with it and walking from use to
/// def until you hit the top of the function.
     58 ///
     59 /// Each def also has a list of users associated with it, so you can walk from
     60 /// both def to users, and users to defs. Note that we disambiguate MemoryUses,
     61 /// but not the RHS of MemoryDefs. You can see this above at %7, which would
     62 /// otherwise be a MemoryUse(4). Being disambiguated means that for a given
     63 /// store, all the MemoryUses on its use lists are may-aliases of that store
     64 /// (but the MemoryDefs on its use list may not be).
     65 ///
/// MemoryDefs are not disambiguated because that would require multiple
/// reaching definitions, which in turn would require multiple phis, and
/// multiple MemoryAccesses per instruction.
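///
/// As a minimal sketch (illustrative only; \c MSSA and \c LoadI are assumed to
/// be an already-built MemorySSA instance and a load instruction), the
/// use-to-def walk described above looks like:
/// \code
///   MemoryAccess *MA = MSSA.getMemoryAccess(LoadI);
///   while (MA && !MSSA.isLiveOnEntryDef(MA)) {
///     if (auto *UOD = dyn_cast<MemoryUseOrDef>(MA))
///       MA = UOD->getDefiningAccess();
///     else
///       break; // Reached a MemoryPhi; follow its incoming values instead.
///   }
/// \endcode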
     69 //
     70 //===----------------------------------------------------------------------===//
     71 
     72 #ifndef LLVM_ANALYSIS_MEMORYSSA_H
     73 #define LLVM_ANALYSIS_MEMORYSSA_H
     74 
     75 #include "llvm/ADT/DenseMap.h"
     76 #include "llvm/ADT/GraphTraits.h"
     77 #include "llvm/ADT/SmallPtrSet.h"
     78 #include "llvm/ADT/SmallVector.h"
     79 #include "llvm/ADT/ilist.h"
     80 #include "llvm/ADT/ilist_node.h"
     81 #include "llvm/ADT/iterator.h"
     82 #include "llvm/ADT/iterator_range.h"
     83 #include "llvm/ADT/simple_ilist.h"
     84 #include "llvm/Analysis/AliasAnalysis.h"
     85 #include "llvm/Analysis/MemoryLocation.h"
     86 #include "llvm/Analysis/PHITransAddr.h"
     87 #include "llvm/IR/BasicBlock.h"
     88 #include "llvm/IR/DerivedUser.h"
     89 #include "llvm/IR/Dominators.h"
     90 #include "llvm/IR/Module.h"
     91 #include "llvm/IR/Operator.h"
     92 #include "llvm/IR/Type.h"
     93 #include "llvm/IR/Use.h"
     94 #include "llvm/IR/User.h"
     95 #include "llvm/IR/Value.h"
     96 #include "llvm/IR/ValueHandle.h"
     97 #include "llvm/Pass.h"
     98 #include "llvm/Support/Casting.h"
     99 #include "llvm/Support/CommandLine.h"
    100 #include <algorithm>
    101 #include <cassert>
    102 #include <cstddef>
    103 #include <iterator>
    104 #include <memory>
    105 #include <utility>
    106 
    107 namespace llvm {
    108 
    109 /// Enables memory ssa as a dependency for loop passes.
    110 extern cl::opt<bool> EnableMSSALoopDependency;
    111 
    112 class AllocaInst;
    113 class Function;
    114 class Instruction;
    115 class MemoryAccess;
    116 class MemorySSAWalker;
    117 class LLVMContext;
    118 class raw_ostream;
    119 
    120 namespace MSSAHelpers {
    121 
    122 struct AllAccessTag {};
    123 struct DefsOnlyTag {};
    124 
    125 } // end namespace MSSAHelpers
    126 
    127 enum : unsigned {
    128   // Used to signify what the default invalid ID is for MemoryAccess's
    129   // getID()
    130   INVALID_MEMORYACCESS_ID = -1U
    131 };
    132 
    133 template <class T> class memoryaccess_def_iterator_base;
    134 using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
    135 using const_memoryaccess_def_iterator =
    136     memoryaccess_def_iterator_base<const MemoryAccess>;
    137 
    138 // The base for all memory accesses. All memory accesses in a block are
    139 // linked together using an intrusive list.
    140 class MemoryAccess
    141     : public DerivedUser,
    142       public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
    143       public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
    144 public:
    145   using AllAccessType =
    146       ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
    147   using DefsOnlyType =
    148       ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
    149 
    150   MemoryAccess(const MemoryAccess &) = delete;
    151   MemoryAccess &operator=(const MemoryAccess &) = delete;
    152 
    153   void *operator new(size_t) = delete;
    154 
    155   // Methods for support type inquiry through isa, cast, and
    156   // dyn_cast
    157   static bool classof(const Value *V) {
    158     unsigned ID = V->getValueID();
    159     return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
    160   }
    161 
    162   BasicBlock *getBlock() const { return Block; }
    163 
    164   void print(raw_ostream &OS) const;
    165   void dump() const;
    166 
    167   /// The user iterators for a memory access
    168   using iterator = user_iterator;
    169   using const_iterator = const_user_iterator;
    170 
    171   /// This iterator walks over all of the defs in a given
    172   /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
    173   /// MemoryUse/MemoryDef, this walks the defining access.
    174   memoryaccess_def_iterator defs_begin();
    175   const_memoryaccess_def_iterator defs_begin() const;
    176   memoryaccess_def_iterator defs_end();
    177   const_memoryaccess_def_iterator defs_end() const;
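
  /// For example (a sketch; \c MA and \c Visit are illustrative), every
  /// defining access can be visited with:
  /// \code
  ///   for (auto DI = MA->defs_begin(), DE = MA->defs_end(); DI != DE; ++DI)
  ///     Visit(*DI);
  /// \endcode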
    178 
    179   /// Get the iterators for the all access list and the defs only list
    180   /// We default to the all access list.
    181   AllAccessType::self_iterator getIterator() {
    182     return this->AllAccessType::getIterator();
    183   }
    184   AllAccessType::const_self_iterator getIterator() const {
    185     return this->AllAccessType::getIterator();
    186   }
    187   AllAccessType::reverse_self_iterator getReverseIterator() {
    188     return this->AllAccessType::getReverseIterator();
    189   }
    190   AllAccessType::const_reverse_self_iterator getReverseIterator() const {
    191     return this->AllAccessType::getReverseIterator();
    192   }
    193   DefsOnlyType::self_iterator getDefsIterator() {
    194     return this->DefsOnlyType::getIterator();
    195   }
    196   DefsOnlyType::const_self_iterator getDefsIterator() const {
    197     return this->DefsOnlyType::getIterator();
    198   }
    199   DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
    200     return this->DefsOnlyType::getReverseIterator();
    201   }
    202   DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
    203     return this->DefsOnlyType::getReverseIterator();
    204   }
    205 
    206 protected:
    207   friend class MemoryDef;
    208   friend class MemoryPhi;
    209   friend class MemorySSA;
    210   friend class MemoryUse;
    211   friend class MemoryUseOrDef;
    212 
    213   /// Used by MemorySSA to change the block of a MemoryAccess when it is
    214   /// moved.
    215   void setBlock(BasicBlock *BB) { Block = BB; }
    216 
    217   /// Used for debugging and tracking things about MemoryAccesses.
    218   /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
    219   inline unsigned getID() const;
    220 
    221   MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
    222                BasicBlock *BB, unsigned NumOperands)
    223       : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
    224         Block(BB) {}
    225 
    226   // Use deleteValue() to delete a generic MemoryAccess.
    227   ~MemoryAccess() = default;
    228 
    229 private:
    230   BasicBlock *Block;
    231 };
    232 
    233 template <>
    234 struct ilist_alloc_traits<MemoryAccess> {
    235   static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
    236 };
    237 
    238 inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
    239   MA.print(OS);
    240   return OS;
    241 }
    242 
    243 /// Class that has the common methods + fields of memory uses/defs. It's
    244 /// a little awkward to have, but there are many cases where we want either a
    245 /// use or def, and there are many cases where uses are needed (defs aren't
    246 /// acceptable), and vice-versa.
    247 ///
    248 /// This class should never be instantiated directly; make a MemoryUse or
    249 /// MemoryDef instead.
    250 class MemoryUseOrDef : public MemoryAccess {
    251 public:
    252   void *operator new(size_t) = delete;
    253 
    254   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
    255 
    256   /// Get the instruction that this MemoryUse represents.
    257   Instruction *getMemoryInst() const { return MemoryInstruction; }
    258 
    259   /// Get the access that produces the memory state used by this Use.
    260   MemoryAccess *getDefiningAccess() const { return getOperand(0); }
    261 
    262   static bool classof(const Value *MA) {
    263     return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
    264   }
    265 
    266   // Sadly, these have to be public because they are needed in some of the
    267   // iterators.
    268   inline bool isOptimized() const;
    269   inline MemoryAccess *getOptimized() const;
    270   inline void setOptimized(MemoryAccess *);
    271 
    272   // Retrieve AliasResult type of the optimized access. Ideally this would be
    273   // returned by the caching walker and may go away in the future.
    274   Optional<AliasResult> getOptimizedAccessType() const {
    275     return isOptimized() ? OptimizedAccessAlias : None;
    276   }
    277 
    278   /// Reset the ID of what this MemoryUse was optimized to, causing it to
    279   /// be rewalked by the walker if necessary.
    280   /// This really should only be called by tests.
    281   inline void resetOptimized();
    282 
    283 protected:
    284   friend class MemorySSA;
    285   friend class MemorySSAUpdater;
    286 
    287   MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
    288                  DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
    289                  unsigned NumOperands)
    290       : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
    291         MemoryInstruction(MI), OptimizedAccessAlias(AliasResult::MayAlias) {
    292     setDefiningAccess(DMA);
    293   }
    294 
    295   // Use deleteValue() to delete a generic MemoryUseOrDef.
    296   ~MemoryUseOrDef() = default;
    297 
    298   void setOptimizedAccessType(Optional<AliasResult> AR) {
    299     OptimizedAccessAlias = AR;
    300   }
    301 
    302   void setDefiningAccess(
    303       MemoryAccess *DMA, bool Optimized = false,
    304       Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias)) {
    305     if (!Optimized) {
    306       setOperand(0, DMA);
    307       return;
    308     }
    309     setOptimized(DMA);
    310     setOptimizedAccessType(AR);
    311   }
    312 
    313 private:
    314   Instruction *MemoryInstruction;
    315   Optional<AliasResult> OptimizedAccessAlias;
    316 };
    317 
    318 /// Represents read-only accesses to memory
    319 ///
    320 /// In particular, the set of Instructions that will be represented by
    321 /// MemoryUse's is exactly the set of Instructions for which
    322 /// AliasAnalysis::getModRefInfo returns "Ref".
    323 class MemoryUse final : public MemoryUseOrDef {
    324 public:
    325   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
    326 
    327   MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
    328       : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB,
    329                        /*NumOperands=*/1) {}
    330 
    331   // allocate space for exactly one operand
    332   void *operator new(size_t s) { return User::operator new(s, 1); }
    333 
    334   static bool classof(const Value *MA) {
    335     return MA->getValueID() == MemoryUseVal;
    336   }
    337 
    338   void print(raw_ostream &OS) const;
    339 
    340   void setOptimized(MemoryAccess *DMA) {
    341     OptimizedID = DMA->getID();
    342     setOperand(0, DMA);
    343   }
    344 
    345   bool isOptimized() const {
    346     return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
    347   }
    348 
    349   MemoryAccess *getOptimized() const {
    350     return getDefiningAccess();
    351   }
    352 
    353   void resetOptimized() {
    354     OptimizedID = INVALID_MEMORYACCESS_ID;
    355   }
    356 
    357 protected:
    358   friend class MemorySSA;
    359 
    360 private:
    361   static void deleteMe(DerivedUser *Self);
    362 
    363   unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
    364 };
    365 
    366 template <>
    367 struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
    368 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
    369 
    370 /// Represents a read-write access to memory, whether it is a must-alias,
    371 /// or a may-alias.
    372 ///
    373 /// In particular, the set of Instructions that will be represented by
    374 /// MemoryDef's is exactly the set of Instructions for which
    375 /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
    376 /// Note that, in order to provide def-def chains, all defs also have a use
    377 /// associated with them. This use points to the nearest reaching
    378 /// MemoryDef/MemoryPhi.
    379 class MemoryDef final : public MemoryUseOrDef {
    380 public:
    381   friend class MemorySSA;
    382 
    383   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
    384 
    385   MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
    386             unsigned Ver)
    387       : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB,
    388                        /*NumOperands=*/2),
    389         ID(Ver) {}
    390 
    391   // allocate space for exactly two operands
    392   void *operator new(size_t s) { return User::operator new(s, 2); }
    393 
    394   static bool classof(const Value *MA) {
    395     return MA->getValueID() == MemoryDefVal;
    396   }
    397 
    398   void setOptimized(MemoryAccess *MA) {
    399     setOperand(1, MA);
    400     OptimizedID = MA->getID();
    401   }
    402 
    403   MemoryAccess *getOptimized() const {
    404     return cast_or_null<MemoryAccess>(getOperand(1));
    405   }
    406 
    407   bool isOptimized() const {
    408     return getOptimized() && OptimizedID == getOptimized()->getID();
    409   }
    410 
    411   void resetOptimized() {
    412     OptimizedID = INVALID_MEMORYACCESS_ID;
    413     setOperand(1, nullptr);
    414   }
    415 
    416   void print(raw_ostream &OS) const;
    417 
    418   unsigned getID() const { return ID; }
    419 
    420 private:
    421   static void deleteMe(DerivedUser *Self);
    422 
    423   const unsigned ID;
    424   unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
    425 };
    426 
    427 template <>
    428 struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
    429 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
    430 
    431 template <>
    432 struct OperandTraits<MemoryUseOrDef> {
    433   static Use *op_begin(MemoryUseOrDef *MUD) {
    434     if (auto *MU = dyn_cast<MemoryUse>(MUD))
    435       return OperandTraits<MemoryUse>::op_begin(MU);
    436     return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
    437   }
    438 
    439   static Use *op_end(MemoryUseOrDef *MUD) {
    440     if (auto *MU = dyn_cast<MemoryUse>(MUD))
    441       return OperandTraits<MemoryUse>::op_end(MU);
    442     return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
    443   }
    444 
    445   static unsigned operands(const MemoryUseOrDef *MUD) {
    446     if (const auto *MU = dyn_cast<MemoryUse>(MUD))
    447       return OperandTraits<MemoryUse>::operands(MU);
    448     return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
    449   }
    450 };
    451 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
    452 
    453 /// Represents phi nodes for memory accesses.
    454 ///
    455 /// These have the same semantic as regular phi nodes, with the exception that
    456 /// only one phi will ever exist in a given basic block.
    457 /// Guaranteeing one phi per block means guaranteeing there is only ever one
    458 /// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
    459 /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
    460 /// a MemoryPhi's operands.
    461 /// That is, given
    462 /// if (a) {
    463 ///   store %a
    464 ///   store %b
    465 /// }
    466 /// it *must* be transformed into
    467 /// if (a) {
    468 ///    1 = MemoryDef(liveOnEntry)
    469 ///    store %a
    470 ///    2 = MemoryDef(1)
    471 ///    store %b
    472 /// }
    473 /// and *not*
    474 /// if (a) {
    475 ///    1 = MemoryDef(liveOnEntry)
    476 ///    store %a
    477 ///    2 = MemoryDef(liveOnEntry)
    478 ///    store %b
    479 /// }
    480 /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
    481 /// end of the branch, and if there are not two phi nodes, one will be
    482 /// disconnected completely from the SSA graph below that point.
    483 /// Because MemoryUse's do not generate new definitions, they do not have this
    484 /// issue.
    485 class MemoryPhi final : public MemoryAccess {
    486   // allocate space for exactly zero operands
    487   void *operator new(size_t s) { return User::operator new(s); }
    488 
    489 public:
    490   /// Provide fast operand accessors
    491   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
    492 
    493   MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
    494       : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
    495         ReservedSpace(NumPreds) {
    496     allocHungoffUses(ReservedSpace);
    497   }
    498 
    499   // Block iterator interface. This provides access to the list of incoming
    500   // basic blocks, which parallels the list of incoming values.
    501   using block_iterator = BasicBlock **;
    502   using const_block_iterator = BasicBlock *const *;
    503 
    504   block_iterator block_begin() {
    505     return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
    506   }
    507 
    508   const_block_iterator block_begin() const {
    509     return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
    510   }
    511 
    512   block_iterator block_end() { return block_begin() + getNumOperands(); }
    513 
    514   const_block_iterator block_end() const {
    515     return block_begin() + getNumOperands();
    516   }
    517 
    518   iterator_range<block_iterator> blocks() {
    519     return make_range(block_begin(), block_end());
    520   }
    521 
    522   iterator_range<const_block_iterator> blocks() const {
    523     return make_range(block_begin(), block_end());
    524   }
    525 
    526   op_range incoming_values() { return operands(); }
    527 
    528   const_op_range incoming_values() const { return operands(); }
    529 
    530   /// Return the number of incoming edges
    531   unsigned getNumIncomingValues() const { return getNumOperands(); }
    532 
    533   /// Return incoming value number x
    534   MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
    535   void setIncomingValue(unsigned I, MemoryAccess *V) {
    536     assert(V && "PHI node got a null value!");
    537     setOperand(I, V);
    538   }
    539 
    540   static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
    541   static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
    542 
  /// Return incoming basic block number \p I.
    544   BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
    545 
    546   /// Return incoming basic block corresponding
    547   /// to an operand of the PHI.
    548   BasicBlock *getIncomingBlock(const Use &U) const {
    549     assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    550     return getIncomingBlock(unsigned(&U - op_begin()));
    551   }
    552 
    553   /// Return incoming basic block corresponding
    554   /// to value use iterator.
    555   BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
    556     return getIncomingBlock(I.getUse());
    557   }
    558 
    559   void setIncomingBlock(unsigned I, BasicBlock *BB) {
    560     assert(BB && "PHI node got a null basic block!");
    561     block_begin()[I] = BB;
    562   }
    563 
    564   /// Add an incoming value to the end of the PHI list
    565   void addIncoming(MemoryAccess *V, BasicBlock *BB) {
    566     if (getNumOperands() == ReservedSpace)
    567       growOperands(); // Get more space!
    568     // Initialize some new operands.
    569     setNumHungOffUseOperands(getNumOperands() + 1);
    570     setIncomingValue(getNumOperands() - 1, V);
    571     setIncomingBlock(getNumOperands() - 1, BB);
    572   }
    573 
    574   /// Return the first index of the specified basic
    575   /// block in the value list for this PHI.  Returns -1 if no instance.
    576   int getBasicBlockIndex(const BasicBlock *BB) const {
    577     for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
    578       if (block_begin()[I] == BB)
    579         return I;
    580     return -1;
    581   }
    582 
    583   MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
    584     int Idx = getBasicBlockIndex(BB);
    585     assert(Idx >= 0 && "Invalid basic block argument!");
    586     return getIncomingValue(Idx);
    587   }
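
  // For example (a sketch; `MP` and `Visit` are illustrative), the incoming
  // (block, value) pairs of a MemoryPhi can be walked with:
  //   for (unsigned I = 0, E = MP->getNumIncomingValues(); I != E; ++I)
  //     Visit(MP->getIncomingBlock(I), MP->getIncomingValue(I));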
    588 
  // After deleting incoming position I, the order of the incoming values may
  // be changed.
    590   void unorderedDeleteIncoming(unsigned I) {
    591     unsigned E = getNumOperands();
    592     assert(I < E && "Cannot remove out of bounds Phi entry.");
    // A MemoryPhi must have at least two incoming values; otherwise the
    // MemoryPhi itself should be deleted.
    assert(E >= 2 && "Can only remove incoming values from MemoryPhis with "
                     "at least 2 values.");
    597     setIncomingValue(I, getIncomingValue(E - 1));
    598     setIncomingBlock(I, block_begin()[E - 1]);
    599     setOperand(E - 1, nullptr);
    600     block_begin()[E - 1] = nullptr;
    601     setNumHungOffUseOperands(getNumOperands() - 1);
    602   }
    603 
    604   // After deleting entries that satisfy Pred, remaining entries may have
    605   // changed order.
    606   template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
    607     for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
    608       if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
    609         unorderedDeleteIncoming(I);
    610         E = getNumOperands();
    611         --I;
    612       }
    613     assert(getNumOperands() >= 1 &&
    614            "Cannot remove all incoming blocks in a MemoryPhi.");
    615   }
    616 
  // After deleting incoming block BB, the order of the incoming blocks may be
  // changed.
    618   void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
    619     unorderedDeleteIncomingIf(
    620         [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
    621   }
    622 
    623   // After deleting incoming memory access MA, the incoming accesses order may
    624   // be changed.
    625   void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
    626     unorderedDeleteIncomingIf(
    627         [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
    628   }
    629 
    630   static bool classof(const Value *V) {
    631     return V->getValueID() == MemoryPhiVal;
    632   }
    633 
    634   void print(raw_ostream &OS) const;
    635 
    636   unsigned getID() const { return ID; }
    637 
    638 protected:
    639   friend class MemorySSA;
    640 
  /// This is more complicated than the generic
    642   /// User::allocHungoffUses, because we have to allocate Uses for the incoming
    643   /// values and pointers to the incoming blocks, all in one allocation.
    644   void allocHungoffUses(unsigned N) {
    645     User::allocHungoffUses(N, /* IsPhi */ true);
    646   }
    647 
    648 private:
    649   // For debugging only
    650   const unsigned ID;
    651   unsigned ReservedSpace;
    652 
    653   /// This grows the operand list in response to a push_back style of
    654   /// operation.  This grows the number of ops by 1.5 times.
    655   void growOperands() {
    656     unsigned E = getNumOperands();
    657     // 2 op PHI nodes are VERY common, so reserve at least enough for that.
    658     ReservedSpace = std::max(E + E / 2, 2u);
    659     growHungoffUses(ReservedSpace, /* IsPhi */ true);
    660   }
    661 
    662   static void deleteMe(DerivedUser *Self);
    663 };
    664 
    665 inline unsigned MemoryAccess::getID() const {
    666   assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
    667          "only memory defs and phis have ids");
    668   if (const auto *MD = dyn_cast<MemoryDef>(this))
    669     return MD->getID();
    670   return cast<MemoryPhi>(this)->getID();
    671 }
    672 
    673 inline bool MemoryUseOrDef::isOptimized() const {
    674   if (const auto *MD = dyn_cast<MemoryDef>(this))
    675     return MD->isOptimized();
    676   return cast<MemoryUse>(this)->isOptimized();
    677 }
    678 
    679 inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
    680   if (const auto *MD = dyn_cast<MemoryDef>(this))
    681     return MD->getOptimized();
    682   return cast<MemoryUse>(this)->getOptimized();
    683 }
    684 
    685 inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
    686   if (auto *MD = dyn_cast<MemoryDef>(this))
    687     MD->setOptimized(MA);
    688   else
    689     cast<MemoryUse>(this)->setOptimized(MA);
    690 }
    691 
    692 inline void MemoryUseOrDef::resetOptimized() {
    693   if (auto *MD = dyn_cast<MemoryDef>(this))
    694     MD->resetOptimized();
    695   else
    696     cast<MemoryUse>(this)->resetOptimized();
    697 }
    698 
    699 template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
    700 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
    701 
    702 /// Encapsulates MemorySSA, including all data associated with memory
    703 /// accesses.
    704 class MemorySSA {
    705 public:
    706   MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
    707 
    708   // MemorySSA must remain where it's constructed; Walkers it creates store
    709   // pointers to it.
    710   MemorySSA(MemorySSA &&) = delete;
    711 
    712   ~MemorySSA();
    713 
    714   MemorySSAWalker *getWalker();
    715   MemorySSAWalker *getSkipSelfWalker();
    716 
  /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
  /// access associated with it. If passed a basic block, gets the memory phi
  /// node that exists for that block, if there is one. Otherwise, this will
  /// get a MemoryUseOrDef.
    721   MemoryUseOrDef *getMemoryAccess(const Instruction *I) const {
    722     return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
    723   }
    724 
    725   MemoryPhi *getMemoryAccess(const BasicBlock *BB) const {
    726     return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
    727   }
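
  // For example (a sketch; `MSSA`, `I`, and `BB` are illustrative):
  //   MemoryUseOrDef *MUD = MSSA.getMemoryAccess(&I);  // may be null
  //   MemoryPhi *Phi = MSSA.getMemoryAccess(&BB);       // may be null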
    728 
    729   DominatorTree &getDomTree() const { return *DT; }
    730 
    731   void dump() const;
    732   void print(raw_ostream &) const;
    733 
    734   /// Return true if \p MA represents the live on entry value
    735   ///
    736   /// Loads and stores from pointer arguments and other global values may be
    737   /// defined by memory operations that do not occur in the current function, so
    738   /// they may be live on entry to the function. MemorySSA represents such
    739   /// memory state by the live on entry definition, which is guaranteed to occur
    740   /// before any other memory access in the function.
    741   inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
    742     return MA == LiveOnEntryDef.get();
    743   }
    744 
    745   inline MemoryAccess *getLiveOnEntryDef() const {
    746     return LiveOnEntryDef.get();
    747   }
    748 
  // Sadly, iplists, by default, own and delete pointers added to the
  // list. It's not currently possible to have two iplists for the same type,
  // where one owns the pointers and one does not. This is because the traits
  // are per-type, not per-tag. If this ever changes, we should make the
  // DefList an iplist.
    754   using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
    755   using DefsList =
    756       simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
    757 
    758   /// Return the list of MemoryAccess's for a given basic block.
    759   ///
    760   /// This list is not modifiable by the user.
    761   const AccessList *getBlockAccesses(const BasicBlock *BB) const {
    762     return getWritableBlockAccesses(BB);
    763   }
    764 
    765   /// Return the list of MemoryDef's and MemoryPhi's for a given basic
    766   /// block.
    767   ///
    768   /// This list is not modifiable by the user.
    769   const DefsList *getBlockDefs(const BasicBlock *BB) const {
    770     return getWritableBlockDefs(BB);
    771   }
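
  // For example (a sketch; `MSSA`, `BB`, and the use of errs() are
  // illustrative), all accesses in a block can be scanned with:
  //   if (const MemorySSA::AccessList *AL = MSSA.getBlockAccesses(BB))
  //     for (const MemoryAccess &MA : *AL)
  //       MA.print(errs());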
    772 
    773   /// Given two memory accesses in the same basic block, determine
    774   /// whether MemoryAccess \p A dominates MemoryAccess \p B.
    775   bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
    776 
    777   /// Given two memory accesses in potentially different blocks,
    778   /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
    779   bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
    780 
    781   /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
    782   /// dominates Use \p B.
    783   bool dominates(const MemoryAccess *A, const Use &B) const;
    784 
  /// Verify that MemorySSA is self-consistent (i.e., definitions dominate
  /// all uses, and uses appear in the right places). This is used by unit
  /// tests.
    787   void verifyMemorySSA() const;
    788 
    789   /// Used in various insertion functions to specify whether we are talking
    790   /// about the beginning or end of a block.
    791   enum InsertionPlace { Beginning, End, BeforeTerminator };
    792 
    793 protected:
  // Used by the MemorySSA annotator, dumpers, and wrapper pass.
    795   friend class MemorySSAAnnotatedWriter;
    796   friend class MemorySSAPrinterLegacyPass;
    797   friend class MemorySSAUpdater;
    798 
    799   void verifyOrderingDominationAndDefUses(Function &F) const;
    800   void verifyDominationNumbers(const Function &F) const;
    801   void verifyPrevDefInPhis(Function &F) const;
    802 
    803   // This is used by the use optimizer and updater.
    804   AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
    805     auto It = PerBlockAccesses.find(BB);
    806     return It == PerBlockAccesses.end() ? nullptr : It->second.get();
    807   }
    808 
    809   // This is used by the use optimizer and updater.
    810   DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
    811     auto It = PerBlockDefs.find(BB);
    812     return It == PerBlockDefs.end() ? nullptr : It->second.get();
    813   }
    814 
  // These are used by the updater to perform various internal MemorySSA
  // machinations. They do not always leave the IR in a correct state, and
  // rely on the updater to fix up what they break, so they are not public.
    818 
    819   void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
    820   void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);
    821 
    822   // Rename the dominator tree branch rooted at BB.
    823   void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
    824                   SmallPtrSetImpl<BasicBlock *> &Visited) {
    825     renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
    826   }
    827 
    828   void removeFromLookups(MemoryAccess *);
    829   void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
    830   void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
    831                                InsertionPlace);
    832   void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
    833                              AccessList::iterator);
    834   MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
    835                                       const MemoryUseOrDef *Template = nullptr,
    836                                       bool CreationMustSucceed = true);
    837 
    838 private:
    839   template <class AliasAnalysisType> class ClobberWalkerBase;
    840   template <class AliasAnalysisType> class CachingWalker;
    841   template <class AliasAnalysisType> class SkipSelfWalker;
    842   class OptimizeUses;
    843 
    844   CachingWalker<AliasAnalysis> *getWalkerImpl();
    845   void buildMemorySSA(BatchAAResults &BAA);
    846 
    847   void prepareForMoveTo(MemoryAccess *, BasicBlock *);
    848   void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
    849 
    850   using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
    851   using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
    852 
    853   void markUnreachableAsLiveOnEntry(BasicBlock *BB);
    854   MemoryPhi *createMemoryPhi(BasicBlock *BB);
    855   template <typename AliasAnalysisType>
    856   MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *,
    857                                   const MemoryUseOrDef *Template = nullptr);
    858   void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
    859   MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
    860   void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
    861   void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
    862                   SmallPtrSetImpl<BasicBlock *> &Visited,
    863                   bool SkipVisited = false, bool RenameAllUses = false);
    864   AccessList *getOrCreateAccessList(const BasicBlock *);
    865   DefsList *getOrCreateDefsList(const BasicBlock *);
    866   void renumberBlock(const BasicBlock *) const;
    867   AliasAnalysis *AA;
    868   DominatorTree *DT;
    869   Function &F;
    870 
    871   // Memory SSA mappings
    872   DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
    873 
    874   // These two mappings contain the main block to access/def mappings for
    875   // MemorySSA. The list contained in PerBlockAccesses really owns all the
    876   // MemoryAccesses.
    877   // Both maps maintain the invariant that if a block is found in them, the
    878   // corresponding list is not empty, and if a block is not found in them, the
    879   // corresponding list is empty.
    880   AccessMap PerBlockAccesses;
    881   DefsMap PerBlockDefs;
    882   std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;
    883 
    884   // Domination mappings
    885   // Note that the numbering is local to a block, even though the map is
    886   // global.
    887   mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
    888   mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
    889 
    890   // Memory SSA building info
    891   std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase;
    892   std::unique_ptr<CachingWalker<AliasAnalysis>> Walker;
    893   std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker;
    894   unsigned NextID;
    895 };
    896 
    897 // Internal MemorySSA utils, for use by MemorySSA classes and walkers
    898 class MemorySSAUtil {
    899 protected:
    900   friend class GVNHoist;
    901   friend class MemorySSAWalker;
    902 
    903   // This function should not be used by new passes.
    904   static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
    905                                   AliasAnalysis &AA);
    906 };
    907 
    908 // This pass does eager building and then printing of MemorySSA. It is used by
    909 // the tests to be able to build, dump, and verify Memory SSA.
    910 class MemorySSAPrinterLegacyPass : public FunctionPass {
    911 public:
    912   MemorySSAPrinterLegacyPass();
    913 
    914   bool runOnFunction(Function &) override;
    915   void getAnalysisUsage(AnalysisUsage &AU) const override;
    916 
    917   static char ID;
    918 };
    919 
    920 /// An analysis that produces \c MemorySSA for a function.
    921 ///
    922 class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
    923   friend AnalysisInfoMixin<MemorySSAAnalysis>;
    924 
    925   static AnalysisKey Key;
    926 
    927 public:
    928   // Wrap MemorySSA result to ensure address stability of internal MemorySSA
    929   // pointers after construction.  Use a wrapper class instead of plain
    930   // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
    931   struct Result {
    932     Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
    933 
    934     MemorySSA &getMSSA() { return *MSSA.get(); }
    935 
    936     std::unique_ptr<MemorySSA> MSSA;
    937 
    938     bool invalidate(Function &F, const PreservedAnalyses &PA,
    939                     FunctionAnalysisManager::Invalidator &Inv);
    940   };
    941 
    942   Result run(Function &F, FunctionAnalysisManager &AM);
    943 };
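
// For example (a sketch), a new-PM function pass can obtain MemorySSA with:
//   MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();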
    944 
    945 /// Printer pass for \c MemorySSA.
    946 class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
    947   raw_ostream &OS;
    948 
    949 public:
    950   explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
    951 
    952   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
    953 };
    954 
    955 /// Verifier pass for \c MemorySSA.
    956 struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
    957   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
    958 };
    959 
    960 /// Legacy analysis pass which computes \c MemorySSA.
    961 class MemorySSAWrapperPass : public FunctionPass {
    962 public:
    963   MemorySSAWrapperPass();
    964 
    965   static char ID;
    966 
    967   bool runOnFunction(Function &) override;
    968   void releaseMemory() override;
    969   MemorySSA &getMSSA() { return *MSSA; }
    970   const MemorySSA &getMSSA() const { return *MSSA; }
    971 
    972   void getAnalysisUsage(AnalysisUsage &AU) const override;
    973 
    974   void verifyAnalysis() const override;
    975   void print(raw_ostream &OS, const Module *M = nullptr) const override;
    976 
    977 private:
    978   std::unique_ptr<MemorySSA> MSSA;
    979 };
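
// For example (a sketch), a legacy pass that declares a dependency on
// MemorySSAWrapperPass in its getAnalysisUsage can access the result with:
//   MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();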
    980 
    981 /// This is the generic walker interface for walkers of MemorySSA.
    982 /// Walkers are used to be able to further disambiguate the def-use chains
    983 /// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
    984 /// you.
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. For example, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API provides a
/// standardized interface for getting and using that info.
    992 class MemorySSAWalker {
    993 public:
    994   MemorySSAWalker(MemorySSA *);
    995   virtual ~MemorySSAWalker() = default;
    996 
    997   using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
    998 
    999   /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
   1000   /// will give you the nearest dominating MemoryAccess that Mod's the location
   1001   /// the instruction accesses (by skipping any def which AA can prove does not
   1002   /// alias the location(s) accessed by the instruction given).
   1003   ///
   1004   /// Note that this will return a single access, and it must dominate the
   1005   /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
   1006   /// this will return the MemoryPhi, not the operand. This means that
   1007   /// given:
   1008   /// if (a) {
   1009   ///   1 = MemoryDef(liveOnEntry)
   1010   ///   store %a
   1011   /// } else {
   1012   ///   2 = MemoryDef(liveOnEntry)
   1013   ///   store %b
   1014   /// }
   1015   /// 3 = MemoryPhi(2, 1)
   1016   /// MemoryUse(3)
   1017   /// load %a
   1018   ///
   1019   /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
   1020   /// in the if (a) branch.
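  ///
  /// A minimal usage sketch (illustrative; \c MSSA and \c LoadI are assumed):
  /// \code
  ///   MemorySSAWalker *Walker = MSSA.getWalker();
  ///   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(LoadI);
  /// \endcode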
   1021   MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
   1022     MemoryAccess *MA = MSSA->getMemoryAccess(I);
   1023     assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
   1024     return getClobberingMemoryAccess(MA);
   1025   }
   1026 
   1027   /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
   1028   /// but takes a MemoryAccess instead of an Instruction.
   1029   virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
   1030 
   1031   /// Given a potentially clobbering memory access and a new location,
   1032   /// calling this will give you the nearest dominating clobbering MemoryAccess
   1033   /// (by skipping non-aliasing def links).
   1034   ///
   1035   /// This version of the function is mainly used to disambiguate phi translated
   1036   /// pointers, where the value of a pointer may have changed from the initial
   1037   /// memory access. Note that this expects to be handed either a MemoryUse,
   1038   /// or an already potentially clobbering access. Unlike the above API, if
   1039   /// given a MemoryDef that clobbers the pointer as the starting access, it
   1040   /// will return that MemoryDef, whereas the above would return the clobber
   1041   /// starting from the use side of  the memory def.
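  ///
  /// For example (a sketch; \c Start is a potentially clobbering access and
  /// \c Loc is the phi-translated MemoryLocation):
  /// \code
  ///   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(Start, Loc);
  /// \endcode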
   1042   virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
   1043                                                   const MemoryLocation &) = 0;
   1044 
   1045   /// Given a memory access, invalidate anything this walker knows about
   1046   /// that access.
   1047   /// This API is used by walkers that store information to perform basic cache
   1048   /// invalidation.  This will be called by MemorySSA at appropriate times for
   1049   /// the walker it uses or returns.
   1050   virtual void invalidateInfo(MemoryAccess *) {}
   1051 
   1052 protected:
   1053   friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
   1054                           // constructor.
   1055   MemorySSA *MSSA;
   1056 };
   1057 
   1058 /// A MemorySSAWalker that does no alias queries, or anything else. It
   1059 /// simply returns the links as they were constructed by the builder.
   1060 class DoNothingMemorySSAWalker final : public MemorySSAWalker {
   1061 public:
   1062   // Keep the overrides below from hiding the Instruction overload of
   1063   // getClobberingMemoryAccess.
   1064   using MemorySSAWalker::getClobberingMemoryAccess;
   1065 
   1066   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
   1067   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
   1068                                           const MemoryLocation &) override;
   1069 };
   1070 
   1071 using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
   1072 using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
   1073 
   1074 /// Iterator base class used to implement const and non-const iterators
   1075 /// over the defining accesses of a MemoryAccess.
   1076 template <class T>
   1077 class memoryaccess_def_iterator_base
   1078     : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
   1079                                   std::forward_iterator_tag, T, ptrdiff_t, T *,
   1080                                   T *> {
   1081   using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
   1082 
   1083 public:
   1084   memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
   1085   memoryaccess_def_iterator_base() = default;
   1086 
   1087   bool operator==(const memoryaccess_def_iterator_base &Other) const {
   1088     return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
   1089   }
   1090 
   1091   // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
   1092   // block from the operand in constant time (In a PHINode, the uselist has
   1093   // both, so it's just subtraction). We provide it as part of the
   1094   // iterator to avoid callers having to linear walk to get the block.
   1095   // If the operation becomes constant time on MemoryPHI's, this bit of
   1096   // abstraction breaking should be removed.
   1097   BasicBlock *getPhiArgBlock() const {
   1098     MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
   1099     assert(MP && "Tried to get phi arg block when not iterating over a PHI");
   1100     return MP->getIncomingBlock(ArgNo);
   1101   }
   1102 
   1103   typename std::iterator_traits<BaseT>::pointer operator*() const {
   1104     assert(Access && "Tried to access past the end of our iterator");
   1105     // Go to the first argument for phis, and the defining access for everything
   1106     // else.
   1107     if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
   1108       return MP->getIncomingValue(ArgNo);
   1109     return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
   1110   }
   1111 
   1112   using BaseT::operator++;
   1113   memoryaccess_def_iterator_base &operator++() {
   1114     assert(Access && "Hit end of iterator");
   1115     if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
   1116       if (++ArgNo >= MP->getNumIncomingValues()) {
   1117         ArgNo = 0;
   1118         Access = nullptr;
   1119       }
   1120     } else {
   1121       Access = nullptr;
   1122     }
   1123     return *this;
   1124   }
   1125 
   1126 private:
   1127   T *Access = nullptr;
   1128   unsigned ArgNo = 0;
   1129 };
   1130 
   1131 inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
   1132   return memoryaccess_def_iterator(this);
   1133 }
   1134 
   1135 inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
   1136   return const_memoryaccess_def_iterator(this);
   1137 }
   1138 
   1139 inline memoryaccess_def_iterator MemoryAccess::defs_end() {
   1140   return memoryaccess_def_iterator();
   1141 }
   1142 
   1143 inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
   1144   return const_memoryaccess_def_iterator();
   1145 }
   1146 
   1147 /// GraphTraits for a MemoryAccess, which walks defs in the normal case,
   1148 /// and uses in the inverse case.
   1149 template <> struct GraphTraits<MemoryAccess *> {
   1150   using NodeRef = MemoryAccess *;
   1151   using ChildIteratorType = memoryaccess_def_iterator;
   1152 
   1153   static NodeRef getEntryNode(NodeRef N) { return N; }
   1154   static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
   1155   static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
   1156 };
   1157 
   1158 template <> struct GraphTraits<Inverse<MemoryAccess *>> {
   1159   using NodeRef = MemoryAccess *;
   1160   using ChildIteratorType = MemoryAccess::iterator;
   1161 
   1162   static NodeRef getEntryNode(NodeRef N) { return N; }
   1163   static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
   1164   static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
   1165 };
   1166 
   1167 /// Provide an iterator that walks defs, giving both the memory access,
   1168 /// and the current pointer location, updating the pointer location as it
   1169 /// changes due to phi node translation.
   1170 ///
   1171 /// This iterator, while somewhat specialized, is what most clients actually
   1172 /// want when walking upwards through MemorySSA def chains. It takes a pair of
   1173 /// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
   1174 /// memory location through phi nodes for the user.
   1175 class upward_defs_iterator
   1176     : public iterator_facade_base<upward_defs_iterator,
   1177                                   std::forward_iterator_tag,
   1178                                   const MemoryAccessPair> {
   1179   using BaseT = upward_defs_iterator::iterator_facade_base;
   1180 
   1181 public:
   1182   upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT,
   1183                        bool *PerformedPhiTranslation = nullptr)
   1184       : DefIterator(Info.first), Location(Info.second),
   1185         OriginalAccess(Info.first), DT(DT),
   1186         PerformedPhiTranslation(PerformedPhiTranslation) {
   1187     CurrentPair.first = nullptr;
   1188 
   1189     WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
   1190     fillInCurrentPair();
   1191   }
   1192 
   1193   upward_defs_iterator() { CurrentPair.first = nullptr; }
   1194 
   1195   bool operator==(const upward_defs_iterator &Other) const {
   1196     return DefIterator == Other.DefIterator;
   1197   }
   1198 
   1199   typename std::iterator_traits<BaseT>::reference operator*() const {
   1200     assert(DefIterator != OriginalAccess->defs_end() &&
   1201            "Tried to access past the end of our iterator");
   1202     return CurrentPair;
   1203   }
   1204 
   1205   using BaseT::operator++;
   1206   upward_defs_iterator &operator++() {
   1207     assert(DefIterator != OriginalAccess->defs_end() &&
   1208            "Tried to access past the end of the iterator");
   1209     ++DefIterator;
   1210     if (DefIterator != OriginalAccess->defs_end())
   1211       fillInCurrentPair();
   1212     return *this;
   1213   }
   1214 
   1215   BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
   1216 
   1217 private:
   1218   /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
   1219   /// loop. In particular, this guarantees that it only references a single
   1220   /// MemoryLocation during execution of the containing function.
   1221   bool IsGuaranteedLoopInvariant(Value *Ptr) const;
   1222 
   1223   void fillInCurrentPair() {
   1224     CurrentPair.first = *DefIterator;
   1225     CurrentPair.second = Location;
   1226     if (WalkingPhi && Location.Ptr) {
   1227       // Mark size as unknown, if the location is not guaranteed to be
   1228       // loop-invariant for any possible loop in the function. Setting the size
   1229       // to unknown guarantees that any memory accesses that access locations
   1230       // after the pointer are considered as clobbers, which is important to
   1231       // catch loop carried dependences.
   1232       if (Location.Ptr &&
   1233           !IsGuaranteedLoopInvariant(const_cast<Value *>(Location.Ptr)))
   1234         CurrentPair.second =
   1235             Location.getWithNewSize(LocationSize::beforeOrAfterPointer());
   1236       PHITransAddr Translator(
   1237           const_cast<Value *>(Location.Ptr),
   1238           OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
   1239 
   1240       if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
   1241                                         DefIterator.getPhiArgBlock(), DT,
   1242                                         true)) {
   1243         Value *TransAddr = Translator.getAddr();
   1244         if (TransAddr != Location.Ptr) {
   1245           CurrentPair.second = CurrentPair.second.getWithNewPtr(TransAddr);
   1246 
   1247           if (TransAddr &&
   1248               !IsGuaranteedLoopInvariant(const_cast<Value *>(TransAddr)))
   1249             CurrentPair.second = CurrentPair.second.getWithNewSize(
   1250                 LocationSize::beforeOrAfterPointer());
   1251 
   1252           if (PerformedPhiTranslation)
   1253             *PerformedPhiTranslation = true;
   1254         }
   1255       }
   1256     }
   1257   }
   1258 
   1259   MemoryAccessPair CurrentPair;
   1260   memoryaccess_def_iterator DefIterator;
   1261   MemoryLocation Location;
   1262   MemoryAccess *OriginalAccess = nullptr;
   1263   DominatorTree *DT = nullptr;
   1264   bool WalkingPhi = false;
   1265   bool *PerformedPhiTranslation = nullptr;
   1266 };
   1267 
   1268 inline upward_defs_iterator
   1269 upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT,
   1270                   bool *PerformedPhiTranslation = nullptr) {
   1271   return upward_defs_iterator(Pair, &DT, PerformedPhiTranslation);
   1272 }
   1273 
   1274 inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
   1275 
   1276 inline iterator_range<upward_defs_iterator>
   1277 upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) {
   1278   return make_range(upward_defs_begin(Pair, DT), upward_defs_end());
   1279 }
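
// For example (a sketch; `Phi`, `Loc`, and `DT` are illustrative), the defs
// reachable upwards from a MemoryPhi, with the location phi-translated along
// the way, can be visited with:
//   for (const MemoryAccessPair &P : upward_defs({Phi, Loc}, DT)) {
//     MemoryAccess *Def = P.first;          // defining access
//     const MemoryLocation &L = P.second;   // possibly translated location
//   }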
   1280 
   1281 /// Walks the defining accesses of MemoryDefs. Stops after we hit something that
   1282 /// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
   1283 /// comparing against a null def_chain_iterator, this will compare equal only
   1284 /// after walking said Phi/liveOnEntry.
   1285 ///
   1286 /// The UseOptimizedChain flag specifies whether to walk the clobbering
   1287 /// access chain, or all the accesses.
   1288 ///
/// Normally, MemoryDefs are all just def/use linked together, so a def_chain
/// on a MemoryDef will walk all MemoryDefs above it in the program until it
/// hits a phi node. The optimized chain walks the clobbering access of a
/// store. So if you are just trying to find, given a store, the next thing
/// that would clobber the same memory, you want the optimized chain.
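///
/// For example (a sketch; \c Start is an illustrative MemoryAccess pointer),
/// the accesses on a def chain can be counted with:
/// \code
///   unsigned Len = 0;
///   for (MemoryAccess *MA : def_chain(Start))
///     ++Len;
/// \endcode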
   1294 template <class T, bool UseOptimizedChain = false>
   1295 struct def_chain_iterator
   1296     : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
   1297                                   std::forward_iterator_tag, MemoryAccess *> {
   1298   def_chain_iterator() : MA(nullptr) {}
   1299   def_chain_iterator(T MA) : MA(MA) {}
   1300 
   1301   T operator*() const { return MA; }
   1302 
   1303   def_chain_iterator &operator++() {
   1304     // N.B. liveOnEntry has a null defining access.
   1305     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
   1306       if (UseOptimizedChain && MUD->isOptimized())
   1307         MA = MUD->getOptimized();
   1308       else
   1309         MA = MUD->getDefiningAccess();
   1310     } else {
   1311       MA = nullptr;
   1312     }
   1313 
   1314     return *this;
   1315   }
   1316 
   1317   bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
   1318 
   1319 private:
   1320   T MA;
   1321 };
   1322 
   1323 template <class T>
   1324 inline iterator_range<def_chain_iterator<T>>
   1325 def_chain(T MA, MemoryAccess *UpTo = nullptr) {
   1326 #ifdef EXPENSIVE_CHECKS
   1327   assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
   1328          "UpTo isn't in the def chain!");
   1329 #endif
   1330   return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
   1331 }
   1332 
   1333 template <class T>
   1334 inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
   1335   return make_range(def_chain_iterator<T, true>(MA),
   1336                     def_chain_iterator<T, true>(nullptr));
   1337 }
   1338 
   1339 } // end namespace llvm
   1340 
   1341 #endif // LLVM_ANALYSIS_MEMORYSSA_H
   1342