Home | Sort by: relevance | last modified time | path
    Searched defs:Loads (Results 1 - 14 of 14) sorted by relevance

  /src/external/apache2/llvm/dist/llvm/lib/Transforms/Utils/
DemoteRegToStack.cpp 65 // node that we cannot have multiple loads. The problem is that the
68 // keep track of and reuse loads we insert.
69 DenseMap<BasicBlock*, Value*> Loads;
72 Value *&V = Loads[PN->getIncomingBlock(i)];
  /src/external/apache2/llvm/dist/llvm/lib/Analysis/
AliasAnalysisEvaluator.cpp 104 SetVector<Value *> Loads;
115 Loads.insert(&Inst);
179 for (Value *Load : Loads) {
LoopAccessAnalysis.cpp 1302 // If loads occur at a distance that is not a multiple of a feasible vector
1669 // Check loads only against next equivalent class, but stores also against
1786 SmallVector<LoadInst *, 16> Loads;
1809 // Scan the BB and collect legal loads and stores. Also detect any
1860 Loads.push_back(Ld);
1897 // Now we have two lists that hold the loads and the stores.
1964 for (LoadInst *LD : Loads) {
  /src/external/apache2/llvm/dist/llvm/lib/CodeGen/SelectionDAG/
ScheduleDAGSDNodes.cpp 39 STATISTIC(LoadsClustered, "Number of loads clustered together");
195 /// ClusterNeighboringLoads - Force nearby loads together by "gluing" them.
196 /// This function finds loads of the same base and different offsets. If the
221 // Look for other loads of the same chain. Find loads that are loading from
248 // optimizations really should have eliminated one of the loads.
268 // Check if the loads are close enough.
269 SmallVector<SDNode*, 4> Loads;
273 Loads.push_back(BaseLoad);
278 break; // Stop right here. Ignore loads that are further away
    [all...]
DAGCombiner.cpp 114 cl::desc("DAG combiner may split indexing from loads"));
683 /// Used by BackwardsPropagateMask to find suitable loads.
684 bool SearchForAndLoads(SDNode *N, SmallVectorImpl<LoadSDNode*> &Loads,
688 /// can be combined into narrow loads.
5209 // Do not change the width of a volatile or atomic loads.
5213 // Do not generate loads of non-round integer types since these can
5237 // Do not generate loads of non-round integer types since these can
5242 // Don't change the width of a volatile or atomic loads.
5317 SmallVectorImpl<LoadSDNode*> &Loads,
5321 // Recursively search for the operands, looking for loads which can b
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Target/ARM/
ARMSelectionDAGInfo.cpp 182 // Do repeated 4-byte loads and stores. To be improved.
202 // Emit a maximum of 4 loads in Thumb1 since we have fewer registers
205 SDValue Loads[6];
247 // Issue loads / stores for the trailing (1 - 3) bytes.
260 Loads[i] = DAG.getLoad(VT, dl, Chain,
264 TFOps[i] = Loads[i].getValue(1);
277 TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
ARMParallelDSP.cpp 50 cl::desc("Limit the number of loads analysed"));
69 SmallVector<LoadInst*, 2> VecLd; // Container for loads to widen.
200 SmallVector<LoadInst*, 4> Loads;
205 append_range(Loads, Lds);
229 LoadInst* CreateWideLoad(MemInstList &Loads, IntegerType *LoadTy);
318 LLVM_DEBUG(dbgs() << "Loads are sequential and valid:\n";
348 /// Iterate through the block and record base, offset pairs of loads which can
351 SmallVector<LoadInst*, 8> Loads;
356 // Collect loads and instruction that may write to memory. For now we only
357 // record loads which are simple, sign-extended and have a single user
    [all...]
ARMISelLowering.cpp 314 // Pre and Post inc are supported on loads and stores
344 // Pre and Post inc are supported on loads and stores
1009 // loads and stores are provided by the hardware.
1064 // ARM does not have floating-point extending loads.
1295 // if they can be combined with nearby atomic loads and stores.
2315 // Walk the register/memloc assignments, inserting copies/loads. In the case
3307 /// Convert a TLS address reference into the correct sequence of loads
6797 // Prevent floating-point constants from using literal loads
7571 // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
9498 // The basic MVE VLDR on a v4i1/v8i1 actually loads the entire 16bi
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Target/X86/
X86PreAMXConfig.cpp 107 bool checkVolatileModel(SmallSet<Value *, 4> &Loads, IntrinsicInst *Store,
214 bool X86PreAMXConfig::checkVolatileModel(SmallSet<Value *, 4> &Loads,
221 return (Loads.size() == 1) && Loads.contains(ST);
223 // All Loads should be operands of KeyAMX.
224 // All tile operands of KeyAMX should come from Loads.
227 if (!Loads.erase(Op))
233 return Loads.empty() && (ST == cast<Value>(KeyAMX));
272 SmallSet<Value *, 4> Loads;
282 Loads.insert(II)
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/CodeGen/
ExpandMemCmp.cpp 9 // This pass tries to expand memcmp() calls into optimally-sized loads and
44 "memcmp-num-loads-per-block", cl::Hidden, cl::init(1),
45 cl::desc("The number of loads per basic block for inline expansion of "
49 "max-loads-per-memcmp", cl::Hidden,
50 cl::desc("Set maximum number of loads used in expanded memcmp"));
53 "max-loads-per-memcmp-opt-size", cl::Hidden,
54 cl::desc("Set maximum number of loads used in expanded memcmp for -Os/Oz"));
84 // comparing 33 bytes on X86+sse can be done with 2x16-byte loads and
149 // Do not expand if the total number of loads is larger than what the
178 // We try to do as many non-overlapping loads as possible starting from th
    [all...]
WinEHPrepare.cpp 83 DenseMap<BasicBlock *, Value *> &Loads, Function &F);
1086 // TODO: Share loads when one use dominates another, or when a catchpad exit
1107 // loads of the slot before every use.
1108 DenseMap<BasicBlock *, Value *> Loads;
1112 // Use is on an EH pad phi. Leave it alone; we'll insert loads and
1116 replaceUseWithLoad(PN, U, SpillSlot, Loads, F);
1177 DenseMap<BasicBlock *, Value *> &Loads,
1192 // PHI node that we cannot have multiple loads. The problem is that
1195 // For this reason, we keep track of and reuse loads we insert.
1237 Value *&Load = Loads[IncomingBlock]
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Transforms/IPO/
ArgumentPromotion.cpp 47 #include "llvm/Analysis/Loads.h"
127 // We need to keep the original loads for each argument and the elements
157 // Okay, this is being promoted. This means that the only uses are loads
158 // or GEPs which are only used by loads
161 // (where direct loads are tracked as no indices).
178 // Since loads will only have a single operand, and GEPs only a single
179 // non-index operand, this will record direct loads without any indices,
180 // and gep+loads with the GEP indices.
184 // GEPs with a single 0 index can be merged with direct loads
253 // Loop over the operands, inserting GEP and loads in the caller a
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/
SIISelLowering.cpp 1275 // assume those use MUBUF instructions. Scratch loads / stores are currently
3058 // Walk the register/memloc assignments, inserting copies/loads.
4562 // packs values if loads return unpacked values.
6347 SmallVector<SDValue, 4> Loads;
6379 Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops,
6384 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads);
6386 return Loads[0];
7896 // Handle 8 bit and 16 bit buffer loads
7957 // FIXME: Constant loads should all be marked invariant.
8101 // Non-uniform loads will be selected to MUBUF instructions, so the
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Target/PowerPC/
PPCISelLowering.cpp 185 // No extending loads from f16 or HW conversions back and forth.
2525 /// Used when computing address flags for selecting loads and stores.
2773 /// displacement field (prefixed loads/stores).
2880 // loads.
2943 // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
5339 // The loads are scheduled at the beginning of the call sequence, and the
5752 // Walk the register/memloc assignments, inserting copies/loads.
6055 // force all the loads to happen before doing any other lowering.
7233 // left-justified on AIX. Loads must be a power-of-2 size and cannot be
7235 // 2 and 1 byte loads
    [all...]

Completed in 77 milliseconds