Home | Sort by: relevance | last modified time | path
    Searched refs:Masked (Results 1 - 19 of 19) sorted by relevance

  /src/external/apache2/llvm/dist/llvm/lib/Target/RISCV/
RISCVISelDAGToDAG.h 97 uint8_t Masked;
107 uint8_t Masked;
117 uint8_t Masked;
126 uint8_t Masked;
135 uint8_t Masked;
144 uint8_t Masked;
152 uint8_t Masked;
RISCVISelLowering.cpp 4373 // the selection of the masked intrinsics doesn't do this for us.
4454 // the selection of the masked intrinsics doesn't do this for us.
4525 SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4528 return DAG.getMergeValues({Masked, Chain}, DL);
  /src/external/apache2/llvm/dist/llvm/lib/Transforms/InstCombine/
InstCombineShifts.cpp 191 Value *Masked, *ShiftShAmt;
193 m_Shift(m_Value(Masked), m_ZExtOrSelf(m_Value(ShiftShAmt))));
198 if (match(Masked, m_CombineAnd(m_Trunc(m_Value(Masked)), m_Value(Trunc))) &&
203 Type *WidestTy = Masked->getType();
225 if (match(Masked, m_c_And(m_CombineOr(MaskA, MaskB), m_Value(X)))) {
230 if (!canTryToConstantAddTwoShiftAmounts(OuterShift, ShiftShAmt, Masked,
255 } else if (match(Masked, m_c_And(m_CombineOr(MaskC, MaskD), m_Value(X))) ||
256 match(Masked, m_Shr(m_Shl(m_Value(X), m_Value(MaskShAmt)),
262 if (!canTryToConstantAddTwoShiftAmounts(OuterShift, ShiftShAmt, Masked,
    [all...]
InstCombineAndOrXor.cpp 231 /// Convert an analysis of a masked ICmp into its equivalent if all boolean
292 // Any icmp can be viewed as being trivially masked; if it allows us to
523 "Expected equality predicates for masked type of icmps.");
560 "Expected equality predicates for masked type of icmps.");
828 Value *Masked = Builder.CreateAnd(L1, Mask);
830 return Builder.CreateICmp(NewPred, Masked, Mask);
864 /// masked bits are zero.
1857 Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked");
1863 Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked");
2177 // The shift amount may be masked with negation
    [all...]
InstCombineCompares.cpp 105 /// If AndCst is non-null, then the loaded value is masked with that constant
193 // If the element is masked, handle it.
1319 Value *Masked = Builder.CreateAnd(X, Mask);
1320 return ICmpInst::Create(Instruction::ICmp, Pred, Masked, Zero);
2331 // For 'is positive?' check that the sign-bit is clear and at least 1 masked
2337 // For 'is negative?' check that the sign-bit is set and at least 1 masked
  /src/external/apache2/llvm/dist/llvm/lib/Support/
APFixedPoint.cpp 38 APInt Masked(NewVal & Mask);
41 if (!(Masked == Mask || Masked == 0)) {
  /src/external/apache2/llvm/dist/llvm/lib/Transforms/Instrumentation/
MemProfiler.cpp 372 // Masked store has an initial operand for the value.
427 if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
428 if (Masked->isZero())
AddressSanitizer.cpp 1453 if (F && (F->getName().startswith("llvm.masked.load.") ||
1454 F->getName().startswith("llvm.masked.store."))) {
1455 bool IsWrite = F->getName().startswith("llvm.masked.store.");
1456 // Masked store has an initial operand for the value.
1571 if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
1572 if (Masked->isZero())
  /src/external/apache2/llvm/dist/llvm/lib/CodeGen/
TypePromotion.cpp 66 // byte value is masked out as follows:
668 Value *Masked = Builder.CreateAnd(Trunc->getOperand(0), Mask);
670 if (auto *I = dyn_cast<Instruction>(Masked))
673 ReplaceAllUsersOfWith(Trunc, Masked);
  /src/external/apache2/llvm/dist/llvm/lib/Target/X86/
X86InstCombineIntrinsic.cpp 54 // XMM register mask efficiently, we could transform all x86 masked intrinsics
55 // to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
61 // Zero Mask - masked load instruction creates a zero vector.
74 // The pass-through vector for an x86 masked load is a zero vector.
84 // XMM register mask efficiently, we could transform all x86 masked intrinsics
85 // to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
91 // Zero Mask - this masked store instruction does nothing.
111 // 'Replace uses' doesn't work for stores. Erase the original masked store
    [all...]
X86ISelDAGToDAG.cpp 527 // Returns true if this masked compare can be implemented legally with this
717 // Indicates it is profitable to form an AVX512 masked operation. Returning
718 // false will favor a masked register-register masked move or vblendm and the
1464 // used. We're doing this late so we can prefer to fold the AND into masked
1945 // Implement some heroics to detect shifts of masked values where the mask can
2002 // The final check is to ensure that any masked out high bits of X are
4309 bool FoldedBCast, bool Masked) {
4312 if (Masked) \
4357 // to form a masked operation
    [all...]
X86ISelLowering.cpp 12119 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
12121 return Masked;
12187 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
12189 return Masked;
12193 // masked move.
14488 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
14490 return Masked;
15197 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
15199 return Masked;
15476 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Target/ARM/
ARMTargetTransformInfo.cpp 47 cl::desc("Enable the generation of masked loads and stores"));
424 // Extending masked load/Truncating masked stores is expensive because we
433 if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
439 CCH == TTI::CastContextHint::Masked) {
1049 // - from the masked intrinsic lowering pass with the actual vector type.
1052 // the rest. The pass runs before the masked intrinsic lowering pass, so if we
2083 // tail-predicated hardware loop, for which we need the MVE masked
  /src/external/apache2/llvm/dist/llvm/lib/Analysis/
TargetTransformInfo.cpp 745 return TTI::CastContextHint::Masked;
  /src/external/apache2/llvm/dist/llvm/include/llvm/Analysis/
TargetTransformInfo.h 643 /// Return true if the target supports masked store.
645 /// Return true if the target supports masked load.
653 /// Return true if the target supports masked scatter.
655 /// Return true if the target supports masked gather.
658 /// Return true if the target supports masked compress store.
660 /// Return true if the target supports masked expand load.
784 /// accesses or gaps and therefore vectorized using masked
1087 Masked, ///< The cast is used with a masked load/store.
1145 /// \return The cost of masked Load and Store instructions
    [all...]
  /src/external/apache2/llvm/dist/clang/lib/CodeGen/
CGOpenMPRuntime.cpp 11325 llvm::SmallVector<char, 2> Masked;
11328 Masked.push_back('N');
11329 Masked.push_back('M');
11332 Masked.push_back('N');
11335 Masked.push_back('M');
11338 for (char Mask : Masked) {
11621 // SVE generates only a masked function.
  /src/external/apache2/llvm/dist/llvm/lib/CodeGen/SelectionDAG/
TargetLowering.cpp 5932 SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax);
5933 Created.push_back(Masked.getNode());
5934 SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond);
DAGCombiner.cpp 2576 bool Masked = false;
2586 Masked = true;
2606 // If the result is masked, then no matter what kind of bool it is we can
2609 if (Masked ||
2618 /// masked 0/1 whose source operand is actually known to be 0/-1. If so, invert
5381 // Allow one node which will masked along with any loads found.
5385 // Also ensure that the node to be masked only produces one data result.
5399 assert(HasValue && "Node to be masked has no data result?");
5618 // For this AND to be a zero extension of the masked load the elements
6035 // If the left-shift isn't masked out then the only way this is a bswap i
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Transforms/Vectorize/
LoopVectorize.cpp 249 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
250 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
1470 /// Returns true if the target machine supports masked store operation
1477 /// Returns true if the target machine supports masked load operation
1484 /// Returns true if the target machine supports masked scatter operation
1490 /// Returns true if the target machine supports masked gather operation
1496 /// Returns true if the target machine can represent \p V as a masked gather
1526 // through scalar predication or masked load/store or masked gather/scatter.
1531 // Loads and stores that need some form of masked operation are predicate
    [all...]

Completed in 148 milliseconds