Home | Sort by: relevance | last modified time | path
    Searched defs:And (Results 1 - 25 of 36) sorted by relevance

1 2

  /src/external/gpl3/gdb.old/dist/gdbsupport/
traits.h 5 This program is free software; you can redistribute it and/or modify
93 because "and/or", etc. are reserved keywords. */
121 struct And;
124 struct And<> : public std::true_type
128 struct And<B1> : public B1
132 struct And<B1, B2>
137 struct And<B1, B2, B3, Bn...>
138 : public std::conditional<B1::value, And<B2, B3, Bn...>, B1>::type
  /src/external/apache2/llvm/dist/clang/include/clang/Basic/
OperatorPrecedence.h 10 /// Defines and computes precedence levels for binary/ternary operators.
35 And = 8, // &
  /src/external/apache2/llvm/dist/clang/include/clang/Analysis/Analyses/
ThreadSafetyLogical.h 25 And,
68 class And : public BinOp {
70 And(LExpr *LHS, LExpr *RHS) : BinOp(LHS, RHS, LExpr::And) {}
72 static bool classof(const LExpr *E) { return E->kind() == LExpr::And; }
  /src/external/apache2/llvm/dist/llvm/include/llvm/Analysis/
IVDescriptors.h 9 // This file "describes" induction and recurrence variables.
42 And, ///< Bitwise or logical AND of integers.
59 /// Basic recurrences are defined as the summation, product, OR, AND, XOR, min,
116 /// compare instruction to the select instruction and stores this pointer in
147 /// Returns true if Phi is a reduction of type Kind and adds it to the
148 /// RecurrenceDescriptor. If either \p DB is non-null or \p AC and \p DT are
159 /// is returned in RedDes. If either \p DB is non-null or \p AC and \p DT are
  /src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/
SIOptimizeExecMaskingPreRA.cpp 81 // See if there is a def between \p AndIdx and \p SelIdx that needs to live
92 const MachineInstr &Sel, const MachineInstr &And) {
93 SlotIndex AndIdx = LIS->getInstructionIndex(And);
132 auto *And =
134 if (!And || And->getOpcode() != AndOpc ||
135 !And->getOperand(1).isReg() || !And->getOperand(2).isReg())
138 MachineOperand *AndCC = &And->getOperand(1);
142 AndCC = &And->getOperand(2)
    [all...]
SIPreEmitPeephole.cpp 76 // and another block which consumes that saved mask and then a branch.
83 const unsigned And = IsWave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
98 (A->getOpcode() != And && A->getOpcode() != AndN2))
135 // First if sreg is only used in the AND instruction fold the immediate
136 // into the AND.
154 // Replace AND with MOV
163 // Remove AND instruction
375 // and limit the distance to 20 instructions for compile time purposes.
SILowerControlFlow.cpp 13 /// All control flow is handled using predicated instructions and
19 /// Vector ALU) and then the ScalarALU will AND the VCC register with the
32 /// %sgpr0 = S_AND_SAVEEXEC_B64 %vcc // Save and update the exec mask
108 // Skip to the next instruction, ignoring debug instructions, and trivial
110 // and the successor has one predecessor.
209 // If there is only one use of save exec register and that use is SI_END_CF,
233 MachineInstr *And =
238 setImpSCCDefDead(*And, true);
271 // Replace with and so we don't need to fix the live interval for conditio
    [all...]
AMDGPUISelDAGToDAG.cpp 726 // calculation into the LOAD and STORE instructions.
820 // has a separate operand for the offset and width, the scalar version packs
821 // the width and offset into a single operand. Try to move to the scalar
859 case ISD::AND:
930 // Check that split base (Lo and Hi) are extracted from the same one.
1201 // On Southern Islands instruction with a negative base value and an offset
1257 // operations can share the zero base address register, and enables merging
1290 // On Southern Islands instruction with a negative base value and an offset
1393 // FIXME: This should be a pattern predicate and not reach here
1423 // Both N2 and N3 are divergent. Use N0 (the result of the add) as th
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Transforms/AggressiveInstCombine/
AggressiveInstCombine.cpp 49 /// This class provides both the logic to combine expression patterns and
87 // Match V to funnel shift left/right and capture the source operands and
117 // One phi operand must be a funnel/rotate operation, and the other phi
197 /// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
199 /// of 'and' ops, then we also need to capture the fact that we saw an
200 /// "and X, 1", so that's an extra return value for that case.
213 /// chain of 'and' or 'or' instructions looking for shift ops of a common source
217 /// and (and (X >> 1), 1), (X >> 4
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Target/M68k/
M68kInstrInfo.cpp 99 // Start from the bottom of the block and work up, examining the
177 // And we are allowed to modify the block and the target block of the
209 // Otherwise preserve TBB, FBB and Cond as requested
228 // conditional branches branch to the same destination and their condition
333 unsigned Mask, And;
340 And = M68k::AND16di;
342 And = M68k::AND32di;
345 BuildMI(MBB, I, DL, get(And), Reg).addReg(Reg).addImm(Mask);
384 /// Expand SExt MOVE pseudos into a MOV and a EXT if the operands are tw
    [all...]
  /src/external/apache2/llvm/dist/llvm/include/llvm/CodeGen/GlobalISel/
MIPatternMatch.h 117 // We might want to support taking in some MachineOperands and call getReg on
130 template <typename... Preds> struct And {
138 struct And<Pred, Preds...> : And<Preds...> {
140 And(Pred &&p, Preds &&... preds)
141 : And<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {
145 return P.match(MRI, src) && And<Preds...>::match(MRI, src);
167 template <typename... Preds> And<Preds...> m_all_of(Preds &&... preds) {
168 return And<Preds...>(std::forward<Preds>(preds)...);
467 // General helper for generic MI compares, i.e. G_ICMP and G_FCM
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Target/X86/MCTargetDesc/
X86BaseInfo.h 9 // This file contains small standalone helper functions and enum definitions for
10 // the X86 target useful for the compiler back-end and the MC libraries.
103 // are never used in MachineInstrs and are inverses of one another.
116 // AND
117 And,
128 // JA, JB and variants.
130 // JE, JL, JG and variants.
132 // JS, JP, JO and variants
186 return FirstMacroFusionInstKind::And;
342 case X86::FirstMacroFusionInstKind::And
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Transforms/InstCombine/
InstCombineSimplifyDemanded.cpp 29 /// constant that are not demanded. If so, shrink the constant and return true.
70 /// change and false otherwise.
88 /// Consequently, depending on the mask and V, it may be possible to replace V
90 /// the replacement and returns true. In all other cases, it returns false after
91 /// analyzing the expression and setting KnownOne and known to be one in the
96 /// Known.One and Known.Zero always follow the invariant that:
98 /// That is, a bit can't be both 1 and 0. Note that the bits in Known.One and
100 /// also that the bitwidth of V, DemandedMask, Known.Zero and Known.One must al
    [all...]
InstCombineMulDivRem.cpp 48 /// non-zero. If this allows us to simplify the computation, do so and return
73 // We know that this is an exact/nuw shift and that the input is a
101 // But that needs to be done carefully and/or while removing potential
206 // Interpret X * (-1<<C) as (-X) * (1<<C) and try to sink the negation.
299 /// i1 mul -> i1 and.
325 // (zext bool X) * (zext bool Y) --> zext (and X, Y)
326 // (sext bool X) * (sext bool Y) --> zext (and X, Y)
332 Value *And = Builder.CreateAnd(X, Y, "mulbool");
333 return CastInst::Create(Instruction::ZExt, And, I.getType());
335 // (sext bool X) * (zext bool Y) --> sext (and X, Y
    [all...]
InstCombineShifts.cpp 9 // This file implements the visitShl, visitLShr, and visitAShr functions.
51 // x shiftopcode (Q+K) iff (Q+K) u< bitwidth(x) and
53 // This is valid for any shift, but they must be identical, and we must be
54 // careful in case we have (zext(Q)+zext(K)) and look past extensions,
70 // and look through it. The truncation imposes additional constraints on the
91 // ... and if it's not two right-shifts, we know the answer already.
103 // and for that one of the operands of the shift must be one-use,
122 // If there was a truncation, and we have a right-shift, we can only fold if
133 // If it is, and that was the question, return the base value.
147 // If the pattern did not involve trunc, and both of the original shift
    [all...]
InstCombineSelect.cpp 103 // +0.0 compares equal to -0.0, and so it does not behave as required for this
117 /// select (icmp eq (and X, C1)), TC, FC
118 /// iff C1 is a power 2 and the difference between TC and FC is a power-of-2.
120 /// (shr (and (X, C1)), (log2(C1) - log2(TC-FC))) + FC
122 /// (shl (and (X, C1)), (log2(TC-FC) - log2(C1))) + FC
169 // If the select constants differ by exactly one bit and that's the same
170 // bit that is masked and checked by the select condition, the select can
175 // If we have to create an 'and', then we must kill the cmp to not
203 // Determine which shift is needed to transform result of the 'and' into th
    [all...]
InstCombineAndOrXor.cpp 9 // This file implements the visitAnd, visitOr, and visitXor functions.
53 /// This is the complement of getICmpCode, which turns an opcode and two
65 /// This is the complement of getFCmpCode, which turns an opcode and two
119 /// whether to treat V, Lo, and Hi as signed or not.
144 /// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
146 /// One of A and B is considered the mask. The other is the value. This is
148 /// only "Mask", then both A and B can be considered masks. If A is the mask,
150 /// If both A and C are constants, this proof is also easy.
161 /// "Mixed" declares that (A & B) == C and C might or might not contain any
162 /// number of one bits and zero bits
    [all...]
InstCombineCasts.cpp 92 // Get the type really allocated and the type casted to.
98 // is scalable and the allocated type is not. This because we need to
100 // For the opposite case where the allocated type is scalable and the
118 // The alloc and cast types should be either both fixed or both scalable.
165 // If the allocation has multiple real uses, insert a cast and change all
196 case Instruction::And:
300 // condition may inhibit other folds and lead to worse codegen.
325 /// Constants and extensions/truncates from the destination type are always
353 /// instead of its larger type, and arrive with the same value.
359 /// makes sense if x and y can be efficiently truncated
    [all...]
  /src/external/apache2/llvm/dist/llvm/bindings/ocaml/llvm/
llvm.ml 205 | And
288 | And
504 (*--... Operations on pointer, vector, and array types .....................--*)
714 (*--... Operations on global variables, functions, and aliases (globals) ...--*)
1094 (*--... Operations on call and invoke instructions (only) ..................--*)
  /src/external/apache2/llvm/dist/llvm/lib/CodeGen/
AtomicExpandPass.cpp 157 // and is of appropriate alignment, to be passed through for target
239 // If a compare and swap is lowered to LL/SC, we can do smarter fence
289 Op == AtomicRMWInst::And)) {
457 // shouldExpandAtomicRMW in cases where this is required and possible.
506 case AtomicRMWInst::And:
624 /// incoming address, Addr, and ValueType, and constructs the address,
625 /// shift-amounts and masks needed to work with a larger value of size
675 // turn bytes into bits, and count from the other side.
709 Value *And = Builder.CreateAnd(WideWord, PMV.Inv_Mask, "unmasked")
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Target/Hexagon/
HexagonLoopIdiomRecognition.cpp 332 // of the clone, and build a map from the cloned values to the
435 // equivalent counterparts in Root, and replace those subtrees with
727 // Check C against the possible values for comparison: 0 and (1 << i):
904 // And for the inverse:
930 // If X is loop invariant, it must be the input polynomial, and the
991 // and the result are promoted, for example, it may produce more non-zero
1003 case Instruction::And:
1058 Value *And = IRBuilder<>(In).CreateAnd(T->getOperand(0), Mask);
1059 T->replaceAllUsesWith(And);
1076 // coming from the loop block will be promoted to another type, and so th
    [all...]
HexagonVectorCombine.cpp 11 // AlignVectors: replace unaligned vector loads and stores with aligned ones.
303 // VS2017 and some versions of VS2019 have trouble compiling this:
461 Value *And = Builder.CreateAnd(AsInt, Mask);
462 return Builder.CreateIntToPtr(And, ValTy->getPointerTo());
548 "Base and In should be in the same block");
569 // Don't mix HVX and non-HVX instructions.
620 // Don't mix HVX and non-HVX instructions.
704 // address and the amount of the down-alignment will be AlignVal:
752 // Align instructions for both HVX (V6_valign) and scalar (S2_valignrb)
839 // Create an extra "undef" sector at the beginning and at the end
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Target/WebAssembly/
WebAssemblyISelLowering.cpp 72 // Transform loads and stores to pointers in address space 1 to loads and
93 // Take the default expansion for va_arg, va_copy, and va_end. There is no
116 // Support minimum and maximum, which otherwise default to expand.
152 // Combine int_to_fp of extract_vectors and vice versa into conversions ops
160 // Support saturating add for i8x16 and i16x8
204 // But we do have integer min and max operations
263 // - truncating SIMD stores and most extending loads
286 // And some truncating stores are legal as well
306 // consistent with the f64 and f128 names
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Target/AArch64/
AArch64ISelDAGToDAG.cpp 228 /// between 1 and 4 elements. If it contains a single element that is returned
233 // e.g. structured loads and stores (ldN, stN).
360 // opcode and that it has a immediate integer right operand.
394 /// Val set to the 12-bit value and Shift set to the shifter operand.
438 // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
504 // the addressing mode and save a cycle.
580 } else if (N.getOpcode() == ISD::AND) {
638 /// is a lane in the upper half of a 128-bit vector. Recognize and select this
732 /// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
809 /// need to create a real ADD instruction from it anyway and there's no point i
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Target/X86/
X86ISelDAGToDAG.cpp 41 static cl::opt<bool> AndImmShrink("x86-and-imm-shrink", cl::init(true),
344 // This user is already selected. Count it as a legitimate use and
360 // Those instruction won't match in ISEL, for now, and would
375 // used to indicate SP offsets for argument passing and
397 // ... otherwise, count this and move on.
436 // Helper to detect unneeded and instructions on shift amounts. Called
439 assert(N->getOpcode() == ISD::AND && "Unexpected opcode");
466 /// Address-mode matching performs shift-of-and to and-of-shift
535 // this happens we will use 512-bit operations and the mask will not b
    [all...]

Completed in 52 milliseconds

1 2