/src/external/apache2/llvm/dist/llvm/include/llvm/CodeGen/GlobalISel/

MachineIRBuilder.h
  1396: MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0,
  1399:   return buildInstr(TargetOpcode::G_ADD, {Dst}, {Src0, Src1}, Flags);
  1413: MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0,
  1416:   return buildInstr(TargetOpcode::G_SUB, {Dst}, {Src0, Src1}, Flags);
  1429: MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0,
  1432:   return buildInstr(TargetOpcode::G_MUL, {Dst}, {Src0, Src1}, Flags);
  1435: MachineInstrBuilder buildUMulH(const DstOp &Dst, const SrcOp &Src0,
  1438:   return buildInstr(TargetOpcode::G_UMULH, {Dst}, {Src0, Src1}, Flags);
  1441: MachineInstrBuilder buildSMulH(const DstOp &Dst, const SrcOp &Src0,
  1444:   return buildInstr(TargetOpcode::G_SMULH, {Dst}, {Src0, Src1}, Flags);
  [all ...]
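
All of these helpers are thin wrappers that forward to buildInstr() with the matching generic opcode. A minimal usage sketch, assuming a MachineIRBuilder already positioned at an insertion point (the function name, registers, and types here are illustrative, not from the hits above):

```cpp
// Sketch: lower Dst = (A + B) * A into generic MIR. Assumes A and B are
// 32-bit virtual registers and MIRBuilder has a valid insertion point.
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

static Register emitMulAdd(MachineIRBuilder &MIRBuilder, Register A,
                           Register B) {
  LLT S32 = LLT::scalar(32);
  auto Sum = MIRBuilder.buildAdd(S32, A, B);    // G_ADD, new vreg for Dst
  auto Prod = MIRBuilder.buildMul(S32, Sum, A); // G_MUL, consumes Sum's def
  return Prod.getReg(0);
}
```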

GISelKnownBits.h
  38: void computeKnownBitsMin(Register Src0, Register Src1, KnownBits &Known,
  42: unsigned computeNumSignBitsMin(Register Src0, Register Src1,

MIPatternMatch.h
  520: Src0Ty Src0;
  524: TernaryOp_match(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2)
  525:     : Src0(Src0), Src1(Src1), Src2(Src2) {}
  531:   return (Src0.match(MRI, TmpMI->getOperand(1).getReg()) &&
  542: m_GInsertVecElt(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
  544:   TargetOpcode::G_INSERT_VECTOR_ELT>(Src0, Src1, Src2);
  549: m_GISelect(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
  551:   Src0, Src1, Src2);
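
TernaryOp_match is the template behind three-operand matchers such as m_GInsertVecElt and m_GISelect. A hedged sketch of how a combine might use one (the helper name and registers are made up):

```cpp
// Sketch: take apart "DstReg = G_SELECT Cond, TVal, FVal" with m_GISelect.
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"

using namespace llvm;
using namespace llvm::MIPatternMatch;

static bool matchSelectParts(Register DstReg, MachineRegisterInfo &MRI,
                             Register &Cond, Register &TVal, Register &FVal) {
  return mi_match(DstReg, MRI,
                  m_GISelect(m_Reg(Cond), m_Reg(TVal), m_Reg(FVal)));
}
```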

/src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/

R600ExpandSpecialInstrs.cpp
  111:   MI.getOperand(1).getReg(), // src0
  146: Register Src0 =
  147:     BMI->getOperand(TII->getOperandIdx(Opcode, R600::OpName::src0))
  152: (void) Src0;
  154: if ((TRI.getEncodingValue(Src0) & 0xff) < 127 &&
  156:   assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
  198: Register Src0 =
  199:     MI.getOperand(TII->getOperandIdx(MI, R600::OpName::src0)).getReg();
  211: Src0 = TRI.getSubReg(Src0, SubRegIndex)
  [all ...]

AMDGPUInstCombineIntrinsic.cpp
  43: static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,
  45:   APFloat Max3 = maxnum(maxnum(Src0, Src1), Src2);
  47:   APFloat::cmpResult Cmp0 = Max3.compare(Src0);
  55:   return maxnum(Src0, Src2);
  57:   return maxnum(Src0, Src1);
  278: Value *Src0 = II.getArgOperand(0);
  282: if (isa<UndefValue>(Src0)) {
  306: Value *FCmp = IC.Builder.CreateFCmpUNO(Src0, Src0);
  314:   IC.Builder.CreateFCmpOEQ(Src0, ConstantFP::get(Src0->getType(), 0.0))
  [all ...]
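
fmed3AMDGCN folds a floating-point median of three: take the overall maximum, discard whichever source supplied it, and return the maximum of the remaining pair. The same logic in plain C++ (a sketch only; the real helper operates on APFloat with maxnum NaN semantics):

```cpp
#include <cmath>

// Median of three: the overall maximum cannot be the median, so drop the
// source it came from and take the maximum of the other two.
static double fmed3(double Src0, double Src1, double Src2) {
  double Max3 = std::fmax(std::fmax(Src0, Src1), Src2);
  if (Max3 == Src0)
    return std::fmax(Src1, Src2);
  if (Max3 == Src1)
    return std::fmax(Src0, Src2);
  return std::fmax(Src0, Src1);
}
```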

SIShrinkInstructions.cpp
  66: int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
  68: // Try to fold Src0
  69: MachineOperand &Src0 = MI.getOperand(Src0Idx);
  70: if (Src0.isReg()) {
  71:   Register Reg = Src0.getReg();
  81:   Src0.ChangeToImmediate(MovSrc.getImm());
  84:   Src0.ChangeToFrameIndex(MovSrc.getIndex());
  87:   Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(),
  103: // We have failed to fold src0, so commute the instruction and try again.
  177: // cmpk requires src0 to be a register
  [all ...]
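
The hits at 81-87 rewrite src0 in place once the source of the defining mov is known. A reduced sketch of that rewrite, with DefMI assumed to be the mov defining the register src0 currently reads:

```cpp
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Sketch: fold the source of a mov into the src0 operand that reads it.
static void foldMovSrcIntoSrc0(MachineOperand &Src0, MachineInstr &DefMI) {
  MachineOperand &MovSrc = DefMI.getOperand(1);
  if (MovSrc.isImm())
    Src0.ChangeToImmediate(MovSrc.getImm());    // src0 becomes an immediate
  else if (MovSrc.isFI())
    Src0.ChangeToFrameIndex(MovSrc.getIndex()); // or a frame index
}
```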

GCNDPPCombine.cpp
  8: // The pass combines V_MOV_B32_dpp instruction with its VALU uses as a DPP src0
  238: auto *Src0 = TII->getNamedOperand(MovMI, AMDGPU::OpName::src0);
  239: assert(Src0);
  240: if (!TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src0)) {
  241:   LLVM_DEBUG(dbgs() << "  failed: src0 is illegal\n");
  245: DPPInst.add(*Src0);
  425: auto *SrcOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::src0);
  540: auto *Src0 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0);
  [all ...]

SIPeepholeSDWA.cpp
  308: if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) {
  343: MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  349: // If this is not src0 then it could be src1
  537: MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  538: auto Imm = foldToImm(*Src0);
  577: MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  578: auto Imm = foldToImm(*Src0);
  645: MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0)
  [all ...]

SIFoldOperands.cpp
  219: if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
  848:     AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
  1053: int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  1056: MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  1060:     Src0->isImm()) {
  1061:   MI->getOperand(1).ChangeToImmediate(~Src0->getImm());
  1071: if (!Src0->isImm() && !Src1->isImm())
  1077: if (Src0->isImm() && Src1->isImm()) {
  1079:   if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
  1085: // Be careful to change the right operand, src0 may belong to a different
  [all ...]

SIInstrInfo.cpp
  2020: MachineOperand &Src0,
  2030:   "All commutable instructions have both src0 and src1 modifiers");
  2079: assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
  2085: MachineOperand &Src0 = MI.getOperand(Src0Idx);
  2089: if (Src0.isReg() && Src1.isReg()) {
  2090:   if (isOperandLegal(MI, Src1Idx, &Src0)) {
  2096: } else if (Src0.isReg() && !Src1.isReg()) {
  2097:   // src0 should always be able to support any operand type, so no need to
  2099:   CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
  2100: } else if (!Src0.isReg() && Src1.isReg())
  [all ...]
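
This case split (reg/reg, reg/non-reg, non-reg/reg) is what backs TargetInstrInfo::commuteInstruction for VALU instructions. A hedged caller-side sketch, with the operand indices assumed to come from getNamedOperandIdx as in the hits above:

```cpp
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Sketch: ask the target to swap two operands in place, e.g. to move an
// immediate into the slot that accepts literals. Returns true on success.
static bool commuteSrc0Src1(const TargetInstrInfo &TII, MachineInstr &MI,
                            unsigned Src0Idx, unsigned Src1Idx) {
  return TII.commuteInstruction(MI, /*NewMI=*/false, Src0Idx, Src1Idx) !=
         nullptr;
}
```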

AMDGPUPostLegalizerCombiner.cpp
  211: Register Src0;
  213: bool IsShr = mi_match(SrcReg, MRI, m_GLShr(m_Reg(Src0), m_ICst(ShiftAmt)));
  214: if (IsShr || mi_match(SrcReg, MRI, m_GShl(m_Reg(Src0), m_ICst(ShiftAmt)))) {
  223:   MatchInfo.CvtVal = Src0;

SIOptimizeExecMasking.cpp
  428: MachineOperand &Src0 = SaveExecInst->getOperand(1);
  433: if (Src0.isReg() && Src0.getReg() == CopyFromExec) {
  439:   OtherOp = &Src0;

SIFixSGPRCopies.cpp
  311: TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  717:     AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
  720: MachineOperand &Src0 = MI.getOperand(Src0Idx);
  724: if ((Src0.isReg() && TRI->isSGPRReg(*MRI, Src0.getReg()) &&
  725:      Src0.getReg() != AMDGPU::M0) &&
  734: for (MachineOperand *MO : {&Src0, &Src1}) {

SILoadStoreOptimizer.cpp
  1461: const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  1465:     .add(*Src0)
  1616: const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  1620:     .add(*Src0)
  1790: const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
  1793: auto Offset0P = extractConstOffset(*Src0);
  1799:   BaseLo = *Src0;
  1802: Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
  [all ...]

SIISelLowering.cpp
  3890: MachineOperand &Src0 = MI.getOperand(2);
  3896: BuildMI(*BB, MI, DL, TII->get(Opc), Dest0.getReg()).add(Src0).add(Src1);
  3914: MachineOperand &Src0 = MI.getOperand(1);
  3921:   MI, MRI, Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass);
  3923:   MI, MRI, Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass);
  3962: MachineOperand &Src0 = MI.getOperand(1);
  3965: const TargetRegisterClass *Src0RC = Src0.isReg()
  3966:     ? MRI.getRegClass(Src0.getReg())
  3978:   MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
  3983:   MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC)
  [all ...]
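
The sub0/sub1 plumbing here splits one 64-bit scalar operation into two 32-bit halves (for addition, a low add followed by an add-with-carry for the high half). The underlying arithmetic, modeled in plain C++:

```cpp
#include <cstdint>

// Conceptual model of the split: add the low 32-bit halves, then add the
// high halves plus the carry out of the low addition.
static uint64_t add64ViaHalves(uint64_t A, uint64_t B) {
  uint32_t ALo = uint32_t(A), AHi = uint32_t(A >> 32);
  uint32_t BLo = uint32_t(B), BHi = uint32_t(B >> 32);
  uint32_t Lo = ALo + BLo;
  uint32_t Carry = Lo < ALo;       // unsigned wrap means a carry occurred
  uint32_t Hi = AHi + BHi + Carry;
  return (uint64_t(Hi) << 32) | Lo;
}
```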

AMDGPUInstructionSelector.cpp
  603: Register Src0 = MI.getOperand(1).getReg();
  605: if (MRI->getType(Src0) != S32)
  615:   getConstantVRegValWithLookThrough(Src0, *MRI, true, true, true);
  630: // (build_vector_trunc $src0, undef) -> copy $src0
  636:   RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
  645: // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
  646: //   => (S_PACK_HH_B32_B16 $src0, $src1)
  647: // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
  648: //   => (S_PACK_LH_B32_B16 $src0, $src1)
  [all ...]

AMDGPUPromoteAlloca.cpp
  962: Value *Src0 = CI->getOperand(0);
  963: Type *EltTy = Src0->getType()->getPointerElementType();

GCNHazardRecognizer.cpp
  894: // Use V_MOB_B32 v?, v?. Register must be alive so use src0 of V_PERMLANE*
  896: auto *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
  897: Register Reg = Src0->getReg();
  898: bool IsUndef = Src0->isUndef();

/src/external/apache2/llvm/dist/llvm/lib/Target/AArch64/

AArch64AdvSIMDScalarPass.cpp
  298: unsigned Src0 = 0, SubReg0;
  309: Src0 = MOSrc0->getReg();
  311: // Src0 is going to be reused, thus, it cannot be killed anymore.
  330: // Src0 is going to be reused, thus, it cannot be killed anymore.
  341: if (!Src0) {
  343:   Src0 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
  344:   insertCopy(TII, MI, Src0, OrigSrc0, KillSrc0);
  363: .addReg(Src0, getKillRegState(KillSrc0), SubReg0)

/src/external/apache2/llvm/dist/llvm/lib/Transforms/Scalar/

InferAddressSpaces.cpp
  656: Constant *Src0 = CE->getOperand(1);
  658: if (Src0->getType()->getPointerAddressSpace() ==
  662:   CE->getOperand(0), ConstantExpr::getAddrSpaceCast(Src0, TargetType),
  842: Value *Src0 = Op.getOperand(1);
  845: auto I = InferredAddrSpace.find(Src0);
  847:   I->second : Src0->getType()->getPointerAddressSpace();
  853: auto *C0 = dyn_cast<Constant>(Src0);
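
At 656-662, Src0 is the first pointer arm of a constant select (operand 1 of the ConstantExpr), and the pass rebuilds the select with the arms cast into the inferred address space. A rough sketch of that kind of rebuild; NewTy is hypothetical, and this simplified version assumes both arms still carry the old address space:

```cpp
#include "llvm/IR/Constants.h"

using namespace llvm;

// Sketch: rebuild a constant "select %c, %p, %q" so both pointer arms share
// the inferred address space (an addrspacecast must actually change it).
static Constant *castSelectArms(ConstantExpr *CE, Type *NewTy) {
  Constant *TVal = ConstantExpr::getAddrSpaceCast(CE->getOperand(1), NewTy);
  Constant *FVal = ConstantExpr::getAddrSpaceCast(CE->getOperand(2), NewTy);
  return ConstantExpr::getSelect(CE->getOperand(0), TVal, FVal);
}
```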

ScalarizeMaskedMemIntrin.cpp
  150: Value *Src0 = CI->getArgOperand(3);
  182: Value *VResult = Src0;
  420: Value *Src0 = CI->getArgOperand(3);
  434: Value *VResult = Src0;
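
Src0 here is the pass-through operand of llvm.masked.load (argument 3): lanes whose mask bit is clear take their value from Src0 instead of memory. The scalarized semantics, modeled as a plain loop:

```cpp
#include <cstddef>

// Scalar model of llvm.masked.load: disabled lanes take the pass-through
// value instead of touching memory.
static void maskedLoad(const int *Ptr, const bool *Mask, const int *Src0,
                       int *Result, std::size_t NumElts) {
  for (std::size_t I = 0; I != NumElts; ++I)
    Result[I] = Mask[I] ? Ptr[I] : Src0[I];
}
```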

/src/external/apache2/llvm/dist/llvm/lib/CodeGen/GlobalISel/

GISelKnownBits.cpp
  97: /// Compute known bits for the intersection of \p Src0 and \p Src1
  98: void GISelKnownBits::computeKnownBitsMin(Register Src0, Register Src1,
  110:   computeKnownBitsImpl(Src0, Known2, DemandedElts, Depth);
  507: /// Compute number of sign bits for the intersection of \p Src0 and \p Src1
  508: unsigned GISelKnownBits::computeNumSignBitsMin(Register Src0, Register Src1,
  515:   return std::min(computeNumSignBits(Src0, DemandedElts, Depth), Src1SignBits);
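
The "Min" variants keep only facts that hold for both sources, which is the conservative answer when either register could supply the value (a select, for instance). A sketch of the known-bits intersection this implies, after each side has been computed:

```cpp
#include "llvm/Support/KnownBits.h"

// Sketch: keep only bits known, with the same value, in both inputs.
static llvm::KnownBits intersectKnown(const llvm::KnownBits &K0,
                                      const llvm::KnownBits &K1) {
  llvm::KnownBits Out(K0.getBitWidth());
  Out.Zero = K0.Zero & K1.Zero; // known zero in both
  Out.One = K0.One & K1.One;    // known one in both
  return Out;
}
```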

CSEMIRBuilder.cpp
  199: const SrcOp &Src0 = SrcOps[0];
  202: ConstantFoldExtOp(Opc, Src0.getReg(), Src1.getImm(), *getMRI()))

/src/external/apache2/llvm/dist/clang/lib/CodeGen/

CGBuiltin.cpp
  444: llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  448:   Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
  449:   return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
  451: Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  452: return CGF.Builder.CreateCall(F, Src0);
  461: llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  466:   Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
  467:   return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
  469: Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  470: return CGF.Builder.CreateCall(F, { Src0, Src1 });
  [all ...]
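
Both emitters choose between a plain intrinsic and its llvm.experimental.constrained.* counterpart; the lines elided between 444 and 448 are presumably the mode check. A sketch that mirrors, but is not copied from, the unary helper (it assumes clang's internal CodeGenFunction type):

```cpp
// Sketch: the constrained variant is chosen when the builder is in
// FP-constrained mode, e.g. under #pragma STDC FENV_ACCESS ON.
static llvm::Value *emitUnaryFPBuiltin(clang::CodeGen::CodeGenFunction &CGF,
                                       llvm::Value *Src0, unsigned IntrinsicID,
                                       unsigned ConstrainedIntrinsicID) {
  if (CGF.Builder.getIsFPConstrained()) {
    llvm::Function *F =
        CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
  }
  llvm::Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, Src0);
}
```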

/src/external/apache2/llvm/dist/llvm/lib/Target/X86/

X86ISelDAGToDAG.cpp
  4386: SDValue Src0 = N0;
  4397: Src0 = N0Temp.getOperand(0);
  4435: bool CanFoldLoads = Src0 != Src1;
  4444: FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src0, Tmp0, Tmp1,
  4447:   std::swap(Src0, Src1);
  4468: Src0 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src0);
  4491: SDValue Ops[] = { InMask, Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
  4495: SDValue Ops[] = { Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
  4506: CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, InMask, Src0, Src1)
  [all ...]