Home | Sort by: relevance | last modified time | path
    Searched refs:DstIdx (Results 1 - 16 of 16) sorted by relevance

  /src/external/apache2/llvm/dist/llvm/lib/CodeGen/
RegisterCoalescer.h 39 unsigned DstIdx = 0;
103 unsigned getDstIdx() const { return DstIdx; }
TwoAddressInstructionPass.cpp 128 bool commuteInstruction(MachineInstr *MI, unsigned DstIdx,
146 unsigned SrcIdx, unsigned DstIdx,
538 unsigned DstIdx,
559 Register RegA = MI->getOperand(DstIdx).getReg();
1127 unsigned SrcIdx, unsigned DstIdx,
1133 Register regA = MI.getOperand(DstIdx).getReg();
1142 bool Commuted = tryInstructionCommute(&MI, DstIdx, SrcIdx, regBKilled, Dist);
1323 unsigned DstIdx = 0;
1324 if (!MI->isRegTiedToDefOperand(SrcIdx, &DstIdx))
1328 MachineOperand &DstMO = MI->getOperand(DstIdx);
    [all...]
RegisterCoalescer.cpp 256 unsigned DstIdx);
440 SrcIdx = DstIdx = 0;
488 SrcIdx, DstIdx);
497 DstIdx = SrcSub;
510 if (DstIdx && !SrcIdx) {
512 std::swap(SrcIdx, DstIdx);
531 std::swap(SrcIdx, DstIdx);
556 assert(!DstIdx && !SrcIdx && "Inconsistent CoalescerPair state.");
571 TRI.composeSubRegIndices(DstIdx, DstSub);
1266 unsigned DstIdx = CP.isFlipped() ? CP.getSrcIdx() : CP.getDstIdx()
    [all...]
  /src/external/apache2/llvm/dist/clang/lib/CodeGen/
CGNonTrivialStruct.cpp 33 enum { DstIdx = 0, SrcIdx = 1 };
362 Address DstAddr = StartAddrs[DstIdx];
393 CGF.Builder.CreateICmpEQ(PHIs[DstIdx], DstArrayEnd, "done");
526 Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], this->Start);
562 Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], Offset);
572 Address DstAddr = this->CGF->Builder.CreateBitCast(Addrs[DstIdx], Ty);
603 *CGF, getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD), QT);
609 *CGF, getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD), QT);
615 CGF->MakeAddrLValue(getAddrWithOffset(Addrs[DstIdx], Offset), FT));
644 getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD), QT)
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/
R600ExpandSpecialInstrs.cpp 86 int DstIdx = TII->getOperandIdx(MI.getOpcode(), R600::OpName::dst);
87 assert(DstIdx != -1);
88 MachineOperand &DstOp = MI.getOperand(DstIdx);
R600Packetizer.cpp 84 int DstIdx = TII->getOperandIdx(BI->getOpcode(), R600::OpName::dst);
85 if (DstIdx == -1) {
88 Register Dst = BI->getOperand(DstIdx).getReg();
SIPeepholeSDWA.cpp 377 auto DstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
379 auto TiedIdx = MI.findTiedOperandIdx(DstIdx);
SIInstrInfo.cpp 3843 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
3845 const int OpIndicies[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx };
3880 if (!ST.hasSDWASdst() && DstIdx != -1) {
3882 const MachineOperand &Dst = MI.getOperand(DstIdx);
3907 const MachineOperand &Dst = MI.getOperand(DstIdx);
3914 MI.getOperand(MI.findTiedOperandIdx(DstIdx));
3948 const uint32_t DstIdx =
3950 const MachineOperand &Dst = MI.getOperand(DstIdx);
3952 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
4296 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst)
    [all...]
R600ISelLowering.cpp 276 int DstIdx = TII->getOperandIdx(MI.getOpcode(), R600::OpName::dst);
277 assert(DstIdx != -1);
281 if (!MRI.use_empty(MI.getOperand(DstIdx).getReg()) ||
SIISelLowering.cpp 11278 int DstIdx =
11300 uint32_t DstSize = TRI.getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32;
11305 Register PrevDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx));
11316 NewDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx));
11334 MI.tieOperands(DstIdx, MI.getNumOperands() - 1);
  /src/external/apache2/llvm/dist/llvm/lib/CodeGen/GlobalISel/
GISelKnownBits.cpp 478 unsigned DstIdx = 0;
479 for (; DstIdx != NumOps - 1 && MI.getOperand(DstIdx).getReg() != R;
480 ++DstIdx)
483 Known = SrcOpKnown.extractBits(BitWidth, BitWidth * DstIdx);
LegalizerHelper.cpp 4670 unsigned DstIdx = 0; // Low bits of the result.
4672 B.buildMul(NarrowTy, Src1Regs[DstIdx], Src2Regs[DstIdx]).getReg(0);
4673 DstRegs[DstIdx] = FactorSum;
4678 for (DstIdx = 1; DstIdx < DstParts; DstIdx++) {
4679 // Collect low parts of muls for DstIdx.
4680 for (unsigned i = DstIdx + 1 < SrcParts ? 0 : DstIdx - SrcParts + 1
    [all...]
  /src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/Disassembler/
AMDGPUDisassembler.cpp 301 int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
302 if (IsAGPROperand(Inst, DstIdx, MRI))
  /src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/AsmParser/
AMDGPUAsmParser.cpp 3327 const int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
3328 if (DstIdx == -1 ||
3329 Desc.getOperandConstraint(DstIdx, MCOI::EARLY_CLOBBER) == -1) {
3339 assert(DstIdx != -1);
3340 const MCOperand &Dst = Inst.getOperand(DstIdx);
  /src/external/apache2/llvm/dist/llvm/lib/Target/X86/
X86InstrInfo.cpp 2116 unsigned DstIdx = (Imm >> 4) & 3;
2121 if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
2123 unsigned AltIdx = findFirstSet((ZMask | (1 << DstIdx)) ^ 15);
5422 unsigned DstIdx = (Imm >> 4) & 3;
5430 unsigned NewImm = (DstIdx << 4) | ZMask;
X86ISelLowering.cpp 7680 unsigned DstIdx = 0;
7686 DstIdx = N.getConstantOperandVal(2);
7688 // Attempt to recognise an INSERT*(VEC, 0, DstIdx) shuffle pattern.
7692 Mask.push_back(i == DstIdx ? SM_SentinelZero : (int)i);
7728 unsigned DstByte = DstIdx * NumBytesPerElt;
    [all...]

Completed in 139 milliseconds