HomeSort by: relevance | last modified time | path
    Searched defs:Caller (Results 1 - 25 of 32) sorted by relevance

1 2

  /src/external/apache2/llvm/dist/llvm/lib/Analysis/
ReplayInlineAdvisor.cpp 63 Function &Caller = *CB.getCaller();
64 auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(Caller);
MLInlineAdvisor.cpp 133 Function *Caller = Advice.getCaller();
136 // The caller features aren't valid anymore.
140 FAM.invalidate(*Caller, PA);
143 getIRSize(*Caller) + (CalleeWasDeleted ? 0 : Advice.CalleeIRSize);
149 // the caller, and maybe the callee (by deleting the latter).
151 // For edges, we 'forget' the edges that the caller and callee used to have
154 FAM.getResult<FunctionPropertiesAnalysis>(*Caller)
176 auto &Caller = *CB.getCaller();
183 auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(Caller);
191 &Caller == &Callee
    [all...]
InlineAdvisor.cpp 33 STATISTIC(NumCallerCallersAnalyzed, "Number of caller-callers analyzed");
60 << NV("Caller", Caller) << ": "
67 emitInlinedInto(ORE, DLoc, Block, *Callee, *Caller, *OIC);
72 emitInlinedInto(ORE, DLoc, Block, *Callee, *Caller, *OIC);
77 Function &Caller = *CB.getCaller();
79 FAM.getResult<ModuleAnalysisManagerFunctionProxy>(Caller)
83 auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(Caller);
118 : Advisor(Advisor), Caller(CB.getCaller()), Callee(CB.getCalledFunction()),
136 Advisor->ImportedFunctionsStats->recordInline(*Caller, *Callee)
    [all...]
ConstantFolding.cpp 1856 // a function during inlining), Call's caller may not be available.
1858 const Function *Caller =
1860 if (Caller &&
1862 Caller, Operands[0]->getType()->getPointerAddressSpace())) {
  /src/external/apache2/llvm/dist/llvm/lib/Transforms/IPO/
AlwaysInliner.cpp 50 // When callee coroutine function is inlined into caller coroutine function
67 Function *Caller = CB->getCaller();
68 OptimizationRemarkEmitter ORE(Caller);
76 emitInlinedInto(ORE, CB->getDebugLoc(), CB->getParent(), F, *Caller,
90 AttributeFuncs::mergeAttributesForInlining(*Caller, F);
193 // When callee coroutine function is inlined into caller coroutine function
Inliner.cpp 140 static void mergeInlinedArrayAllocas(Function *Caller, InlineFunctionInfo &IFI,
240 /// available from other functions inlined into the caller. If we are able to
249 Function *Caller = CB.getCaller();
260 ImportedFunctionsStats.recordInline(*Caller, *Callee);
262 AttributeFuncs::mergeAttributesForInlining(*Caller, *Callee);
265 mergeInlinedArrayAllocas(Caller, IFI, InlinedArrayAllocas, InlineHistory);
351 << NV("Caller", CB->getCaller())
393 Function *Caller = CB.getCaller();
400 bool IsTriviallyDead = isInstructionTriviallyDead(&CB, &GetTLI(*Caller));
418 OptimizationRemarkEmitter ORE(Caller);
    [all...]
ArgumentPromotion.cpp 253 // Loop over the operands, inserting GEP and loads in the caller as
577 // Promoting the argument causes it to be loaded in the caller
588 // block, and thus are safe to unconditionally load in the caller.
846 const Function *Caller = CB->getCaller();
848 if (!TTI.areFunctionArgsABICompatible(Caller, Callee, ArgsToPromote) ||
849 !TTI.areFunctionArgsABICompatible(Caller, Callee, ByValArgsToTransform))
914 // Can't change signature of musttail caller
1131 Function *Caller = OldCS.getParent()->getParent();
1134 CallGraphNode *CallerNode = CG[Caller];
PartialInlining.cpp 72 "inlined into its caller(s).");
783 Function *Caller = CB.getCaller();
805 << NV("Caller", Caller)
815 << NV("Caller", Caller) << " because too costly to inline (cost="
821 const DataLayout &DL = Caller->getParent()->getDataLayout();
833 << NV("Caller", Caller) << " runtime overhead (overhead="
847 << NV("Caller", Caller) << " with cost=" << NV("Cost", IC.getCost()
    [all...]
SampleProfile.cpp 850 /// \param F Caller function.
874 // clone the caller first, and inline the cloned caller if it is
953 << ore::NV("Caller", &F) << "'");
961 assert(Samples && "expect non-null caller profile");
970 ContextTrieNode *Caller =
973 CalleeList.push(Caller);
1017 /// callee into the caller. If the call is an indirect call, first promote
1128 << ore::NV("Caller", &F) << "'");
1374 // it's expecting matching parameter type on both caller and calle
    [all...]
  /src/external/apache2/llvm/dist/llvm/include/llvm/Transforms/IPO/
ProfiledCallGraph.h 65 ContextTrieNode *Caller = Queue.front();
67 // Add calls for context. When AddNodeWithSamplesOnly is true, both caller
74 for (auto &Child : Caller->getAllChildContext()) {
78 addProfiledCall(Caller->getFuncName(), Callee->getFuncName());
  /src/external/apache2/llvm/dist/llvm/include/llvm/XRay/
Profile.h 120 TrieNode *Caller = nullptr;
  /src/external/apache2/llvm/dist/llvm/lib/Target/AMDGPU/
AMDGPULowerIntrinsics.cpp 138 Function *Caller = CI->getParent()->getParent();
139 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *Caller);
AMDGPUOpenCLEnqueuedBlockLowering.cpp 79 auto *Caller = CI->getParent()->getParent();
80 if (Callers.insert(Caller).second)
81 collectCallers(Caller, Callers);
144 LLVM_DEBUG(dbgs() << "mark enqueue_kernel caller:" << F->getName() << '\n');
AMDGPUAnnotateKernelFeatures.cpp 43 bool propagateUniformWorkGroupAttribute(Function &Caller, Function &Callee);
215 Function *Caller = Node->getFunction();
220 Changed = propagateUniformWorkGroupAttribute(*Caller, *Callee);
228 Function &Caller, Function &Callee) {
233 if (!Caller.hasFnAttribute("uniform-work-group-size"))
234 Caller.addFnAttr("uniform-work-group-size", "false");
238 // Check if the Caller has the attribute
239 if (Caller.hasFnAttribute("uniform-work-group-size")) {
241 if (Caller.getFnAttribute("uniform-work-group-size")
254 Caller.addFnAttr("uniform-work-group-size", "false")
    [all...]
AMDGPUPropagateAttributes.cpp 13 /// caller.
254 Function *Caller = CI->getCaller();
255 if (!Caller || !Visited.insert(CI).second)
257 if (!Roots.count(Caller) && !NewRoots.count(Caller))
260 const FnProperties CallerProps(*TM, *Caller);
  /src/external/apache2/llvm/dist/clang/lib/AST/Interp/
InterpFrame.h 32 InterpFrame *Caller;
35 InterpFrame(InterpState &S, Function *Func, InterpFrame *Caller,
56 /// Returns the caller.
  /src/external/apache2/llvm/dist/llvm/lib/ExecutionEngine/Interpreter/
Interpreter.h 63 CallBase *Caller; // Holds the call that called subframes.
  /src/external/apache2/llvm/dist/llvm/include/llvm/Analysis/
InlineAdvisor.h 98 /// Caller and Callee are pre-inlining.
99 Function *const Caller;
270 const Function &Caller, const InlineCost &IC,
  /src/external/apache2/llvm/dist/clang/lib/Sema/
SemaExprCXX.cpp 1543 const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
1545 auto CallPreference = IdentifyCUDAPreference(Caller, Method);
1555 if (IdentifyCUDAPreference(Caller, FD) > CFP_WrongSide)
1574 return IdentifyCUDAPreference(Caller, FD) >= CFP_HostDevice;
1637 if (auto *Caller = dyn_cast<FunctionDecl>(S.CurContext))
1638 CUDAPref = S.IdentifyCUDAPreference(Caller, FD);
3904 // FIXME: Return this value to the caller so they don't need to recompute it.
6829 // FIXME: Let the caller know if these fail to avoid duplicate diagnostics.
Sema.cpp 1604 FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
1605 if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
1609 if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
1611 S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
1612 if (Caller)
1613 S.DeviceKnownEmittedFns[FD] = {Caller, Loc};
1618 emitDeferredDiags(FD, Caller);
  /src/external/apache2/llvm/dist/clang/lib/StaticAnalyzer/Core/
ExprEngineCXX.cpp 273 // on the CFG side. We should warn the caller about that.
296 auto getArgLoc = [&](CallEventRef<> Caller) -> Optional<SVal> {
298 Caller->getCalleeStackFrame(currBldrCtx->blockCount());
304 // This should be equivalent to Caller->getDecl() for now, but
316 const TypedValueRegion *TVR = Caller->getParameterLocation(
317 *Caller->getAdjustedParameterIndex(Idx), currBldrCtx->blockCount());
325 CallEventRef<> Caller = CEMgr.getSimpleCall(CE, State, LCtx);
326 if (Optional<SVal> V = getArgLoc(Caller))
333 CallEventRef<> Caller =
335 if (Optional<SVal> V = getArgLoc(Caller))
    [all...]
  /src/external/apache2/llvm/dist/clang/include/clang/Analysis/
PathDiagnostic.h 566 const Decl *Caller;
583 : PathDiagnosticPiece(Call), Caller(callerD), NoExit(false),
585 PathDiagnosticCallPiece(PathPieces &oldPath, const Decl *caller)
586 : PathDiagnosticPiece(Call), Caller(caller), NoExit(true),
597 const Decl *getCaller() const { return Caller; }
624 const Decl *caller);
  /src/external/apache2/llvm/dist/llvm/include/llvm/DebugInfo/PDB/
PDBTypes.h 281 Caller,
  /src/external/apache2/llvm/dist/llvm/lib/Transforms/Utils/
InlineFunction.cpp 154 /// Forward the 'resume' instruction to the caller's landing pad block.
214 /// Forward the 'resume' instruction to the caller's landing pad block.
261 // annotated as "unwinds to caller" when really it's nounwind (see
265 // "unwinds to caller" cleanupret, which can be trusted.
273 // marked "unwind to caller", it would be a verifier error if it
292 // be ConstantTokenNone to indicate unwind to caller, or can
379 /// return that pad instruction. If it unwinds to caller, return
415 // descendants. An unwind all the way out to a pad in the caller would
561 // invokes. The caller's "segment" of the deoptimization continuation
611 Function *Caller = FirstNewBlock->getParent()
    [all...]
  /src/external/apache2/llvm/dist/clang/lib/AST/
ExprConstant.cpp 447 // not, trust that the caller will catch the bad behavior).
501 /// Get the parameter that the caller initialized, corresponding to the
524 /// Parent - The caller of this stack frame.
525 CallStackFrame *Caller;
632 Frame *getCaller() const override { return Caller; }
1018 Frame = Frame->Caller;
1061 Call = Call->Caller) {
1440 : Info(Info), Caller(Info.CurrentCall), Callee(Callee), This(This),
1449 Info.CurrentCall = Caller;
1949 /// \return \c true if the caller should keep evaluating
    [all...]

Completed in 60 milliseconds

1 2