author     Matthias Braun <matze@braunis.de>    2017-12-15 22:22:58 +0000
committer  Matthias Braun <matze@braunis.de>    2017-12-15 22:22:58 +0000
commit     d318139827695f2011ef24693a101829829558b7
tree       e419a9891762635d61c1db320b72411e7b330ce8
parent     dfcb4f534480ecf3bc64c11781fa2d3123737e91
MachineFunction: Return reference from getFunction(); NFC
The Function can never be nullptr, so we can return a reference.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@320884 91177308-0d34-0410-b5e6-96231b3b80d8
242 files changed, 829 insertions, 843 deletions
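Seen as an API migration, the change at each call site is mechanical: `->` becomes `.` on the returned Function, and the handful of callers that still need a `const Function *` take the address of the reference. A minimal before/after sketch (`MF` stands for any MachineFunction; both patterns are taken directly from call sites updated in the diff below):

  // Before this patch: getFunction() returned a pointer.
  const Function *FP = MF.getFunction();
  if (skipFunction(*FP))          // callers dereferenced the pointer
    return false;
  MCSymbol *Sym = getSymbol(FP);  // pointer-taking APIs passed it through

  // After this patch: getFunction() returns a reference (never null).
  const Function &F = MF.getFunction();
  if (skipFunction(F))            // no dereference needed
    return false;
  MCSymbol *Sym2 = getSymbol(&F); // pointer-taking APIs take its address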
diff --git a/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/include/llvm/Analysis/BlockFrequencyInfoImpl.h
index 24295a2569f..40c40b80bc8 100644
--- a/include/llvm/Analysis/BlockFrequencyInfoImpl.h
+++ b/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -1341,7 +1341,7 @@ raw_ostream &BlockFrequencyInfoImpl<BT>::print(raw_ostream &OS) const {
        << ", int = " << getBlockFreq(&BB).getFrequency();
     if (Optional<uint64_t> ProfileCount =
         BlockFrequencyInfoImplBase::getBlockProfileCount(
-            *F->getFunction(), getNode(&BB)))
+            F->getFunction(), getNode(&BB)))
       OS << ", count = " << ProfileCount.getValue();
     if (Optional<uint64_t> IrrLoopHeaderWeight =
         BB.getIrrLoopHeaderWeight())
diff --git a/include/llvm/CodeGen/MachineFunction.h b/include/llvm/CodeGen/MachineFunction.h
index 4ffd4bdc2f3..7d8b7ebe8d6 100644
--- a/include/llvm/CodeGen/MachineFunction.h
+++ b/include/llvm/CodeGen/MachineFunction.h
@@ -380,8 +380,8 @@ public:
   /// Return the DataLayout attached to the Module associated to this MF.
   const DataLayout &getDataLayout() const;

-  /// getFunction - Return the LLVM function that this machine code represents
-  const Function *getFunction() const { return &F; }
+  /// Return the LLVM function that this machine code represents
+  const Function &getFunction() const { return F; }

   /// getName - Return the name of the corresponding LLVM function.
   StringRef getName() const;
diff --git a/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h b/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h
index 887752b6d38..2fdefbed37c 100644
--- a/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h
+++ b/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h
@@ -33,7 +33,7 @@ public:
                           const DiagnosticLocation &Loc,
                           const MachineBasicBlock *MBB)
       : DiagnosticInfoOptimizationBase(Kind, DS_Remark, PassName, RemarkName,
-                                       *MBB->getParent()->getFunction(), Loc),
+                                       MBB->getParent()->getFunction(), Loc),
         MBB(MBB) {}

   /// MI-specific kinds of diagnostic Arguments.
@@ -159,8 +159,8 @@ public:
   /// (1) to filter trivial false positives or (2) to provide more context so
   /// that non-trivial false positives can be quickly detected by the user.
   bool allowExtraAnalysis(StringRef PassName) const {
-    return (MF.getFunction()->getContext().getDiagnosticsOutputFile() ||
-            MF.getFunction()->getContext()
+    return (MF.getFunction().getContext().getDiagnosticsOutputFile() ||
+            MF.getFunction().getContext()
             .getDiagHandlerPtr()->isAnyRemarkEnabled(PassName));
   }

@@ -172,8 +172,8 @@ public:
     // remarks enabled. We can't currently check whether remarks are requested
     // for the calling pass since that requires actually building the remark.
-    if (MF.getFunction()->getContext().getDiagnosticsOutputFile() ||
-        MF.getFunction()->getContext().getDiagHandlerPtr()->isAnyRemarkEnabled()) {
+    if (MF.getFunction().getContext().getDiagnosticsOutputFile() ||
+        MF.getFunction().getContext().getDiagHandlerPtr()->isAnyRemarkEnabled()) {
       auto R = RemarkBuilder();
       emit((DiagnosticInfoOptimizationBase &)R);
     }
diff --git a/include/llvm/CodeGen/TargetFrameLowering.h b/include/llvm/CodeGen/TargetFrameLowering.h
index 53d389d9917..61f1cf07bcf 100644
--- a/include/llvm/CodeGen/TargetFrameLowering.h
+++ b/include/llvm/CodeGen/TargetFrameLowering.h
@@ -330,12 +330,12 @@ public:
   /// Check if given function is safe for not having callee saved registers.
   /// This is used when interprocedural register allocation is enabled.
-  static bool isSafeForNoCSROpt(const Function *F) {
-    if (!F->hasLocalLinkage() || F->hasAddressTaken() ||
-        !F->hasFnAttribute(Attribute::NoRecurse))
+  static bool isSafeForNoCSROpt(const Function &F) {
+    if (!F.hasLocalLinkage() || F.hasAddressTaken() ||
+        !F.hasFnAttribute(Attribute::NoRecurse))
       return false;
     // Function should not be optimized as tail call.
-    for (const User *U : F->users())
+    for (const User *U : F.users())
       if (auto CS = ImmutableCallSite(U))
         if (CS.isTailCall())
           return false;
diff --git a/include/llvm/IR/Function.h b/include/llvm/IR/Function.h
index 3fb853715f5..e811ae5e215 100644
--- a/include/llvm/IR/Function.h
+++ b/include/llvm/IR/Function.h
@@ -131,7 +131,7 @@ public:
   // This is here to help easily convert from FunctionT * (Function * or
   // MachineFunction *) in BlockFrequencyInfoImpl to Function * by calling
   // FunctionT->getFunction().
-  const Function *getFunction() const { return this; }
+  const Function &getFunction() const { return *this; }

   static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
                           const Twine &N = "", Module *M = nullptr) {
diff --git a/lib/CodeGen/Analysis.cpp b/lib/CodeGen/Analysis.cpp
index ec662d0851c..0731ae57543 100644
--- a/lib/CodeGen/Analysis.cpp
+++ b/lib/CodeGen/Analysis.cpp
@@ -668,7 +668,7 @@ llvm::getFuncletMembership(const MachineFunction &MF) {
   int EntryBBNumber = MF.front().getNumber();
   bool IsSEH = isAsynchronousEHPersonality(
-      classifyEHPersonality(MF.getFunction()->getPersonalityFn()));
+      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
   SmallVector<const MachineBasicBlock *, 16> FuncletBlocks;
diff --git a/lib/CodeGen/AsmPrinter/ARMException.cpp b/lib/CodeGen/AsmPrinter/ARMException.cpp
index 47feac5eac3..15cfbd5c40f 100644
--- a/lib/CodeGen/AsmPrinter/ARMException.cpp
+++ b/lib/CodeGen/AsmPrinter/ARMException.cpp
@@ -60,16 +60,16 @@ void ARMException::beginFunction(const MachineFunction *MF) {
 ///
 void ARMException::endFunction(const MachineFunction *MF) {
   ARMTargetStreamer &ATS = getTargetStreamer();
-  const Function *F = MF->getFunction();
+  const Function &F = MF->getFunction();
   const Function *Per = nullptr;
-  if (F->hasPersonalityFn())
-    Per = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
+  if (F.hasPersonalityFn())
+    Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
   bool forceEmitPersonality =
-      F->hasPersonalityFn() && !isNoOpWithoutInvoke(classifyEHPersonality(Per)) &&
-      F->needsUnwindTableEntry();
+      F.hasPersonalityFn() && !isNoOpWithoutInvoke(classifyEHPersonality(Per)) &&
+      F.needsUnwindTableEntry();
   bool shouldEmitPersonality = forceEmitPersonality ||
                                !MF->getLandingPads().empty();
-  if (!Asm->MF->getFunction()->needsUnwindTableEntry() &&
+  if (!Asm->MF->getFunction().needsUnwindTableEntry() &&
       !shouldEmitPersonality)
     ATS.emitCantUnwind();
   else if (shouldEmitPersonality) {
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 294ea51969f..31037095aa2 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -621,35 +621,35 @@ void AsmPrinter::EmitDebugThreadLocal(const MCExpr *Value,
 /// EmitFunctionHeader - This method emits the header for the current
 /// function.
 void AsmPrinter::EmitFunctionHeader() {
-  const Function *F = MF->getFunction();
+  const Function &F = MF->getFunction();

   if (isVerbose())
     OutStreamer->GetCommentOS()
         << "-- Begin function "
-        << GlobalValue::dropLLVMManglingEscape(F->getName()) << '\n';
+        << GlobalValue::dropLLVMManglingEscape(F.getName()) << '\n';

   // Print out constants referenced by the function
   EmitConstantPool();

   // Print the 'header' of function.
-  OutStreamer->SwitchSection(getObjFileLowering().SectionForGlobal(F, TM));
-  EmitVisibility(CurrentFnSym, F->getVisibility());
+  OutStreamer->SwitchSection(getObjFileLowering().SectionForGlobal(&F, TM));
+  EmitVisibility(CurrentFnSym, F.getVisibility());

-  EmitLinkage(F, CurrentFnSym);
+  EmitLinkage(&F, CurrentFnSym);
   if (MAI->hasFunctionAlignment())
-    EmitAlignment(MF->getAlignment(), F);
+    EmitAlignment(MF->getAlignment(), &F);

   if (MAI->hasDotTypeDotSizeDirective())
     OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_ELF_TypeFunction);

   if (isVerbose()) {
-    F->printAsOperand(OutStreamer->GetCommentOS(),
-                      /*PrintType=*/false, F->getParent());
+    F.printAsOperand(OutStreamer->GetCommentOS(),
+                     /*PrintType=*/false, F.getParent());
     OutStreamer->GetCommentOS() << '\n';
   }

   // Emit the prefix data.
-  if (F->hasPrefixData()) {
+  if (F.hasPrefixData()) {
     if (MAI->hasSubsectionsViaSymbols()) {
       // Preserving prefix data on platforms which use subsections-via-symbols
       // is a bit tricky. Here we introduce a symbol for the prefix data
@@ -658,12 +658,12 @@ void AsmPrinter::EmitFunctionHeader() {
       MCSymbol *PrefixSym = OutContext.createLinkerPrivateTempSymbol();
       OutStreamer->EmitLabel(PrefixSym);

-      EmitGlobalConstant(F->getParent()->getDataLayout(), F->getPrefixData());
+      EmitGlobalConstant(F.getParent()->getDataLayout(), F.getPrefixData());

       // Emit an .alt_entry directive for the actual function symbol.
       OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_AltEntry);
     } else {
-      EmitGlobalConstant(F->getParent()->getDataLayout(), F->getPrefixData());
+      EmitGlobalConstant(F.getParent()->getDataLayout(), F.getPrefixData());
     }
   }

@@ -675,7 +675,7 @@ void AsmPrinter::EmitFunctionHeader() {
   // references to the dangling symbols. Emit them at the start of the function
   // so that we don't get references to undefined symbols.
   std::vector<MCSymbol*> DeadBlockSyms;
-  MMI->takeDeletedSymbolsForFunction(F, DeadBlockSyms);
+  MMI->takeDeletedSymbolsForFunction(&F, DeadBlockSyms);
   for (unsigned i = 0, e = DeadBlockSyms.size(); i != e; ++i) {
     OutStreamer->AddComment("Address taken block that was later removed");
     OutStreamer->EmitLabel(DeadBlockSyms[i]);
@@ -700,8 +700,8 @@ void AsmPrinter::EmitFunctionHeader() {
   }

   // Emit the prologue data.
-  if (F->hasPrologueData())
-    EmitGlobalConstant(F->getParent()->getDataLayout(), F->getPrologueData());
+  if (F.hasPrologueData())
+    EmitGlobalConstant(F.getParent()->getDataLayout(), F.getPrologueData());
 }

 /// EmitFunctionEntryLabel - Emit the label that is the entrypoint for the
@@ -900,7 +900,7 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {

 AsmPrinter::CFIMoveType AsmPrinter::needsCFIMoves() const {
   if (MAI->getExceptionHandlingType() == ExceptionHandling::DwarfCFI &&
-      MF->getFunction()->needsUnwindTableEntry())
+      MF->getFunction().needsUnwindTableEntry())
     return CFI_M_EH;

   if (MMI->hasDebugInfo())
@@ -910,7 +910,7 @@ AsmPrinter::CFIMoveType AsmPrinter::needsCFIMoves() const {
 }

 bool AsmPrinter::needsSEHMoves() {
-  return MAI->usesWindowsCFI() && MF->getFunction()->needsUnwindTableEntry();
+  return MAI->usesWindowsCFI() && MF->getFunction().needsUnwindTableEntry();
 }

 void AsmPrinter::emitCFIInstruction(const MachineInstr &MI) {
@@ -964,7 +964,7 @@ void AsmPrinter::emitStackSizeSection(const MachineFunction &MF) {
   OutStreamer->PushSection();
   OutStreamer->SwitchSection(StackSizeSection);

-  const MCSymbol *FunctionSymbol = getSymbol(MF.getFunction());
+  const MCSymbol *FunctionSymbol = getSymbol(&MF.getFunction());
   uint64_t StackSize = FrameInfo.getStackSize();
   OutStreamer->EmitValue(MCSymbolRefExpr::create(FunctionSymbol, OutContext),
                          /* size = */ 8);
@@ -980,10 +980,10 @@ static bool needFuncLabelsForEHOrDebugInfo(const MachineFunction &MF,

   // We might emit an EH table that uses function begin and end labels even if
   // we don't have any landingpads.
-  if (!MF.getFunction()->hasPersonalityFn())
+  if (!MF.getFunction().hasPersonalityFn())
     return false;
   return !isNoOpWithoutInvoke(
-      classifyEHPersonality(MF.getFunction()->getPersonalityFn()));
+      classifyEHPersonality(MF.getFunction().getPersonalityFn()));
 }

 /// EmitFunctionBody - This method emits the body and trailer for a
@@ -1070,7 +1070,7 @@ void AsmPrinter::EmitFunctionBody() {
       EmittedInsts += NumInstsInFunction;
       MachineOptimizationRemarkAnalysis R(DEBUG_TYPE, "InstructionCount",
-                                          MF->getFunction()->getSubprogram(),
+                                          MF->getFunction().getSubprogram(),
                                           &MF->front());
       R << ore::NV("NumInstructions", NumInstsInFunction)
         << " instructions in function";
@@ -1098,8 +1098,8 @@ void AsmPrinter::EmitFunctionBody() {
     }
   }

-  const Function *F = MF->getFunction();
-  for (const auto &BB : *F) {
+  const Function &F = MF->getFunction();
+  for (const auto &BB : F) {
     if (!BB.hasAddressTaken())
       continue;
     MCSymbol *Sym = GetBlockAddressSymbol(&BB);
@@ -1442,7 +1442,7 @@ MCSymbol *AsmPrinter::getCurExceptionSym() {
 void AsmPrinter::SetupMachineFunction(MachineFunction &MF) {
   this->MF = &MF;
   // Get the function symbol.
-  CurrentFnSym = getSymbol(MF.getFunction());
+  CurrentFnSym = getSymbol(&MF.getFunction());
   CurrentFnSymForSize = CurrentFnSym;
   CurrentFnBegin = nullptr;
   CurExceptionSym = nullptr;
@@ -1568,14 +1568,14 @@ void AsmPrinter::EmitJumpTableInfo() {
   // Pick the directive to use to print the jump table entries, and switch to
   // the appropriate section.
-  const Function *F = MF->getFunction();
+  const Function &F = MF->getFunction();
   const TargetLoweringObjectFile &TLOF = getObjFileLowering();
   bool JTInDiffSection = !TLOF.shouldPutJumpTableInFunctionSection(
       MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32,
-      *F);
+      F);
   if (JTInDiffSection) {
     // Drop it in the readonly section.
-    MCSection *ReadOnlySection = TLOF.getSectionForJumpTable(*F, TM);
+    MCSection *ReadOnlySection = TLOF.getSectionForJumpTable(F, TM);
     OutStreamer->SwitchSection(ReadOnlySection);
   }

@@ -1949,7 +1949,7 @@ const MCExpr *AsmPrinter::lowerConstant(const Constant *CV) {
     raw_string_ostream OS(S);
     OS << "Unsupported expression in static initializer: ";
     CE->printAsOperand(OS, /*PrintType=*/false,
-                       !MF ? nullptr : MF->getFunction()->getParent());
+                       !MF ? nullptr : MF->getFunction().getParent());
     report_fatal_error(OS.str());
   }
   case Instruction::GetElementPtr: {
@@ -2632,7 +2632,7 @@ void AsmPrinter::setupCodePaddingContext(const MachineBasicBlock &MBB,
   assert(MF != nullptr && "Machine function must be valid");
   assert(LI != nullptr && "Loop info must be valid");
   Context.IsPaddingActive = !MF->hasInlineAsm() &&
-                            !MF->getFunction()->optForSize() &&
+                            !MF->getFunction().optForSize() &&
                             TM.getOptLevel() != CodeGenOpt::None;
   const MachineLoop *CurrentLoop = LI->getLoopFor(&MBB);
   Context.IsBasicBlockInsideInnermostLoop =
@@ -2830,7 +2830,7 @@ void AsmPrinter::emitXRayTable() {
     return;

   auto PrevSection = OutStreamer->getCurrentSectionOnly();
-  auto Fn = MF->getFunction();
+  const Function &F = MF->getFunction();
   MCSection *InstMap = nullptr;
   MCSection *FnSledIndex = nullptr;
   if (MF->getSubtarget().getTargetTriple().isOSBinFormatELF()) {
@@ -2838,9 +2838,9 @@ void AsmPrinter::emitXRayTable() {
     assert(Associated != nullptr);
     auto Flags = ELF::SHF_WRITE | ELF::SHF_ALLOC | ELF::SHF_LINK_ORDER;
     std::string GroupName;
-    if (Fn->hasComdat()) {
+    if (F.hasComdat()) {
       Flags |= ELF::SHF_GROUP;
-      GroupName = Fn->getComdat()->getName();
+      GroupName = F.getComdat()->getName();
     }

     auto UniqueID = ++XRayFnUniqueID;
@@ -2886,15 +2886,15 @@ void AsmPrinter::emitXRayTable() {

 void AsmPrinter::recordSled(MCSymbol *Sled, const MachineInstr &MI,
                             SledKind Kind, uint8_t Version) {
-  auto Fn = MI.getMF()->getFunction();
-  auto Attr = Fn->getFnAttribute("function-instrument");
-  bool LogArgs = Fn->hasFnAttribute("xray-log-args");
+  const Function &F = MI.getMF()->getFunction();
+  auto Attr = F.getFnAttribute("function-instrument");
+  bool LogArgs = F.hasFnAttribute("xray-log-args");
   bool AlwaysInstrument =
       Attr.isStringAttribute() && Attr.getValueAsString() == "xray-always";
   if (Kind == SledKind::FUNCTION_ENTER && LogArgs)
     Kind = SledKind::LOG_ARGS_ENTER;
   Sleds.emplace_back(XRayFunctionEntry{Sled, CurrentFnSym, Kind,
-                                       AlwaysInstrument, Fn, Version});
+                                       AlwaysInstrument, &F, Version});
 }

 uint16_t AsmPrinter::getDwarfVersion() const {
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp b/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
index 580830d39f2..04a72ba3d73 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
@@ -514,7 +514,7 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {

   // Reset SanitizeAddress based on the function's attribute.
   MCTargetOptions MCOptions = TM.Options.MCOptions;
   MCOptions.SanitizeAddress =
-      MF->getFunction()->hasFnAttribute(Attribute::SanitizeAddress);
+      MF->getFunction().hasFnAttribute(Attribute::SanitizeAddress);

   EmitInlineAsm(OS.str(), getSubtargetInfo(), MCOptions, LocMD,
                 MI->getInlineAsmDialect());
diff --git a/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
index 97b59491c3d..1d0a003dc50 100644
--- a/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
+++ b/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
@@ -1154,9 +1154,9 @@ void CodeViewDebug::collectVariableInfo(const DISubprogram *SP) {
 }

 void CodeViewDebug::beginFunctionImpl(const MachineFunction *MF) {
-  const Function *GV = MF->getFunction();
-  assert(FnDebugInfo.count(GV) == false);
-  CurFn = &FnDebugInfo[GV];
+  const Function &GV = MF->getFunction();
+  assert(FnDebugInfo.count(&GV) == false);
+  CurFn = &FnDebugInfo[&GV];
   CurFn->FuncId = NextFuncId++;
   CurFn->Begin = Asm->getFunctionBegin();

@@ -2273,15 +2273,15 @@ void CodeViewDebug::emitLocalVariable(const LocalVariable &Var) {
 }

 void CodeViewDebug::endFunctionImpl(const MachineFunction *MF) {
-  const Function *GV = MF->getFunction();
-  assert(FnDebugInfo.count(GV));
-  assert(CurFn == &FnDebugInfo[GV]);
+  const Function &GV = MF->getFunction();
+  assert(FnDebugInfo.count(&GV));
+  assert(CurFn == &FnDebugInfo[&GV]);

-  collectVariableInfo(GV->getSubprogram());
+  collectVariableInfo(GV.getSubprogram());

   // Don't emit anything if we don't have any line tables.
   if (!CurFn->HaveLineInfo) {
-    FnDebugInfo.erase(GV);
+    FnDebugInfo.erase(&GV);
     CurFn = nullptr;
     return;
   }
diff --git a/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp b/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
index 68354571423..d94b0e5c211 100644
--- a/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
+++ b/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
@@ -179,7 +179,7 @@ static bool hasDebugInfo(const MachineModuleInfo *MMI,
                          const MachineFunction *MF) {
   if (!MMI->hasDebugInfo())
     return false;
-  auto *SP = MF->getFunction()->getSubprogram();
+  auto *SP = MF->getFunction().getSubprogram();
   if (!SP)
     return false;
   assert(SP->getUnit());
@@ -223,7 +223,7 @@ void DebugHandlerBase::beginFunction(const MachineFunction *MF) {
       // label, so arguments are visible when breaking at function entry.
       const DILocalVariable *DIVar = Ranges.front().first->getDebugVariable();
       if (DIVar->isParameter() &&
-          getDISubprogram(DIVar->getScope())->describes(MF->getFunction())) {
+          getDISubprogram(DIVar->getScope())->describes(&MF->getFunction())) {
         LabelsBeforeInsn[Ranges.front().first] = Asm->getFunctionBegin();
         if (Ranges.front().first->getDebugExpression()->isFragment()) {
           // Mark all non-overlapping initial fragments.
diff --git a/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp b/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
index cde744df14a..cbb4c48b4d8 100644
--- a/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
@@ -87,7 +87,7 @@ static MCSymbol *getExceptionSym(AsmPrinter *Asm) {
 void DwarfCFIException::beginFunction(const MachineFunction *MF) {
   shouldEmitMoves = shouldEmitPersonality = shouldEmitLSDA = false;
-  const Function *F = MF->getFunction();
+  const Function &F = MF->getFunction();

   // If any landing pads survive, we need an EH table.
   bool hasLandingPads = !MF->getLandingPads().empty();
@@ -100,17 +100,17 @@ void DwarfCFIException::beginFunction(const MachineFunction *MF) {
   const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
   unsigned PerEncoding = TLOF.getPersonalityEncoding();
   const Function *Per = nullptr;
-  if (F->hasPersonalityFn())
-    Per = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
+  if (F.hasPersonalityFn())
+    Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());

   // Emit a personality function even when there are no landing pads
   forceEmitPersonality =
       // ...if a personality function is explicitly specified
-      F->hasPersonalityFn() &&
+      F.hasPersonalityFn() &&
       // ... and it's not known to be a noop in the absence of invokes
       !isNoOpWithoutInvoke(classifyEHPersonality(Per)) &&
       // ... and we're not explicitly asked not to emit it
-      F->needsUnwindTableEntry();
+      F.needsUnwindTableEntry();

   shouldEmitPersonality =
       (forceEmitPersonality ||
@@ -143,8 +143,8 @@ void DwarfCFIException::beginFragment(const MachineBasicBlock *MBB,
   if (!shouldEmitPersonality)
     return;

-  auto *F = MBB->getParent()->getFunction();
-  auto *P = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
+  auto &F = MBB->getParent()->getFunction();
+  auto *P = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
   assert(P && "Expected personality function");

   // If we are forced to emit this personality, make sure to record
diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index cd8f61dbe22..2c9c7d4f314 100644
--- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -1163,7 +1163,7 @@ void DwarfDebug::beginInstruction(const MachineInstr *MI) {
   DebugHandlerBase::beginInstruction(MI);
   assert(CurMI);

-  const auto *SP = MI->getMF()->getFunction()->getSubprogram();
+  const auto *SP = MI->getMF()->getFunction().getSubprogram();
   if (!SP || SP->getUnit()->getEmissionKind() == DICompileUnit::NoDebug)
     return;

@@ -1261,7 +1261,7 @@ static DebugLoc findPrologueEndLoc(const MachineFunction *MF) {
 void DwarfDebug::beginFunctionImpl(const MachineFunction *MF) {
   CurFn = MF;

-  auto *SP = MF->getFunction()->getSubprogram();
+  auto *SP = MF->getFunction().getSubprogram();
   assert(LScopes.empty() || SP == LScopes.getCurrentFunctionScope()->getScopeNode());
   if (SP->getUnit()->getEmissionKind() == DICompileUnit::NoDebug)
     return;
@@ -1297,7 +1297,7 @@ void DwarfDebug::skippedNonDebugFunction() {

 // Gather and emit post-function debug information.
 void DwarfDebug::endFunctionImpl(const MachineFunction *MF) {
-  const DISubprogram *SP = MF->getFunction()->getSubprogram();
+  const DISubprogram *SP = MF->getFunction().getSubprogram();

   assert(CurFn == MF &&
       "endFunction should be called with the same function as beginFunction");
diff --git a/lib/CodeGen/AsmPrinter/WinException.cpp b/lib/CodeGen/AsmPrinter/WinException.cpp
index 7f3fc90c4d5..a6a8e84a949 100644
--- a/lib/CodeGen/AsmPrinter/WinException.cpp
+++ b/lib/CodeGen/AsmPrinter/WinException.cpp
@@ -63,7 +63,7 @@ void WinException::beginFunction(const MachineFunction *MF) {
   bool hasLandingPads = !MF->getLandingPads().empty();
   bool hasEHFunclets = MF->hasEHFunclets();

-  const Function *F = MF->getFunction();
+  const Function &F = MF->getFunction();

   shouldEmitMoves = Asm->needsSEHMoves() && MF->hasWinCFI();

@@ -72,14 +72,14 @@ void WinException::beginFunction(const MachineFunction *MF) {

   EHPersonality Per = EHPersonality::Unknown;
   const Function *PerFn = nullptr;
-  if (F->hasPersonalityFn()) {
-    PerFn = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
+  if (F.hasPersonalityFn()) {
+    PerFn = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
     Per = classifyEHPersonality(PerFn);
   }

-  bool forceEmitPersonality = F->hasPersonalityFn() &&
+  bool forceEmitPersonality = F.hasPersonalityFn() &&
                               !isNoOpWithoutInvoke(Per) &&
-                              F->needsUnwindTableEntry();
+                              F.needsUnwindTableEntry();

   shouldEmitPersonality =
       forceEmitPersonality || ((hasLandingPads || hasEHFunclets) &&
@@ -98,7 +98,7 @@ void WinException::beginFunction(const MachineFunction *MF) {
     // functions may still refer to it.
     const WinEHFuncInfo &FuncInfo = *MF->getWinEHFuncInfo();
     StringRef FLinkageName =
-        GlobalValue::dropLLVMManglingEscape(MF->getFunction()->getName());
+        GlobalValue::dropLLVMManglingEscape(MF->getFunction().getName());
     emitEHRegistrationOffsetLabel(FuncInfo, FLinkageName);
   }
   shouldEmitLSDA = hasEHFunclets;
@@ -115,10 +115,10 @@ void WinException::endFunction(const MachineFunction *MF) {
   if (!shouldEmitPersonality && !shouldEmitMoves && !shouldEmitLSDA)
     return;

-  const Function *F = MF->getFunction();
+  const Function &F = MF->getFunction();
   EHPersonality Per = EHPersonality::Unknown;
-  if (F->hasPersonalityFn())
-    Per = classifyEHPersonality(F->getPersonalityFn()->stripPointerCasts());
+  if (F.hasPersonalityFn())
+    Per = classifyEHPersonality(F.getPersonalityFn()->stripPointerCasts());

   // Get rid of any dead landing pads if we're not using funclets. In funclet
   // schemes, the landing pad is not actually reachable. It only exists so
@@ -170,8 +170,8 @@ static MCSymbol *getMCSymbolForMBB(AsmPrinter *Asm,
   // Give catches and cleanups a name based off of their parent function and
   // their funclet entry block's number.
   const MachineFunction *MF = MBB->getParent();
-  const Function *F = MF->getFunction();
-  StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F->getName());
+  const Function &F = MF->getFunction();
+  StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F.getName());
   MCContext &Ctx = MF->getContext();
   StringRef HandlerPrefix = MBB->isCleanupFuncletEntry() ? "dtor" : "catch";
   return Ctx.getOrCreateSymbol("?" + HandlerPrefix + "$" +
@@ -183,7 +183,7 @@ void WinException::beginFunclet(const MachineBasicBlock &MBB,
                                MCSymbol *Sym) {
   CurrentFuncletEntry = &MBB;

-  const Function *F = Asm->MF->getFunction();
+  const Function &F = Asm->MF->getFunction();
   // If a symbol was not provided for the funclet, invent one.
   if (!Sym) {
     Sym = getMCSymbolForMBB(Asm, &MBB);
@@ -198,7 +198,7 @@ void WinException::beginFunclet(const MachineBasicBlock &MBB,
   // We want our funclet's entry point to be aligned such that no nops will be
   // present after the label.
   Asm->EmitAlignment(std::max(Asm->MF->getAlignment(), MBB.getAlignment()),
-                     F);
+                     &F);

   // Now that we've emitted the alignment directive, point at our funclet.
   Asm->OutStreamer->EmitLabel(Sym);
@@ -215,8 +215,8 @@ void WinException::beginFunclet(const MachineBasicBlock &MBB,
     const Function *PerFn = nullptr;

     // Determine which personality routine we are using for this funclet.
-    if (F->hasPersonalityFn())
-      PerFn = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
+    if (F.hasPersonalityFn())
+      PerFn = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
     const MCSymbol *PersHandlerSym =
         TLOF.getCFIPersonalitySymbol(PerFn, Asm->TM, MMI);

@@ -237,10 +237,10 @@ void WinException::endFunclet() {
   const MachineFunction *MF = Asm->MF;

   if (shouldEmitMoves || shouldEmitPersonality) {
-    const Function *F = MF->getFunction();
+    const Function &F = MF->getFunction();
     EHPersonality Per = EHPersonality::Unknown;
-    if (F->hasPersonalityFn())
-      Per = classifyEHPersonality(F->getPersonalityFn()->stripPointerCasts());
+    if (F.hasPersonalityFn())
+      Per = classifyEHPersonality(F.getPersonalityFn()->stripPointerCasts());

     // Emit an UNWIND_INFO struct describing the prologue.
     Asm->OutStreamer->EmitWinEHHandlerData();
@@ -249,7 +249,7 @@ void WinException::endFunclet() {
         !CurrentFuncletEntry->isCleanupFuncletEntry()) {
       // If this is a C++ catch funclet (or the parent function),
      // emit a reference to the LSDA for the parent function.
-      StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F->getName());
+      StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F.getName());
       MCSymbol *FuncInfoXData = Asm->OutContext.getOrCreateSymbol(
           Twine("$cppxdata$", FuncLinkageName));
       Asm->OutStreamer->EmitValue(create32bitRef(FuncInfoXData), 4);
@@ -533,7 +533,7 @@ void WinException::emitCSpecificHandlerTable(const MachineFunction *MF) {
   // Emit a label assignment with the SEH frame offset so we can use it for
   // llvm.x86.seh.recoverfp.
   StringRef FLinkageName =
-      GlobalValue::dropLLVMManglingEscape(MF->getFunction()->getName());
+      GlobalValue::dropLLVMManglingEscape(MF->getFunction().getName());
   MCSymbol *ParentFrameOffset =
       Ctx.getOrCreateParentFrameOffsetSymbol(FLinkageName);
   const MCExpr *MCOffset =
@@ -628,11 +628,11 @@ void WinException::emitSEHActionsForRange(const WinEHFuncInfo &FuncInfo,
 }

 void WinException::emitCXXFrameHandler3Table(const MachineFunction *MF) {
-  const Function *F = MF->getFunction();
+  const Function &F = MF->getFunction();
   auto &OS = *Asm->OutStreamer;
   const WinEHFuncInfo &FuncInfo = *MF->getWinEHFuncInfo();

-  StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F->getName());
+  StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F.getName());

   SmallVector<std::pair<const MCExpr *, int>, 4> IPToStateTable;
   MCSymbol *FuncInfoXData = nullptr;
@@ -938,8 +938,8 @@ void WinException::emitEHRegistrationOffsetLabel(const WinEHFuncInfo &FuncInfo,
 /// indexed by state number instead of IP.
 void WinException::emitExceptHandlerTable(const MachineFunction *MF) {
   MCStreamer &OS = *Asm->OutStreamer;
-  const Function *F = MF->getFunction();
-  StringRef FLinkageName = GlobalValue::dropLLVMManglingEscape(F->getName());
+  const Function &F = MF->getFunction();
+  StringRef FLinkageName = GlobalValue::dropLLVMManglingEscape(F.getName());

   bool VerboseAsm = OS.isVerboseAsm();
   auto AddComment = [&](const Twine &Comment) {
@@ -956,7 +956,7 @@ void WinException::emitExceptHandlerTable(const MachineFunction *MF) {
   OS.EmitLabel(LSDALabel);

   const Function *Per =
-      dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
+      dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
   StringRef PerName = Per->getName();
   int BaseState = -1;
   if (PerName == "_except_handler4") {
diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp
index 99270ff4ea7..7f358a67936 100644
--- a/lib/CodeGen/BranchFolding.cpp
+++ b/lib/CodeGen/BranchFolding.cpp
@@ -118,7 +118,7 @@ INITIALIZE_PASS(BranchFolderPass, DEBUG_TYPE, "Control Flow Optimizer",
                 false, false)

 bool BranchFolderPass::runOnMachineFunction(MachineFunction &MF) {
-  if (skipFunction(*MF.getFunction()))
+  if (skipFunction(MF.getFunction()))
     return false;

   TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
@@ -685,7 +685,7 @@ ProfitableToMerge(MachineBasicBlock *MBB1, MachineBasicBlock *MBB2,
   // branch instruction, which is likely to be smaller than the 2
   // instructions that would be deleted in the merge.
   MachineFunction *MF = MBB1->getParent();
-  return EffectiveTailLen >= 2 && MF->getFunction()->optForSize() &&
+  return EffectiveTailLen >= 2 && MF->getFunction().optForSize() &&
          (I1 == MBB1->begin() || I2 == MBB2->begin());
 }

@@ -1511,7 +1511,7 @@ ReoptimizeBlock:
   }

   if (!IsEmptyBlock(MBB) && MBB->pred_size() == 1 &&
-      MF.getFunction()->optForSize()) {
+      MF.getFunction().optForSize()) {
     // Changing "Jcc foo; foo: jmp bar;" into "Jcc bar;" might change the branch
     // direction, thereby defeating careful block placement and regressing
     // performance. Therefore, only consider this for optsize functions.
diff --git a/lib/CodeGen/DeadMachineInstructionElim.cpp b/lib/CodeGen/DeadMachineInstructionElim.cpp
index 112ea58bc7f..e6a54bb300f 100644
--- a/lib/CodeGen/DeadMachineInstructionElim.cpp
+++ b/lib/CodeGen/DeadMachineInstructionElim.cpp
@@ -94,7 +94,7 @@ bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const {
 }

 bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
-  if (skipFunction(*MF.getFunction()))
+  if (skipFunction(MF.getFunction()))
     return false;

   bool AnyChanges = false;
diff --git a/lib/CodeGen/EarlyIfConversion.cpp b/lib/CodeGen/EarlyIfConversion.cpp
index 461da8f138f..6294ff45011 100644
--- a/lib/CodeGen/EarlyIfConversion.cpp
+++ b/lib/CodeGen/EarlyIfConversion.cpp
@@ -785,7 +785,7 @@ bool EarlyIfConverter::tryConvertIf(MachineBasicBlock *MBB) {
 bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) {
   DEBUG(dbgs() << "********** EARLY IF-CONVERSION **********\n"
                << "********** Function: " << MF.getName() << '\n');
-  if (skipFunction(*MF.getFunction()))
+  if (skipFunction(MF.getFunction()))
     return false;

   // Only run if conversion if the target wants it.
diff --git a/lib/CodeGen/ExecutionDepsFix.cpp b/lib/CodeGen/ExecutionDepsFix.cpp
index df51ecc0001..61ec3f4be1d 100644
--- a/lib/CodeGen/ExecutionDepsFix.cpp
+++ b/lib/CodeGen/ExecutionDepsFix.cpp
@@ -617,7 +617,7 @@ bool ExecutionDepsFix::isBlockDone(MachineBasicBlock *MBB) {
 }

 bool ExecutionDepsFix::runOnMachineFunction(MachineFunction &mf) {
-  if (skipFunction(*mf.getFunction()))
+  if (skipFunction(mf.getFunction()))
     return false;
   MF = &mf;
   TII = MF->getSubtarget().getInstrInfo();
diff --git a/lib/CodeGen/FEntryInserter.cpp b/lib/CodeGen/FEntryInserter.cpp
index dbe6b30c964..4ddf9f92836 100644
--- a/lib/CodeGen/FEntryInserter.cpp
+++ b/lib/CodeGen/FEntryInserter.cpp
@@ -36,7 +36,7 @@ struct FEntryInserter : public MachineFunctionPass {

 bool FEntryInserter::runOnMachineFunction(MachineFunction &MF) {
   const std::string FEntryName =
-      MF.getFunction()->getFnAttribute("fentry-call").getValueAsString();
+      MF.getFunction().getFnAttribute("fentry-call").getValueAsString();
   if (FEntryName != "true")
     return false;

diff --git a/lib/CodeGen/GCRootLowering.cpp b/lib/CodeGen/GCRootLowering.cpp
index b5ebabd5455..4361d8b248c 100644
--- a/lib/CodeGen/GCRootLowering.cpp
+++ b/lib/CodeGen/GCRootLowering.cpp
@@ -328,10 +328,10 @@ void GCMachineCodeAnalysis::FindStackOffsets(MachineFunction &MF) {

 bool GCMachineCodeAnalysis::runOnMachineFunction(MachineFunction &MF) {
   // Quick exit for functions that do not use GC.
-  if (!MF.getFunction()->hasGC())
+  if (!MF.getFunction().hasGC())
     return false;

-  FI = &getAnalysis<GCModuleInfo>().getFunctionInfo(*MF.getFunction());
+  FI = &getAnalysis<GCModuleInfo>().getFunctionInfo(MF.getFunction());
   MMI = &getAnalysis<MachineModuleInfo>();
   TII = MF.getSubtarget().getInstrInfo();
diff --git a/lib/CodeGen/GlobalISel/CallLowering.cpp b/lib/CodeGen/GlobalISel/CallLowering.cpp
index 50ea69a267e..114c068749e 100644
--- a/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -108,7 +108,7 @@ bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                      ArrayRef<ArgInfo> Args,
                                      ValueHandler &Handler) const {
   MachineFunction &MF = MIRBuilder.getMF();
-  const Function &F = *MF.getFunction();
+  const Function &F = MF.getFunction();
   const DataLayout &DL = F.getParent()->getDataLayout();

   SmallVector<CCValAssign, 16> ArgLocs;
diff --git a/lib/CodeGen/GlobalISel/IRTranslator.cpp b/lib/CodeGen/GlobalISel/IRTranslator.cpp
index a0d81366eb6..433f99b0113 100644
--- a/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -124,8 +124,8 @@ unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
       bool Success = translate(*CV, VReg);
       if (!Success) {
         OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
-                                   MF->getFunction()->getSubprogram(),
-                                   &MF->getFunction()->getEntryBlock());
+                                   MF->getFunction().getSubprogram(),
+                                   &MF->getFunction().getEntryBlock());
         R << "unable to translate constant: " << ore::NV("Type", Val.getType());
         reportTranslationError(*MF, *TPC, *ORE, R);
         return VReg;
@@ -591,7 +591,7 @@ void IRTranslator::getStackGuard(unsigned DstReg,
   MIB.addDef(DstReg);

   auto &TLI = *MF->getSubtarget().getTargetLowering();
-  Value *Global = TLI.getSDagStackGuard(*MF->getFunction()->getParent());
+  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
   if (!Global)
     return;

@@ -925,7 +925,7 @@ bool IRTranslator::translateLandingPad(const User &U,
   // If there aren't registers to copy the values into (e.g., during SjLj
   // exceptions), then don't bother.
   auto &TLI = *MF->getSubtarget().getTargetLowering();
-  const Constant *PersonalityFn = MF->getFunction()->getPersonalityFn();
+  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
     return true;

@@ -1236,7 +1236,7 @@ void IRTranslator::finalizeFunction() {

 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
   MF = &CurMF;
-  const Function &F = *MF->getFunction();
+  const Function &F = MF->getFunction();
   if (F.empty())
     return false;
   CLI = MF->getSubtarget().getCallLowering();
@@ -1252,8 +1252,7 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
   if (!DL->isLittleEndian()) {
     // Currently we don't properly handle big endian code.
     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
-                               MF->getFunction()->getSubprogram(),
-                               &MF->getFunction()->getEntryBlock());
+                               F.getSubprogram(), &F.getEntryBlock());
     R << "unable to translate in big endian mode";
     reportTranslationError(*MF, *TPC, *ORE, R);
   }
@@ -1289,8 +1288,7 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
   }
   if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
-                               MF->getFunction()->getSubprogram(),
-                               &MF->getFunction()->getEntryBlock());
+                               F.getSubprogram(), &F.getEntryBlock());
     R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
     reportTranslationError(*MF, *TPC, *ORE, R);
     return false;
diff --git a/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/lib/CodeGen/GlobalISel/InstructionSelect.cpp
index bd5fd5afcbc..422cc2219aa 100644
--- a/lib/CodeGen/GlobalISel/InstructionSelect.cpp
+++ b/lib/CodeGen/GlobalISel/InstructionSelect.cpp
@@ -189,7 +189,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {

   if (MF.size() != NumBlocks) {
     MachineOptimizationRemarkMissed R("gisel-select", "GISelFailure",
-                                      MF.getFunction()->getSubprogram(),
+                                      MF.getFunction().getSubprogram(),
                                       /*MBB=*/nullptr);
     R << "inserting blocks is not supported yet";
     reportGISelFailure(MF, TPC, MORE, R);
diff --git a/lib/CodeGen/GlobalISel/Legalizer.cpp b/lib/CodeGen/GlobalISel/Legalizer.cpp
index d026aca981a..f09b0d9f11e 100644
--- a/lib/CodeGen/GlobalISel/Legalizer.cpp
+++ b/lib/CodeGen/GlobalISel/Legalizer.cpp
@@ -175,7 +175,7 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
   // outerloop for that.
   if (MF.size() != NumBlocks) {
     MachineOptimizationRemarkMissed R("gisel-legalize", "GISelFailure",
-                                      MF.getFunction()->getSubprogram(),
+                                      MF.getFunction().getSubprogram(),
                                       /*MBB=*/nullptr);
     R << "inserting blocks is not supported yet";
     reportGISelFailure(MF, TPC, MORE, R);
diff --git a/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 18d168c4f06..87a658be4c2 100644
--- a/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -136,7 +136,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::libcall(MachineInstr &MI) {
   LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
   unsigned Size = LLTy.getSizeInBits();
-  auto &Ctx = MIRBuilder.getMF().getFunction()->getContext();
+  auto &Ctx = MIRBuilder.getMF().getFunction().getContext();

   MIRBuilder.setInstr(MI);

@@ -410,7 +410,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
       return UnableToLegalize;
     int NumParts = SizeOp0 / NarrowSize;
     const APInt &Cst = MI.getOperand(1).getCImm()->getValue();
-    LLVMContext &Ctx = MIRBuilder.getMF().getFunction()->getContext();
+    LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();

     SmallVector<unsigned, 2> DstRegs;
     for (int i = 0; i < NumParts; ++i) {
@@ -824,7 +824,7 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
       return UnableToLegalize;
     unsigned Res = MI.getOperand(0).getReg();
     Type *ZeroTy;
-    LLVMContext &Ctx = MIRBuilder.getMF().getFunction()->getContext();
+    LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
     switch (Ty.getSizeInBits()) {
     case 16:
       ZeroTy = Type::getHalfTy(Ctx);
diff --git a/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 62c396e6cdf..475bb82e5b9 100644
--- a/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -263,7 +263,7 @@ MachineInstrBuilder MachineIRBuilder::buildConstant(unsigned Res,

   const ConstantInt *NewVal = &Val;
   if (Ty.getSizeInBits() != Val.getBitWidth())
-    NewVal = ConstantInt::get(MF->getFunction()->getContext(),
+    NewVal = ConstantInt::get(MF->getFunction().getContext(),
                               Val.getValue().sextOrTrunc(Ty.getSizeInBits()));

   return buildInstr(TargetOpcode::G_CONSTANT).addDef(Res).addCImm(NewVal);
@@ -271,7 +271,7 @@ MachineInstrBuilder MachineIRBuilder::buildConstant(unsigned Res,

 MachineInstrBuilder MachineIRBuilder::buildConstant(unsigned Res, int64_t Val) {
-  auto IntN = IntegerType::get(MF->getFunction()->getContext(),
+  auto IntN = IntegerType::get(MF->getFunction().getContext(),
                                MRI->getType(Res).getSizeInBits());
   ConstantInt *CI = ConstantInt::get(IntN, Val, true);
   return buildConstant(Res, *CI);
diff --git a/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/lib/CodeGen/GlobalISel/RegBankSelect.cpp
index 36ce1c220cb..006c9ea2303 100644
--- a/lib/CodeGen/GlobalISel/RegBankSelect.cpp
+++ b/lib/CodeGen/GlobalISel/RegBankSelect.cpp
@@ -601,9 +601,9 @@ bool RegBankSelect::runOnMachineFunction(MachineFunction &MF) {
     return false;

   DEBUG(dbgs() << "Assign register banks for: " << MF.getName() << '\n');
-  const Function *F = MF.getFunction();
+  const Function &F = MF.getFunction();
   Mode SaveOptMode = OptMode;
-  if (F->hasFnAttribute(Attribute::OptimizeNone))
+  if (F.hasFnAttribute(Attribute::OptimizeNone))
     OptMode = Mode::Fast;
   init(MF);
diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp
index 1bac5685ec5..a22ce0dab9c 100644
--- a/lib/CodeGen/IfConversion.cpp
+++ b/lib/CodeGen/IfConversion.cpp
@@ -337,7 +337,7 @@
 INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
 INITIALIZE_PASS_END(IfConverter, DEBUG_TYPE, "If Converter", false, false)

 bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
-  if (skipFunction(*MF.getFunction()) || (PredicateFtor && !PredicateFtor(MF)))
+  if (skipFunction(MF.getFunction()) || (PredicateFtor && !PredicateFtor(MF)))
     return false;

   const TargetSubtargetInfo &ST = MF.getSubtarget();
diff --git a/lib/CodeGen/LexicalScopes.cpp b/lib/CodeGen/LexicalScopes.cpp
index 47ab4ef65c7..8c54751ee83 100644
--- a/lib/CodeGen/LexicalScopes.cpp
+++ b/lib/CodeGen/LexicalScopes.cpp
@@ -49,7 +49,7 @@ void LexicalScopes::reset() {
 void LexicalScopes::initialize(const MachineFunction &Fn) {
   reset();
   // Don't attempt any lexical scope creation for a NoDebug compile unit.
-  if (Fn.getFunction()->getSubprogram()->getUnit()->getEmissionKind() ==
+  if (Fn.getFunction().getSubprogram()->getUnit()->getEmissionKind() ==
       DICompileUnit::NoDebug)
     return;
   MF = &Fn;
@@ -173,7 +173,7 @@ LexicalScopes::getOrCreateRegularScope(const DILocalScope *Scope) {
                             false)).first;

   if (!Parent) {
-    assert(cast<DISubprogram>(Scope)->describes(MF->getFunction()));
+    assert(cast<DISubprogram>(Scope)->describes(&MF->getFunction()));
     assert(!CurrentFnLexicalScope);
     CurrentFnLexicalScope = &I->second;
   }
diff --git a/lib/CodeGen/LiveDebugValues.cpp b/lib/CodeGen/LiveDebugValues.cpp
index f35e401d979..19ec281079c 100644
--- a/lib/CodeGen/LiveDebugValues.cpp
+++ b/lib/CodeGen/LiveDebugValues.cpp
@@ -703,12 +703,12 @@ bool LiveDebugValues::ExtendRanges(MachineFunction &MF) {
 }

 bool LiveDebugValues::runOnMachineFunction(MachineFunction &MF) {
-  if (!MF.getFunction()->getSubprogram())
+  if (!MF.getFunction().getSubprogram())
     // LiveDebugValues will already have removed all DBG_VALUEs.
     return false;

   // Skip functions from NoDebug compilation units.
-  if (MF.getFunction()->getSubprogram()->getUnit()->getEmissionKind() ==
+  if (MF.getFunction().getSubprogram()->getUnit()->getEmissionKind() ==
       DICompileUnit::NoDebug)
     return false;

diff --git a/lib/CodeGen/LiveDebugVariables.cpp b/lib/CodeGen/LiveDebugVariables.cpp
index 4d1d4b0ebd3..34572f24c18 100644
--- a/lib/CodeGen/LiveDebugVariables.cpp
+++ b/lib/CodeGen/LiveDebugVariables.cpp
@@ -833,7 +833,7 @@ static void removeDebugValues(MachineFunction &mf) {
 bool LiveDebugVariables::runOnMachineFunction(MachineFunction &mf) {
   if (!EnableLDV)
     return false;
-  if (!mf.getFunction()->getSubprogram()) {
+  if (!mf.getFunction().getSubprogram()) {
     removeDebugValues(mf);
     return false;
   }
diff --git a/lib/CodeGen/LiveRangeShrink.cpp b/lib/CodeGen/LiveRangeShrink.cpp
index b237c677fd3..02e1f3b01ad 100644
--- a/lib/CodeGen/LiveRangeShrink.cpp
+++ b/lib/CodeGen/LiveRangeShrink.cpp
@@ -106,7 +106,7 @@ static void BuildInstOrderMap(MachineBasicBlock::iterator Start,
 }

 bool LiveRangeShrink::runOnMachineFunction(MachineFunction &MF) {
-  if (skipFunction(*MF.getFunction()))
+  if (skipFunction(MF.getFunction()))
     return false;

   MachineRegisterInfo &MRI = MF.getRegInfo();
diff --git a/lib/CodeGen/MIRParser/MIParser.cpp b/lib/CodeGen/MIRParser/MIParser.cpp
index c1471520d24..1a78ae3aad0 100644
--- a/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/lib/CodeGen/MIRParser/MIParser.cpp
@@ -431,7 +431,7 @@ bool MIParser::parseBasicBlockDefinition(
       break;
     case MIToken::IRBlock:
       // TODO: Report an error when both name and ir block are specified.
-      if (parseIRBlock(BB, *MF.getFunction()))
+      if (parseIRBlock(BB, MF.getFunction()))
        return true;
      lex();
      break;
@@ -447,7 +447,7 @@ bool MIParser::parseBasicBlockDefinition(

   if (!Name.empty()) {
     BB = dyn_cast_or_null<BasicBlock>(
-        MF.getFunction()->getValueSymbolTable()->lookup(Name));
+        MF.getFunction().getValueSymbolTable()->lookup(Name));
     if (!BB)
       return error(Loc, Twine("basic block '") + Name +
                             "' is not defined in the function '" +
@@ -1234,7 +1234,7 @@ bool MIParser::parseIRConstant(StringRef::iterator Loc, StringRef StringValue,
                                const Constant *&C) {
   auto Source = StringValue.str(); // The source has to be null terminated.
   SMDiagnostic Err;
-  C = parseConstantValue(Source, Err, *MF.getFunction()->getParent(),
+  C = parseConstantValue(Source, Err, *MF.getFunction().getParent(),
                          &PFS.IRSlots);
   if (!C)
     return error(Loc + Err.getColumnNo(), Err.getMessage());
@@ -1254,7 +1254,7 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
     lex();
     return false;
   } else if (Token.is(MIToken::PointerType)) {
-    const DataLayout &DL = MF.getFunction()->getParent()->getDataLayout();
+    const DataLayout &DL = MF.getDataLayout();
     unsigned AS = APSInt(Token.range().drop_front()).getZExtValue();
     Ty = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
     lex();
@@ -1419,7 +1419,7 @@ bool MIParser::parseFixedStackObjectOperand(MachineOperand &Dest) {
 bool MIParser::parseGlobalValue(GlobalValue *&GV) {
   switch (Token.kind()) {
   case MIToken::NamedGlobalValue: {
-    const Module *M = MF.getFunction()->getParent();
+    const Module *M = MF.getFunction().getParent();
     GV = M->getNamedValue(Token.stringValue());
     if (!GV)
       return error(Twine("use of undefined global value '") + Token.range() +
@@ -1557,7 +1557,7 @@ bool MIParser::parseDIExpression(MDNode *&Expr) {
   if (expectAndConsume(MIToken::rparen))
     return true;

-  Expr = DIExpression::get(MF.getFunction()->getContext(), Elements);
+  Expr = DIExpression::get(MF.getFunction().getContext(), Elements);
   return false;
 }

@@ -2102,7 +2102,7 @@ bool MIParser::parseOperandsOffset(MachineOperand &Op) {
 bool MIParser::parseIRValue(const Value *&V) {
   switch (Token.kind()) {
   case MIToken::NamedIRValue: {
-    V = MF.getFunction()->getValueSymbolTable()->lookup(Token.stringValue());
+    V = MF.getFunction().getValueSymbolTable()->lookup(Token.stringValue());
     break;
   }
   case MIToken::IRValue: {
@@ -2361,7 +2361,7 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {

   // Optional synchronization scope.
   SyncScope::ID SSID;
-  if (parseOptionalScope(MF.getFunction()->getContext(), SSID))
+  if (parseOptionalScope(MF.getFunction().getContext(), SSID))
     return true;

   // Up to two atomic orderings (cmpxchg provides guarantees on failure).
@@ -2542,12 +2542,12 @@ static const BasicBlock *getIRBlockFromSlot(

 const BasicBlock *MIParser::getIRBlock(unsigned Slot) {
   if (Slots2BasicBlocks.empty())
-    initSlots2BasicBlocks(*MF.getFunction(), Slots2BasicBlocks);
+    initSlots2BasicBlocks(MF.getFunction(), Slots2BasicBlocks);
   return getIRBlockFromSlot(Slot, Slots2BasicBlocks);
 }

 const BasicBlock *MIParser::getIRBlock(unsigned Slot, const Function &F) {
-  if (&F == MF.getFunction())
+  if (&F == &MF.getFunction())
     return getIRBlock(Slot);
   DenseMap<unsigned, const BasicBlock *> CustomSlots2BasicBlocks;
   initSlots2BasicBlocks(F, CustomSlots2BasicBlocks);
@@ -2578,7 +2578,7 @@ static void initSlots2Values(const Function &F,

 const Value *MIParser::getIRValue(unsigned Slot) {
   if (Slots2Values.empty())
-    initSlots2Values(*MF.getFunction(), Slots2Values);
+    initSlots2Values(MF.getFunction(), Slots2Values);
   auto ValueInfo = Slots2Values.find(Slot);
   if (ValueInfo == Slots2Values.end())
     return nullptr;
diff --git a/lib/CodeGen/MIRParser/MIRParser.cpp b/lib/CodeGen/MIRParser/MIRParser.cpp
index 836cc1db48a..7d8e62736a3 100644
--- a/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -551,7 +551,7 @@ bool MIRParserImpl::initializeFrameInfo(PerFunctionMIParsingState &PFS,
                                         const yaml::MachineFunction &YamlMF) {
   MachineFunction &MF = PFS.MF;
   MachineFrameInfo &MFI = MF.getFrameInfo();
-  const Function &F = *MF.getFunction();
+  const Function &F = MF.getFunction();
   const yaml::MachineFrameInfo &YamlMFI = YamlMF.FrameInfo;
   MFI.setFrameAddressIsTaken(YamlMFI.IsFrameAddressTaken);
   MFI.setReturnAddressIsTaken(YamlMFI.IsReturnAddressTaken);
@@ -722,7 +722,7 @@ bool MIRParserImpl::initializeConstantPool(PerFunctionMIParsingState &PFS,
     MachineConstantPool &ConstantPool, const yaml::MachineFunction &YamlMF) {
   DenseMap<unsigned, unsigned> &ConstantPoolSlots = PFS.ConstantPoolSlots;
   const MachineFunction &MF = PFS.MF;
-  const auto &M = *MF.getFunction()->getParent();
+  const auto &M = *MF.getFunction().getParent();
   SMDiagnostic Error;
   for (const auto &YamlConstant : YamlMF.Constants) {
     if (YamlConstant.IsTargetSpecific)
diff --git a/lib/CodeGen/MIRPrinter.cpp b/lib/CodeGen/MIRPrinter.cpp
index 7c4e098b564..3568f96d2b9 100644
--- a/lib/CodeGen/MIRPrinter.cpp
+++ b/lib/CodeGen/MIRPrinter.cpp
@@ -213,8 +213,8 @@ void MIRPrinter::print(const MachineFunction &MF) {
       MachineFunctionProperties::Property::Selected);

   convert(YamlMF, MF.getRegInfo(), MF.getSubtarget().getRegisterInfo());
-  ModuleSlotTracker MST(MF.getFunction()->getParent());
-  MST.incorporateFunction(*MF.getFunction());
+  ModuleSlotTracker MST(MF.getFunction().getParent());
+  MST.incorporateFunction(MF.getFunction());
   convert(MST, YamlMF.FrameInfo, MF.getFrameInfo());
   convertStackObjects(YamlMF, MF, MST);
   if (const auto *ConstantPool = MF.getConstantPool())
@@ -696,7 +696,7 @@ void MIPrinter::print(const MachineInstr &MI) {

   if (!MI.memoperands_empty()) {
     OS << " :: ";
-    const LLVMContext &Context = MF->getFunction()->getContext();
+    const LLVMContext &Context = MF->getFunction().getContext();
     bool NeedComma = false;
     for (const auto *Op : MI.memoperands()) {
       if (NeedComma)
diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp
index c105335fddb..209abf34d88 100644
--- a/lib/CodeGen/MachineBasicBlock.cpp
+++ b/lib/CodeGen/MachineBasicBlock.cpp
@@ -267,8 +267,8 @@ void MachineBasicBlock::print(raw_ostream &OS, const SlotIndexes *Indexes)
        << " is null\n";
     return;
   }
-  const Function *F = MF->getFunction();
-  const Module *M = F ? F->getParent() : nullptr;
+  const Function &F = MF->getFunction();
+  const Module *M = F.getParent();
   ModuleSlotTracker MST(M);
   print(OS, MST, Indexes);
 }
diff --git a/lib/CodeGen/MachineBlockFrequencyInfo.cpp b/lib/CodeGen/MachineBlockFrequencyInfo.cpp
index 2c336e45056..3459a9f71a7 100644
--- a/lib/CodeGen/MachineBlockFrequencyInfo.cpp
+++ b/lib/CodeGen/MachineBlockFrequencyInfo.cpp
@@ -224,14 +224,14 @@ MachineBlockFrequencyInfo::getBlockFreq(const MachineBasicBlock *MBB) const {

 Optional<uint64_t> MachineBlockFrequencyInfo::getBlockProfileCount(
     const MachineBasicBlock *MBB) const {
-  const Function *F = MBFI->getFunction()->getFunction();
-  return MBFI ? MBFI->getBlockProfileCount(*F, MBB) : None;
+  const Function &F = MBFI->getFunction()->getFunction();
+  return MBFI ? MBFI->getBlockProfileCount(F, MBB) : None;
 }

 Optional<uint64_t>
 MachineBlockFrequencyInfo::getProfileCountFromFreq(uint64_t Freq) const {
-  const Function *F = MBFI->getFunction()->getFunction();
-  return MBFI ? MBFI->getProfileCountFromFreq(*F, Freq) : None;
+  const Function &F = MBFI->getFunction()->getFunction();
+  return MBFI ? MBFI->getProfileCountFromFreq(F, Freq) : None;
 }

 bool
diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp
index 87af9533b32..4ce68960773 100644
--- a/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/lib/CodeGen/MachineBlockPlacement.cpp
@@ -1235,7 +1235,7 @@ void MachineBlockPlacement::precomputeTriangleChains() {
 // When profile is available, we need to handle the triangle-shape CFG.
 static BranchProbability getLayoutSuccessorProbThreshold(
     const MachineBasicBlock *BB) {
-  if (!BB->getParent()->getFunction()->getEntryCount())
+  if (!BB->getParent()->getFunction().getEntryCount())
     return BranchProbability(StaticLikelyProb, 100);
   if (BB->succ_size() == 2) {
     const MachineBasicBlock *Succ1 = *BB->succ_begin();
@@ -1769,7 +1769,7 @@ MachineBlockPlacement::findBestLoopTop(const MachineLoop &L,
   // i.e. when the layout predecessor does not fallthrough to the loop header.
   // In practice this never happens though: there always seems to be a preheader
   // that can fallthrough and that is also placed before the header.
-  if (F->getFunction()->optForSize())
+  if (F->getFunction().optForSize())
     return L.getHeader();

   // Check that the header hasn't been fused with a preheader block due to
@@ -2178,7 +2178,7 @@ MachineBlockPlacement::collectLoopBlockSet(const MachineLoop &L) {
   // will be merged into the first outer loop chain for which this block is not
   // cold anymore. This needs precise profile data and we only do this when
   // profile data is available.
-  if (F->getFunction()->getEntryCount() || ForceLoopColdBlock) {
+  if (F->getFunction().getEntryCount() || ForceLoopColdBlock) {
     BlockFrequency LoopFreq(0);
     for (auto LoopPred : L.getHeader()->predecessors())
       if (!L.contains(LoopPred))
@@ -2220,7 +2220,7 @@ void MachineBlockPlacement::buildLoopChains(const MachineLoop &L) {
   // for better layout.
   bool RotateLoopWithProfile =
       ForcePreciseRotationCost ||
-      (PreciseRotationCost && F->getFunction()->getEntryCount());
+      (PreciseRotationCost && F->getFunction().getEntryCount());

   // First check to see if there is an obviously preferable top block for the
   // loop. This will default to the header, but may end up as one of the
@@ -2485,7 +2485,7 @@ void MachineBlockPlacement::alignBlocks() {
   // exclusively on the loop info here so that we can align backedges in
   // unnatural CFGs and backedges that were introduced purely because of the
   // loop rotations done during this layout pass.
-  if (F->getFunction()->optForSize())
+  if (F->getFunction().optForSize())
     return;
   BlockChain &FunctionChain = *BlockToChain[&F->front()];
   if (FunctionChain.begin() == FunctionChain.end())
@@ -2715,7 +2715,7 @@ bool MachineBlockPlacement::maybeTailDuplicateBlock(
 }

 bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
-  if (skipFunction(*MF.getFunction()))
+  if (skipFunction(MF.getFunction()))
     return false;

   // Check for single-block functions and skip them.
@@ -2760,7 +2760,7 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {

   if (TailDupPlacement) {
     MPDT = &getAnalysis<MachinePostDominatorTree>();
-    if (MF.getFunction()->optForSize())
+    if (MF.getFunction().optForSize())
       TailDupSize = 1;
     bool PreRegAlloc = false;
     TailDup.initMF(MF, PreRegAlloc, MBPI, /* LayoutMode */ true, TailDupSize);
@@ -2817,7 +2817,7 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
   }

   if (ViewBlockLayoutWithBFI != GVDT_None &&
       (ViewBlockFreqFuncName.empty() ||
-       F->getFunction()->getName().equals(ViewBlockFreqFuncName))) {
+       F->getFunction().getName().equals(ViewBlockFreqFuncName))) {
     MBFI->view("MBP." + MF.getName(), false);
   }

diff --git a/lib/CodeGen/MachineCSE.cpp b/lib/CodeGen/MachineCSE.cpp
index da63b41858e..53c0d840ac8 100644
--- a/lib/CodeGen/MachineCSE.cpp
+++ b/lib/CodeGen/MachineCSE.cpp
@@ -727,7 +727,7 @@ bool MachineCSE::PerformCSE(MachineDomTreeNode *Node) {
 }

 bool MachineCSE::runOnMachineFunction(MachineFunction &MF) {
-  if (skipFunction(*MF.getFunction()))
+  if (skipFunction(MF.getFunction()))
     return false;

   TII = MF.getSubtarget().getInstrInfo();
diff --git a/lib/CodeGen/MachineCombiner.cpp b/lib/CodeGen/MachineCombiner.cpp
index 4f40fde6918..702d2122847 100644
--- a/lib/CodeGen/MachineCombiner.cpp
+++ b/lib/CodeGen/MachineCombiner.cpp
@@ -548,7 +548,7 @@ bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
   MLI = &getAnalysis<MachineLoopInfo>();
   Traces = &getAnalysis<MachineTraceMetrics>();
   MinInstr = nullptr;
-  OptSize = MF.getFunction()->optForSize();
+  OptSize = MF.getFunction().optForSize();

   DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
   if (!TII->useMachineCombiner()) {
diff --git a/lib/CodeGen/MachineCopyPropagation.cpp b/lib/CodeGen/MachineCopyPropagation.cpp
index bc4495ccd08..fcec05adc73 100644
--- a/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/lib/CodeGen/MachineCopyPropagation.cpp
@@ -378,7 +378,7 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
 }

 bool MachineCopyPropagation::runOnMachineFunction(MachineFunction &MF) {
-  if (skipFunction(*MF.getFunction()))
+  if (skipFunction(MF.getFunction()))
     return false;

   Changed = false;
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index 5ffc8914b96..bc8eb1429d9 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -244,7 +244,7 @@ getOrCreateJumpTableInfo(unsigned EntryKind) {

 /// Should we be emitting segmented stack stuff for the function
 bool MachineFunction::shouldSplitStack() const {
-  return getFunction()->hasFnAttribute("split-stack");
+  return getFunction().hasFnAttribute("split-stack");
 }

 /// This discards all of the MachineBasicBlock numbers and recomputes them.
@@ -485,8 +485,7 @@ LLVM_DUMP_METHOD void MachineFunction::dump() const { #endif StringRef MachineFunction::getName() const { - assert(getFunction() && "No function!"); - return getFunction()->getName(); + return getFunction().getName(); } void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const { @@ -519,8 +518,8 @@ void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const { OS << '\n'; } - ModuleSlotTracker MST(getFunction()->getParent()); - MST.incorporateFunction(*getFunction()); + ModuleSlotTracker MST(getFunction().getParent()); + MST.incorporateFunction(getFunction()); for (const auto &BB : *this) { OS << '\n'; BB.print(OS, MST, Indexes); diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp index 96722b26ee8..14655c6eb70 100644 --- a/lib/CodeGen/MachineInstr.cpp +++ b/lib/CodeGen/MachineInstr.cpp @@ -1211,7 +1211,7 @@ void MachineInstr::print(raw_ostream &OS, bool SkipOpers, bool SkipDebugLoc, const Module *M = nullptr; if (const MachineBasicBlock *MBB = getParent()) if (const MachineFunction *MF = MBB->getParent()) - M = MF->getFunction()->getParent(); + M = MF->getFunction().getParent(); ModuleSlotTracker MST(M); print(OS, MST, SkipOpers, SkipDebugLoc, TII); diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp index a251a08a516..75d449c7ac6 100644 --- a/lib/CodeGen/MachineLICM.cpp +++ b/lib/CodeGen/MachineLICM.cpp @@ -280,7 +280,7 @@ static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) { } bool MachineLICM::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; Changed = FirstInLoop = false; diff --git a/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp b/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp index b3227b16954..ca4452218da 100644 --- a/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp +++ b/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp @@ -50,7 +50,7 @@ void MachineOptimizationRemarkEmitter::emit( auto &OptDiag = cast<DiagnosticInfoMIROptimization>(OptDiagCommon); computeHotness(OptDiag); - LLVMContext &Ctx = MF.getFunction()->getContext(); + LLVMContext &Ctx = MF.getFunction().getContext(); // Only emit it if its hotness meets the threshold. if (OptDiag.getHotness().getValueOr(0) < @@ -71,7 +71,7 @@ bool MachineOptimizationRemarkEmitterPass::runOnMachineFunction( MachineFunction &MF) { MachineBlockFrequencyInfo *MBFI; - if (MF.getFunction()->getContext().getDiagnosticsHotnessRequested()) + if (MF.getFunction().getContext().getDiagnosticsHotnessRequested()) MBFI = &getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI(); else MBFI = nullptr; diff --git a/lib/CodeGen/MachinePipeliner.cpp b/lib/CodeGen/MachinePipeliner.cpp index 293242446f2..18cb9af499a 100644 --- a/lib/CodeGen/MachinePipeliner.cpp +++ b/lib/CodeGen/MachinePipeliner.cpp @@ -729,13 +729,13 @@ INITIALIZE_PASS_END(MachinePipeliner, DEBUG_TYPE, /// The "main" function for implementing Swing Modulo Scheduling. 
bool MachinePipeliner::runOnMachineFunction(MachineFunction &mf) { - if (skipFunction(*mf.getFunction())) + if (skipFunction(mf.getFunction())) return false; if (!EnableSWP) return false; - if (mf.getFunction()->getAttributes().hasAttribute( + if (mf.getFunction().getAttributes().hasAttribute( AttributeList::FunctionIndex, Attribute::OptimizeForSize) && !EnableSWPOptSize.getPosition()) return false; diff --git a/lib/CodeGen/MachineRegisterInfo.cpp b/lib/CodeGen/MachineRegisterInfo.cpp index a075543aecf..b82ab02a6e6 100644 --- a/lib/CodeGen/MachineRegisterInfo.cpp +++ b/lib/CodeGen/MachineRegisterInfo.cpp @@ -531,7 +531,7 @@ static bool isNoReturnDef(const MachineOperand &MO) { const MachineFunction &MF = *MBB.getParent(); // We need to keep correct unwind information even if the function will // not return, since the runtime may need it. - if (MF.getFunction()->hasFnAttribute(Attribute::UWTable)) + if (MF.getFunction().hasFnAttribute(Attribute::UWTable)) return false; const Function *Called = getCalledFunction(MI); return !(Called == nullptr || !Called->hasFnAttribute(Attribute::NoReturn) || diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp index 6be13737ee3..e15eb658a05 100644 --- a/lib/CodeGen/MachineScheduler.cpp +++ b/lib/CodeGen/MachineScheduler.cpp @@ -351,7 +351,7 @@ ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() { /// design would be to split blocks at scheduling boundaries, but LLVM has a /// general bias against block splitting purely for implementation simplicity. bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) { - if (skipFunction(*mf.getFunction())) + if (skipFunction(mf.getFunction())) return false; if (EnableMachineSched.getNumOccurrences()) { @@ -389,7 +389,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) { } bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) { - if (skipFunction(*mf.getFunction())) + if (skipFunction(mf.getFunction())) return false; if (EnablePostRAMachineSched.getNumOccurrences()) { diff --git a/lib/CodeGen/MachineSink.cpp b/lib/CodeGen/MachineSink.cpp index fd4bd1a588c..bedfdd84b1c 100644 --- a/lib/CodeGen/MachineSink.cpp +++ b/lib/CodeGen/MachineSink.cpp @@ -292,7 +292,7 @@ MachineSinking::AllUsesDominatedByBlock(unsigned Reg, } bool MachineSinking::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; DEBUG(dbgs() << "******** Machine Sinking ********\n"); diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp index 1752c6cd5f5..c9fe7681e28 100644 --- a/lib/CodeGen/MachineVerifier.cpp +++ b/lib/CodeGen/MachineVerifier.cpp @@ -637,12 +637,12 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { const MCAsmInfo *AsmInfo = TM->getMCAsmInfo(); const BasicBlock *BB = MBB->getBasicBlock(); - const Function *Fn = MF->getFunction(); + const Function &F = MF->getFunction(); if (LandingPadSuccs.size() > 1 && !(AsmInfo && AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj && BB && isa<SwitchInst>(BB->getTerminator())) && - !isFuncletEHPersonality(classifyEHPersonality(Fn->getPersonalityFn()))) + !isFuncletEHPersonality(classifyEHPersonality(F.getPersonalityFn()))) report("MBB has more than one landing pad successor", MBB); // Call AnalyzeBranch. If it succeeds, there are several more conditions to check.
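The skipFunction() rewrites that recur throughout this patch all follow one boilerplate. A self-contained sketch, assuming the post-patch API; ExamplePass is a hypothetical pass, not one touched by this commit:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

using namespace llvm;

namespace {
// Hypothetical skeleton: skipFunction(const Function &) honors
// -opt-bisect-limit and the optnone attribute, letting passes bail out
// early without touching the function.
struct ExamplePass : public MachineFunctionPass {
  static char ID;
  ExamplePass() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    if (skipFunction(MF.getFunction())) // was: skipFunction(*MF.getFunction())
      return false;
    // ... transform MF here ...
    return false; // report whether MF was modified
  }
};
} // end anonymous namespace

char ExamplePass::ID = 0;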
diff --git a/lib/CodeGen/OptimizePHIs.cpp b/lib/CodeGen/OptimizePHIs.cpp index de2aa2955e9..8972867ba08 100644 --- a/lib/CodeGen/OptimizePHIs.cpp +++ b/lib/CodeGen/OptimizePHIs.cpp @@ -72,7 +72,7 @@ INITIALIZE_PASS(OptimizePHIs, DEBUG_TYPE, "Optimize machine instruction PHIs", false, false) bool OptimizePHIs::runOnMachineFunction(MachineFunction &Fn) { - if (skipFunction(*Fn.getFunction())) + if (skipFunction(Fn.getFunction())) return false; MRI = &Fn.getRegInfo(); diff --git a/lib/CodeGen/PatchableFunction.cpp b/lib/CodeGen/PatchableFunction.cpp index cb900ce94be..0957705b19b 100644 --- a/lib/CodeGen/PatchableFunction.cpp +++ b/lib/CodeGen/PatchableFunction.cpp @@ -54,11 +54,11 @@ static bool doesNotGeneratecode(const MachineInstr &MI) { } bool PatchableFunction::runOnMachineFunction(MachineFunction &MF) { - if (!MF.getFunction()->hasFnAttribute("patchable-function")) + if (!MF.getFunction().hasFnAttribute("patchable-function")) return false; #ifndef NDEBUG - Attribute PatchAttr = MF.getFunction()->getFnAttribute("patchable-function"); + Attribute PatchAttr = MF.getFunction().getFnAttribute("patchable-function"); StringRef PatchType = PatchAttr.getValueAsString(); assert(PatchType == "prologue-short-redirect" && "Only possibility today!"); #endif diff --git a/lib/CodeGen/PeepholeOptimizer.cpp b/lib/CodeGen/PeepholeOptimizer.cpp index e4c2aa46478..45078081987 100644 --- a/lib/CodeGen/PeepholeOptimizer.cpp +++ b/lib/CodeGen/PeepholeOptimizer.cpp @@ -1662,7 +1662,7 @@ bool PeepholeOptimizer::optimizeRecurrence(MachineInstr &PHI) { } bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n"); diff --git a/lib/CodeGen/PostRASchedulerList.cpp b/lib/CodeGen/PostRASchedulerList.cpp index a03bb68ffcc..5d86faafdd8 100644 --- a/lib/CodeGen/PostRASchedulerList.cpp +++ b/lib/CodeGen/PostRASchedulerList.cpp @@ -279,7 +279,7 @@ bool PostRAScheduler::enablePostRAScheduler( } bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) { - if (skipFunction(*Fn.getFunction())) + if (skipFunction(Fn.getFunction())) return false; TII = Fn.getSubtarget().getInstrInfo(); diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp index 57c27550e06..a8d8ad8ac7d 100644 --- a/lib/CodeGen/PrologEpilogInserter.cpp +++ b/lib/CodeGen/PrologEpilogInserter.cpp @@ -171,7 +171,7 @@ using StackObjSet = SmallSetVector<int, 8>; /// runOnMachineFunction - Insert prolog/epilog code and replace abstract /// frame indexes with appropriate references. bool PEI::runOnMachineFunction(MachineFunction &Fn) { - const Function* F = Fn.getFunction(); + const Function &F = Fn.getFunction(); const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo(); const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering(); @@ -206,7 +206,7 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) { // called functions. Because of this, calculateCalleeSavedRegisters() // must be called before this function in order to set the AdjustsStack // and MaxCallFrameSize variables. 
- if (!F->hasFnAttribute(Attribute::Naked)) + if (!F.hasFnAttribute(Attribute::Naked)) insertPrologEpilogCode(Fn); // Replace all MO_FrameIndex operands with physical register references @@ -224,8 +224,8 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) { MachineFrameInfo &MFI = Fn.getFrameInfo(); uint64_t StackSize = MFI.getStackSize(); if (WarnStackSize.getNumOccurrences() > 0 && WarnStackSize < StackSize) { - DiagnosticInfoStackSize DiagStackSize(*F, StackSize); - F->getContext().diagnose(DiagStackSize); + DiagnosticInfoStackSize DiagStackSize(F, StackSize); + F.getContext().diagnose(DiagStackSize); } delete RS; @@ -508,7 +508,7 @@ void PEI::spillCalleeSavedRegs(MachineFunction &Fn) { assert(Fn.getProperties().hasProperty( MachineFunctionProperties::Property::NoVRegs)); - const Function *F = Fn.getFunction(); + const Function &F = Fn.getFunction(); const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering(); MachineFrameInfo &MFI = Fn.getFrameInfo(); MinCSFrameIndex = std::numeric_limits<unsigned>::max(); @@ -522,7 +522,7 @@ void PEI::spillCalleeSavedRegs(MachineFunction &Fn) { assignCalleeSavedSpillSlots(Fn, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex); // Add the code to save and restore the callee saved registers. - if (!F->hasFnAttribute(Attribute::Naked)) { + if (!F.hasFnAttribute(Attribute::Naked)) { MFI.setCalleeSavedInfoValid(true); std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo(); @@ -952,7 +952,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) { ORE->emit([&]() { return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize", - Fn.getFunction()->getSubprogram(), + Fn.getFunction().getSubprogram(), &Fn.front()) << ore::NV("NumStackBytes", StackSize) << " stack bytes in function"; }); @@ -993,7 +993,7 @@ void PEI::insertPrologEpilogCode(MachineFunction &Fn) { // approach is rather similar to that of Segmented Stacks, but it uses a // different conditional check and another BIF for allocating more stack // space. - if (Fn.getFunction()->getCallingConv() == CallingConv::HiPE) + if (Fn.getFunction().getCallingConv() == CallingConv::HiPE) for (MachineBasicBlock *SaveBlock : SaveBlocks) TFI.adjustForHiPEPrologue(Fn, *SaveBlock); } diff --git a/lib/CodeGen/RegAllocGreedy.cpp b/lib/CodeGen/RegAllocGreedy.cpp index 58d9050b7c2..186ef577e31 100644 --- a/lib/CodeGen/RegAllocGreedy.cpp +++ b/lib/CodeGen/RegAllocGreedy.cpp @@ -2642,7 +2642,7 @@ bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue, unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg, SmallVectorImpl<unsigned> &NewVRegs) { CutOffInfo = CO_None; - LLVMContext &Ctx = MF->getFunction()->getContext(); + LLVMContext &Ctx = MF->getFunction().getContext(); SmallVirtRegSet FixedRegisters; unsigned Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters); if (Reg == ~0U && (CutOffInfo != CO_None)) { diff --git a/lib/CodeGen/RegAllocPBQP.cpp b/lib/CodeGen/RegAllocPBQP.cpp index 1a9ffb1c3e4..351e91c932e 100644 --- a/lib/CodeGen/RegAllocPBQP.cpp +++ b/lib/CodeGen/RegAllocPBQP.cpp @@ -799,7 +799,7 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) { findVRegIntervalsToAlloc(MF, LIS); #ifndef NDEBUG - const Function &F = *MF.getFunction(); + const Function &F = MF.getFunction(); std::string FullyQualifiedName = F.getParent()->getModuleIdentifier() + "." 
+ F.getName().str(); #endif diff --git a/lib/CodeGen/RegUsageInfoCollector.cpp b/lib/CodeGen/RegUsageInfoCollector.cpp index 2b418feb29e..f49ea25bbf3 100644 --- a/lib/CodeGen/RegUsageInfoCollector.cpp +++ b/lib/CodeGen/RegUsageInfoCollector.cpp @@ -95,7 +95,7 @@ bool RegUsageInfoCollector::runOnMachineFunction(MachineFunction &MF) { unsigned RegMaskSize = (TRI->getNumRegs() + 31) / 32; RegMask.resize(RegMaskSize, 0xFFFFFFFF); - const Function *F = MF.getFunction(); + const Function &F = MF.getFunction(); PhysicalRegisterUsageInfo *PRUI = &getAnalysis<PhysicalRegisterUsageInfo>(); @@ -127,7 +127,7 @@ bool RegUsageInfoCollector::runOnMachineFunction(MachineFunction &MF) { if (!TargetFrameLowering::isSafeForNoCSROpt(F)) { const uint32_t *CallPreservedMask = - TRI->getCallPreservedMask(MF, F->getCallingConv()); + TRI->getCallPreservedMask(MF, F.getCallingConv()); if (CallPreservedMask) { // Set callee saved register as preserved. for (unsigned i = 0; i < RegMaskSize; ++i) @@ -145,7 +145,7 @@ bool RegUsageInfoCollector::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << " \n----------------------------------------\n"); - PRUI->storeUpdateRegUsageInfo(F, std::move(RegMask)); + PRUI->storeUpdateRegUsageInfo(&F, std::move(RegMask)); return false; } diff --git a/lib/CodeGen/RegUsageInfoPropagate.cpp b/lib/CodeGen/RegUsageInfoPropagate.cpp index f6d45067816..5b12d00e126 100644 --- a/lib/CodeGen/RegUsageInfoPropagate.cpp +++ b/lib/CodeGen/RegUsageInfoPropagate.cpp @@ -102,7 +102,7 @@ static const Function *findCalledFunction(const Module &M, MachineInstr &MI) { } bool RegUsageInfoPropagationPass::runOnMachineFunction(MachineFunction &MF) { - const Module *M = MF.getFunction()->getParent(); + const Module *M = MF.getFunction().getParent(); PhysicalRegisterUsageInfo *PRUI = &getAnalysis<PhysicalRegisterUsageInfo>(); DEBUG(dbgs() << " ++++++++++++++++++++ " << getPassName() diff --git a/lib/CodeGen/ResetMachineFunctionPass.cpp b/lib/CodeGen/ResetMachineFunctionPass.cpp index 01b3db43b28..f1885aa7428 100644 --- a/lib/CodeGen/ResetMachineFunctionPass.cpp +++ b/lib/CodeGen/ResetMachineFunctionPass.cpp @@ -51,7 +51,7 @@ namespace { ++NumFunctionsReset; MF.reset(); if (EmitFallbackDiag) { - const Function &F = *MF.getFunction(); + const Function &F = MF.getFunction(); DiagnosticInfoISelFallback DiagFallback(F); F.getContext().diagnose(DiagFallback); } diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp index ac4468f749e..9249fa84b38 100644 --- a/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -114,7 +114,7 @@ ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf, : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()), RemoveKillFlags(RemoveKillFlags), UnknownValue(UndefValue::get( - Type::getVoidTy(mf.getFunction()->getContext()))) { + Type::getVoidTy(mf.getFunction().getContext()))) { DbgValues.clear(); const TargetSubtargetInfo &ST = mf.getSubtarget(); diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index da2ca8851e3..f3f8cab6623 100644 --- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -161,7 +161,7 @@ namespace { DAGCombiner(SelectionDAG &D, AliasAnalysis *AA, CodeGenOpt::Level OL) : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes), OptLevel(OL), AA(AA) { - ForCodeSize = DAG.getMachineFunction().getFunction()->optForSize(); + ForCodeSize = DAG.getMachineFunction().getFunction().optForSize(); MaximumLegalStoreInBits = 0; for 
(MVT VT : MVT::all_valuetypes()) @@ -2933,7 +2933,7 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) { // If integer divide is expensive and we satisfy the requirements, emit an // alternate sequence. Targets may check function attributes for size/speed // trade-offs. - AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes(); + AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); if (N1C && !TLI.isIntDivCheap(N->getValueType(0), Attr)) if (SDValue Op = BuildSDIV(N)) return Op; @@ -3004,7 +3004,7 @@ SDValue DAGCombiner::visitUDIV(SDNode *N) { } // fold (udiv x, c) -> alternate - AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes(); + AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); if (N1C && !TLI.isIntDivCheap(N->getValueType(0), Attr)) if (SDValue Op = BuildUDIV(N)) return Op; @@ -3063,7 +3063,7 @@ SDValue DAGCombiner::visitREM(SDNode *N) { } } - AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes(); + AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); // If X/C can be simplified by the division-by-constant logic, lower // X%C to the equivalent of X-X/C*C. @@ -12940,7 +12940,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) { if (MemVT.getSizeInBits() * 2 > MaximumLegalStoreInBits) return false; - bool NoVectors = DAG.getMachineFunction().getFunction()->hasFnAttribute( + bool NoVectors = DAG.getMachineFunction().getFunction().hasFnAttribute( Attribute::NoImplicitFloat); // This function cannot currently deal with non-byte-sized memory sizes. @@ -16986,7 +16986,7 @@ SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, SDValue DAGCombiner::BuildSDIV(SDNode *N) { // when optimising for minimum size, we don't want to expand a div to a mul // and a shift. - if (DAG.getMachineFunction().getFunction()->optForMinSize()) + if (DAG.getMachineFunction().getFunction().optForMinSize()) return SDValue(); ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1)); @@ -17032,7 +17032,7 @@ SDValue DAGCombiner::BuildSDIVPow2(SDNode *N) { SDValue DAGCombiner::BuildUDIV(SDNode *N) { // when optimising for minimum size, we don't want to expand a div to a mul // and a shift. - if (DAG.getMachineFunction().getFunction()->optForMinSize()) + if (DAG.getMachineFunction().getFunction().optForMinSize()) return SDValue(); ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1)); diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index fdbd3e1105b..bb1dc17b7a1 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -2014,10 +2014,10 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, // isTailCall may be true since the callee does not reference caller stack // frame. Check if it's in the right position and that the return types match. 
SDValue TCChain = InChain; - const Function *F = DAG.getMachineFunction().getFunction(); + const Function &F = DAG.getMachineFunction().getFunction(); bool isTailCall = TLI.isInTailCallPosition(DAG, Node, TCChain) && - (RetTy == F->getReturnType() || F->getReturnType()->isVoidTy()); + (RetTy == F.getReturnType() || F.getReturnType()->isVoidTy()); if (isTailCall) InChain = TCChain; diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index ea076a98cf6..12a21e74079 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -909,7 +909,7 @@ void SelectionDAG::init(MachineFunction &NewMF, ORE = &NewORE; TLI = getSubtarget().getTargetLowering(); TSI = getSubtarget().getSelectionDAGInfo(); - Context = &MF->getFunction()->getContext(); + Context = &MF->getFunction().getContext(); } SelectionDAG::~SelectionDAG() { @@ -1331,7 +1331,7 @@ SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT, assert((TargetFlags == 0 || isTarget) && "Cannot set target flags on target-independent globals"); if (Alignment == 0) - Alignment = MF->getFunction()->optForSize() + Alignment = MF->getFunction().optForSize() ? getDataLayout().getABITypeAlignment(C->getType()) : getDataLayout().getPrefTypeAlignment(C->getType()); unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; @@ -5100,8 +5100,8 @@ static bool shouldLowerMemFuncForSize(const MachineFunction &MF) { // On Darwin, -Os means optimize for size without hurting performance, so // only really optimize for size when -Oz (MinSize) is used. if (MF.getTarget().getTargetTriple().isOSDarwin()) - return MF.getFunction()->optForMinSize(); - return MF.getFunction()->optForSize(); + return MF.getFunction().optForMinSize(); + return MF.getFunction().optForSize(); } static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 8f3ffb62175..466945b8b4f 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -1573,9 +1573,9 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) { EVT(TLI.getPointerTy(DL)))); } - bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg(); + bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg(); CallingConv::ID CallConv = - DAG.getMachineFunction().getFunction()->getCallingConv(); + DAG.getMachineFunction().getFunction().getCallingConv(); Chain = DAG.getTargetLoweringInfo().LowerReturn( Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG); @@ -2110,7 +2110,7 @@ static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, const TargetLowering &TLI = DAG.getTargetLoweringInfo(); EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); MachineFunction &MF = DAG.getMachineFunction(); - Value *Global = TLI.getSDagStackGuard(*MF.getFunction()->getParent()); + Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent()); MachineSDNode *Node = DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain); if (Global) { @@ -2144,7 +2144,7 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD, SDValue Guard; SDLoc dl = getCurSDLoc(); SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy); - const Module &M = *ParentBB->getParent()->getFunction()->getParent(); + const Module &M = *ParentBB->getParent()->getFunction().getParent(); unsigned Align = 
DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext())); // Generate code to load the content of the guard slot. @@ -4766,8 +4766,8 @@ static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, if (Val == 0) return DAG.getConstantFP(1.0, DL, LHS.getValueType()); - const Function *F = DAG.getMachineFunction().getFunction(); - if (!F->optForSize() || + const Function &F = DAG.getMachineFunction().getFunction(); + if (!F.optForSize() || // If optimizing for size, don't insert too many multiplies. // This inserts up to 5 multiplies. countPopulation(Val) + Log2_32(Val) < 7) { @@ -5640,7 +5640,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { case Intrinsic::stackguard: { EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); MachineFunction &MF = DAG.getMachineFunction(); - const Module &M = *MF.getFunction()->getParent(); + const Module &M = *MF.getFunction().getParent(); SDValue Chain = getRoot(); if (TLI.useLoadStackGuardNode()) { Res = getLoadStackGuard(DAG, sdl, Chain); @@ -5748,9 +5748,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { return nullptr; case Intrinsic::gcroot: { MachineFunction &MF = DAG.getMachineFunction(); - const Function *F = MF.getFunction(); - (void)F; - assert(F->hasGC() && + assert(MF.getFunction().hasGC() && "only valid in functions with gc specified, enforced by Verifier"); assert(GFI && "implied by previous"); const Value *Alloca = I.getArgOperand(0)->stripPointerCasts(); @@ -9869,7 +9867,7 @@ MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster( // Don't perform if there is only one cluster or optimizing for size. if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 || TM.getOptLevel() == CodeGenOpt::None || - SwitchMBB->getParent()->getFunction()->optForMinSize()) + SwitchMBB->getParent()->getFunction().optForMinSize()) return SwitchMBB; BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100); @@ -10021,7 +10019,7 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) { unsigned NumClusters = W.LastCluster - W.FirstCluster + 1; if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None && - !DefaultMBB->getParent()->getFunction()->optForMinSize()) { + !DefaultMBB->getParent()->getFunction().optForMinSize()) { // For optimized builds, lower large range as a balanced binary tree. 
splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB); continue; diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index 3a8ad51940d..18f6997ef83 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -212,7 +212,7 @@ namespace llvm { IS.OptLevel = NewOptLevel; IS.TM.setOptLevel(NewOptLevel); DEBUG(dbgs() << "\nChanging optimization level for Function " - << IS.MF->getFunction()->getName() << "\n"); + << IS.MF->getFunction().getName() << "\n"); DEBUG(dbgs() << "\tBefore: -O" << SavedOptLevel << " ; After: -O" << NewOptLevel << "\n"); SavedFastISel = IS.TM.Options.EnableFastISel; @@ -228,7 +228,7 @@ namespace llvm { if (IS.OptLevel == SavedOptLevel) return; DEBUG(dbgs() << "\nRestoring optimization level for Function " - << IS.MF->getFunction()->getName() << "\n"); + << IS.MF->getFunction().getName() << "\n"); DEBUG(dbgs() << "\tBefore: -O" << IS.OptLevel << " ; After: -O" << SavedOptLevel << "\n"); IS.OptLevel = SavedOptLevel; @@ -384,7 +384,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) { assert((!EnableFastISelAbort || TM.Options.EnableFastISel) && "-fast-isel-abort > 0 requires -fast-isel"); - const Function &Fn = *mf.getFunction(); + const Function &Fn = mf.getFunction(); MF = &mf; // Reset the target options before resetting the optimization diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 1dff66f3627..58276052c10 100644 --- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -52,11 +52,11 @@ bool TargetLowering::isPositionIndependent() const { /// so, it sets Chain to the input chain of the tail call. bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const { - const Function *F = DAG.getMachineFunction().getFunction(); + const Function &F = DAG.getMachineFunction().getFunction(); // Conservatively require the attributes of the call to match those of // the return. Ignore noalias because it doesn't affect the call sequence. - AttributeList CallerAttrs = F->getAttributes(); + AttributeList CallerAttrs = F.getAttributes(); if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex) .removeAttribute(Attribute::NoAlias) .hasAttributes()) @@ -2963,7 +2963,7 @@ static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d, SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, std::vector<SDNode *> *Created) const { - AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes(); + AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); if (TLI.isIntDivCheap(N->getValueType(0), Attr)) return SDValue(N,0); // Lower SDIV as SDIV diff --git a/lib/CodeGen/ShrinkWrap.cpp b/lib/CodeGen/ShrinkWrap.cpp index 2d9012978fd..b35bf6ba3a7 100644 --- a/lib/CodeGen/ShrinkWrap.cpp +++ b/lib/CodeGen/ShrinkWrap.cpp @@ -449,7 +449,7 @@ static bool isIrreducibleCFG(const MachineFunction &MF, } bool ShrinkWrap::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction()) || MF.empty() || !isShrinkWrapEnabled(MF)) + if (skipFunction(MF.getFunction()) || MF.empty() || !isShrinkWrapEnabled(MF)) return false; DEBUG(dbgs() << "**** Analysing " << MF.getName() << '\n'); @@ -569,10 +569,10 @@ bool ShrinkWrap::isShrinkWrapEnabled(const MachineFunction &MF) { // of the crash. 
Since a crash can happen anywhere, the // frame must be lowered before anything else happens for the // sanitizers to be able to get a correct stack frame. - !(MF.getFunction()->hasFnAttribute(Attribute::SanitizeAddress) || - MF.getFunction()->hasFnAttribute(Attribute::SanitizeThread) || - MF.getFunction()->hasFnAttribute(Attribute::SanitizeMemory) || - MF.getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress)); + !(MF.getFunction().hasFnAttribute(Attribute::SanitizeAddress) || + MF.getFunction().hasFnAttribute(Attribute::SanitizeThread) || + MF.getFunction().hasFnAttribute(Attribute::SanitizeMemory) || + MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress)); // If EnableShrinkWrap is set, it takes precedence over whatever the // target sets. The rationale is that we assume we want to test // something related to shrink-wrapping. diff --git a/lib/CodeGen/StackColoring.cpp b/lib/CodeGen/StackColoring.cpp index 18bba42907b..608845498b4 100644 --- a/lib/CodeGen/StackColoring.cpp +++ b/lib/CodeGen/StackColoring.cpp @@ -1129,8 +1129,7 @@ void StackColoring::expungeSlotMap(DenseMap<int, int> &SlotRemap, bool StackColoring::runOnMachineFunction(MachineFunction &Func) { DEBUG(dbgs() << "********** Stack Coloring **********\n" - << "********** Function: " - << ((const Value*)Func.getFunction())->getName() << '\n'); + << "********** Function: " << Func.getName() << '\n'); MF = &Func; MFI = &MF->getFrameInfo(); Indexes = &getAnalysis<SlotIndexes>(); @@ -1170,7 +1169,7 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) { // Don't continue because there are not enough lifetime markers, or the // stack is too small, or we are told not to optimize the slots. if (NumMarkers < 2 || TotalSize < 16 || DisableColoring || - skipFunction(*Func.getFunction())) { + skipFunction(Func.getFunction())) { DEBUG(dbgs()<<"Will not try to merge slots.\n"); return removeAllMarkers(); } diff --git a/lib/CodeGen/TailDuplication.cpp b/lib/CodeGen/TailDuplication.cpp index 7b4b700aaf8..df1eebf43b2 100644 --- a/lib/CodeGen/TailDuplication.cpp +++ b/lib/CodeGen/TailDuplication.cpp @@ -49,7 +49,7 @@ char &llvm::TailDuplicateID = TailDuplicatePass::ID; INITIALIZE_PASS(TailDuplicatePass, DEBUG_TYPE, "Tail Duplication", false, false) bool TailDuplicatePass::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; auto MBPI = &getAnalysis<MachineBranchProbabilityInfo>(); diff --git a/lib/CodeGen/TailDuplicator.cpp b/lib/CodeGen/TailDuplicator.cpp index 63eb6cc651b..f51c884839b 100644 --- a/lib/CodeGen/TailDuplicator.cpp +++ b/lib/CodeGen/TailDuplicator.cpp @@ -550,7 +550,7 @@ bool TailDuplicator::shouldTailDuplicate(bool IsSimple, unsigned MaxDuplicateCount; if (TailDupSize == 0 && TailDuplicateSize.getNumOccurrences() == 0 && - MF->getFunction()->optForSize()) + MF->getFunction().optForSize()) MaxDuplicateCount = 1; else if (TailDupSize == 0) MaxDuplicateCount = TailDuplicateSize; diff --git a/lib/CodeGen/TargetFrameLoweringImpl.cpp b/lib/CodeGen/TargetFrameLoweringImpl.cpp index 6f1a0038ee5..b2151eb4965 100644 --- a/lib/CodeGen/TargetFrameLoweringImpl.cpp +++ b/lib/CodeGen/TargetFrameLoweringImpl.cpp @@ -32,7 +32,7 @@ TargetFrameLowering::~TargetFrameLowering() = default; /// The default implementation just looks at attribute "no-frame-pointer-elim".
bool TargetFrameLowering::noFramePointerElim(const MachineFunction &MF) const { - auto Attr = MF.getFunction()->getFnAttribute("no-frame-pointer-elim"); + auto Attr = MF.getFunction().getFnAttribute("no-frame-pointer-elim"); return Attr.getValueAsString() == "true"; } @@ -82,7 +82,7 @@ void TargetFrameLowering::determineCalleeSaves(MachineFunction &MF, return; // In Naked functions we aren't going to save any registers. - if (MF.getFunction()->hasFnAttribute(Attribute::Naked)) + if (MF.getFunction().hasFnAttribute(Attribute::Naked)) return; // Functions which call __builtin_unwind_init get all their registers saved. @@ -99,7 +99,7 @@ unsigned TargetFrameLowering::getStackAlignmentSkew( const MachineFunction &MF) const { // When HHVM function is called, the stack is skewed as the return address // is removed from the stack before we enter the function. - if (LLVM_UNLIKELY(MF.getFunction()->getCallingConv() == CallingConv::HHVM)) + if (LLVM_UNLIKELY(MF.getFunction().getCallingConv() == CallingConv::HHVM)) return MF.getTarget().getPointerSize(); return 0; diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp index 22a43ab8ac5..543c12eebb4 100644 --- a/lib/CodeGen/TargetLoweringBase.cpp +++ b/lib/CodeGen/TargetLoweringBase.cpp @@ -1592,8 +1592,8 @@ void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) { /// Get the reciprocal estimate attribute string for a function that will /// override the target defaults. static StringRef getRecipEstimateForFunc(MachineFunction &MF) { - const Function *F = MF.getFunction(); - return F->getFnAttribute("reciprocal-estimates").getValueAsString(); + const Function &F = MF.getFunction(); + return F.getFnAttribute("reciprocal-estimates").getValueAsString(); } /// Construct a string for the given reciprocal operation of the given type. diff --git a/lib/CodeGen/TargetOptionsImpl.cpp b/lib/CodeGen/TargetOptionsImpl.cpp index 98e07bedb36..853e71d0efa 100644 --- a/lib/CodeGen/TargetOptionsImpl.cpp +++ b/lib/CodeGen/TargetOptionsImpl.cpp @@ -28,7 +28,7 @@ bool TargetOptions::DisableFramePointerElim(const MachineFunction &MF) const { return true; // Check to see if we should eliminate non-leaf frame pointers. 
- if (MF.getFunction()->hasFnAttribute("no-frame-pointer-elim-non-leaf")) + if (MF.getFunction().hasFnAttribute("no-frame-pointer-elim-non-leaf")) return MF.getFrameInfo().hasCalls(); return false; diff --git a/lib/CodeGen/TargetRegisterInfo.cpp b/lib/CodeGen/TargetRegisterInfo.cpp index f255ba4fef9..f03c3b8300f 100644 --- a/lib/CodeGen/TargetRegisterInfo.cpp +++ b/lib/CodeGen/TargetRegisterInfo.cpp @@ -422,21 +422,21 @@ TargetRegisterInfo::getRegAllocationHints(unsigned VirtReg, } bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const { - return !MF.getFunction()->hasFnAttribute("no-realign-stack"); + return !MF.getFunction().hasFnAttribute("no-realign-stack"); } bool TargetRegisterInfo::needsStackRealignment( const MachineFunction &MF) const { const MachineFrameInfo &MFI = MF.getFrameInfo(); const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); - const Function *F = MF.getFunction(); + const Function &F = MF.getFunction(); unsigned StackAlign = TFI->getStackAlignment(); bool requiresRealignment = ((MFI.getMaxAlignment() > StackAlign) || - F->hasFnAttribute(Attribute::StackAlignment)); - if (MF.getFunction()->hasFnAttribute("stackrealign") || requiresRealignment) { + F.hasFnAttribute(Attribute::StackAlignment)); + if (F.hasFnAttribute("stackrealign") || requiresRealignment) { if (canRealignStack(MF)) return true; - DEBUG(dbgs() << "Can't realign function's stack: " << F->getName() << "\n"); + DEBUG(dbgs() << "Can't realign function's stack: " << F.getName() << "\n"); } return false; } diff --git a/lib/CodeGen/TwoAddressInstructionPass.cpp b/lib/CodeGen/TwoAddressInstructionPass.cpp index f48db12b975..774b76f84b7 100644 --- a/lib/CodeGen/TwoAddressInstructionPass.cpp +++ b/lib/CodeGen/TwoAddressInstructionPass.cpp @@ -1663,7 +1663,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) { OptLevel = TM.getOptLevel(); // Disable optimizations if requested. We cannot skip the whole pass as some // fixups are necessary for correctness. 
- if (skipFunction(*Func.getFunction())) + if (skipFunction(Func.getFunction())) OptLevel = CodeGenOpt::None; bool MadeChange = false; diff --git a/lib/CodeGen/XRayInstrumentation.cpp b/lib/CodeGen/XRayInstrumentation.cpp index 60ac24e62a4..3d83afcf1fc 100644 --- a/lib/CodeGen/XRayInstrumentation.cpp +++ b/lib/CodeGen/XRayInstrumentation.cpp @@ -142,7 +142,7 @@ void XRayInstrumentation::prependRetWithPatchableExit( } bool XRayInstrumentation::runOnMachineFunction(MachineFunction &MF) { - auto &F = *MF.getFunction(); + auto &F = MF.getFunction(); auto InstrAttr = F.getFnAttribute("function-instrument"); bool AlwaysInstrument = !InstrAttr.hasAttribute(Attribute::None) && InstrAttr.isStringAttribute() && diff --git a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp index 1135f0f1262..38a7e331bb9 100644 --- a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp +++ b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp @@ -308,7 +308,7 @@ public: //===----------------------------------------------------------------------===// bool AArch64A57FPLoadBalancing::runOnMachineFunction(MachineFunction &F) { - if (skipFunction(*F.getFunction())) + if (skipFunction(F.getFunction())) return false; if (!F.getSubtarget<AArch64Subtarget>().balanceFPOps()) diff --git a/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp b/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp index 7da56ef030a..338daecb49e 100644 --- a/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp +++ b/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp @@ -393,7 +393,7 @@ bool AArch64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) { bool Changed = false; DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n"); - if (skipFunction(*mf.getFunction())) + if (skipFunction(mf.getFunction())) return false; MRI = &mf.getRegInfo(); diff --git a/lib/Target/AArch64/AArch64CallLowering.cpp b/lib/Target/AArch64/AArch64CallLowering.cpp index 838305858ea..08152c0d83d 100644 --- a/lib/Target/AArch64/AArch64CallLowering.cpp +++ b/lib/Target/AArch64/AArch64CallLowering.cpp @@ -220,7 +220,7 @@ void AArch64CallLowering::splitToValueTypes( bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, unsigned VReg) const { MachineFunction &MF = MIRBuilder.getMF(); - const Function &F = *MF.getFunction(); + const Function &F = MF.getFunction(); auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR); assert(((Val && VReg) || (!Val && !VReg)) && "Return value without a vreg"); @@ -322,7 +322,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const ArgInfo &OrigRet, ArrayRef<ArgInfo> OrigArgs) const { MachineFunction &MF = MIRBuilder.getMF(); - const Function &F = *MF.getFunction(); + const Function &F = MF.getFunction(); MachineRegisterInfo &MRI = MF.getRegInfo(); auto &DL = F.getParent()->getDataLayout(); diff --git a/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp b/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp index bb750c5093d..b88fba4452a 100644 --- a/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp +++ b/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp @@ -42,7 +42,7 @@ struct LDTLSCleanup : public MachineFunctionPass { } bool runOnMachineFunction(MachineFunction &MF) override { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); diff --git a/lib/Target/AArch64/AArch64CollectLOH.cpp b/lib/Target/AArch64/AArch64CollectLOH.cpp index 
d9d48e9900a..0a9167edcdb 100644 --- a/lib/Target/AArch64/AArch64CollectLOH.cpp +++ b/lib/Target/AArch64/AArch64CollectLOH.cpp @@ -482,7 +482,7 @@ static void handleNormalInst(const MachineInstr &MI, LOHInfo *LOHInfos) { } bool AArch64CollectLOH::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; DEBUG(dbgs() << "********** AArch64 Collect LOH **********\n" diff --git a/lib/Target/AArch64/AArch64CondBrTuning.cpp b/lib/Target/AArch64/AArch64CondBrTuning.cpp index 6fc57623ef4..30cefbad884 100644 --- a/lib/Target/AArch64/AArch64CondBrTuning.cpp +++ b/lib/Target/AArch64/AArch64CondBrTuning.cpp @@ -290,7 +290,7 @@ bool AArch64CondBrTuning::tryToTuneBranch(MachineInstr &MI, } bool AArch64CondBrTuning::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; DEBUG(dbgs() << "********** AArch64 Conditional Branch Tuning **********\n" diff --git a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp index f765825cdee..d14bde33d94 100644 --- a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp +++ b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp @@ -327,7 +327,7 @@ bool AArch64ConditionOptimizer::adjustTo(MachineInstr *CmpMI, bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n" << "********** Function: " << MF.getName() << '\n'); - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; TII = MF.getSubtarget().getInstrInfo(); diff --git a/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/lib/Target/AArch64/AArch64ConditionalCompares.cpp index f7c97117ba5..b0bda7c43c1 100644 --- a/lib/Target/AArch64/AArch64ConditionalCompares.cpp +++ b/lib/Target/AArch64/AArch64ConditionalCompares.cpp @@ -924,7 +924,7 @@ bool AArch64ConditionalCompares::tryConvert(MachineBasicBlock *MBB) { bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n" << "********** Function: " << MF.getName() << '\n'); - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; TII = MF.getSubtarget().getInstrInfo(); @@ -936,7 +936,7 @@ bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) { MBPI = &getAnalysis<MachineBranchProbabilityInfo>(); Traces = &getAnalysis<MachineTraceMetrics>(); MinInstr = nullptr; - MinSize = MF.getFunction()->optForMinSize(); + MinSize = MF.getFunction().optForMinSize(); bool Changed = false; CmpConv.runOnMachineFunction(MF, MBPI); diff --git a/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp index 0298c76d68e..8e7e740da6f 100644 --- a/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp +++ b/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp @@ -198,7 +198,7 @@ void AArch64DeadRegisterDefinitions::processMachineBasicBlock( // Scan the function for instructions that have a dead definition of a // register. Replace that register with the zero register when possible. 
bool AArch64DeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; TRI = MF.getSubtarget().getRegisterInfo(); diff --git a/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp b/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp index 7b4ab7cc1a3..d1ddb2e3ef7 100644 --- a/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp +++ b/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp @@ -798,7 +798,7 @@ bool FalkorHWPFFix::runOnMachineFunction(MachineFunction &Fn) { if (ST.getProcFamily() != AArch64Subtarget::Falkor) return false; - if (skipFunction(*Fn.getFunction())) + if (skipFunction(Fn.getFunction())) return false; TII = static_cast<const AArch64InstrInfo *>(ST.getInstrInfo()); diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp index 72330d9b7cb..73944359223 100644 --- a/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -174,7 +174,7 @@ bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const { return false; // Don't use the red zone if the function explicitly asks us not to. // This is typically used for kernel code. - if (MF.getFunction()->hasFnAttribute(Attribute::NoRedZone)) + if (MF.getFunction().hasFnAttribute(Attribute::NoRedZone)) return false; const MachineFrameInfo &MFI = MF.getFrameInfo(); @@ -459,13 +459,13 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const { MachineBasicBlock::iterator MBBI = MBB.begin(); const MachineFrameInfo &MFI = MF.getFrameInfo(); - const Function *Fn = MF.getFunction(); + const Function &F = MF.getFunction(); const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); const TargetInstrInfo *TII = Subtarget.getInstrInfo(); MachineModuleInfo &MMI = MF.getMMI(); AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); - bool needsFrameMoves = MMI.hasDebugInfo() || Fn->needsUnwindTableEntry(); + bool needsFrameMoves = MMI.hasDebugInfo() || F.needsUnwindTableEntry(); bool HasFP = hasFP(MF); // Debug location must be unknown since the first debug location is used @@ -474,7 +474,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF, // All calls are tail calls in GHC calling conv, and functions have no // prologue/epilogue. - if (MF.getFunction()->getCallingConv() == CallingConv::GHC) + if (MF.getFunction().getCallingConv() == CallingConv::GHC) return; int NumBytes = (int)MFI.getStackSize(); @@ -507,7 +507,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF, } bool IsWin64 = - Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()); + Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()); unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0; auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject; @@ -716,7 +716,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF, // All calls are tail calls in GHC calling conv, and functions have no // prologue/epilogue. - if (MF.getFunction()->getCallingConv() == CallingConv::GHC) + if (MF.getFunction().getCallingConv() == CallingConv::GHC) return; // Initial and residual are named for consistency with the prologue. Note that @@ -765,7 +765,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF, // it as the 2nd argument of AArch64ISD::TC_RETURN. 
bool IsWin64 = - Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()); + Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()); unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0; auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject; @@ -857,7 +857,7 @@ int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF, const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); bool IsWin64 = - Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()); + Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()); unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0; int FPOffset = MFI.getObjectOffset(FI) + FixedObject + 16; int Offset = MFI.getObjectOffset(FI) + MFI.getStackSize(); @@ -928,7 +928,7 @@ static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) { static bool produceCompactUnwindFrame(MachineFunction &MF) { const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); - AttributeList Attrs = MF.getFunction()->getAttributes(); + AttributeList Attrs = MF.getFunction().getAttributes(); return Subtarget.isTargetMachO() && !(Subtarget.getTargetLowering()->supportSwiftError() && Attrs.hasAttrSomewhere(Attribute::SwiftError)); @@ -959,7 +959,7 @@ static void computeCalleeSaveRegisterPairs( AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); MachineFrameInfo &MFI = MF.getFrameInfo(); - CallingConv::ID CC = MF.getFunction()->getCallingConv(); + CallingConv::ID CC = MF.getFunction().getCallingConv(); unsigned Count = CSI.size(); (void)CC; // MachO's compact unwind format relies on all registers being stored in @@ -1154,7 +1154,7 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, RegScavenger *RS) const { // All calls are tail calls in GHC calling conv, and functions have no // prologue/epilogue. - if (MF.getFunction()->getCallingConv() == CallingConv::GHC) + if (MF.getFunction().getCallingConv() == CallingConv::GHC) return; TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 06005f6b688..0b10246b0cc 100644 --- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -53,7 +53,7 @@ public: } bool runOnMachineFunction(MachineFunction &MF) override { - ForCodeSize = MF.getFunction()->optForSize(); + ForCodeSize = MF.getFunction().optForSize(); Subtarget = &MF.getSubtarget<AArch64Subtarget>(); return SelectionDAGISel::runOnMachineFunction(MF); } diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index aaf2811563d..1242cf5be18 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -2731,7 +2731,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments( SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo &MFI = MF.getFrameInfo(); - bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv()); + bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()); // Assign locations to all of the incoming arguments. 
SmallVector<CCValAssign, 16> ArgLocs; @@ -2745,7 +2745,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments( // we use a special version of AnalyzeFormalArguments to pass in ValVT and // LocVT. unsigned NumArgs = Ins.size(); - Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin(); + Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin(); unsigned CurArgIdx = 0; for (unsigned i = 0; i != NumArgs; ++i) { MVT ValVT = Ins[i].VT; @@ -2935,7 +2935,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo, MachineFrameInfo &MFI = MF.getFrameInfo(); AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); auto PtrVT = getPointerTy(DAG.getDataLayout()); - bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv()); + bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()); SmallVector<SDValue, 8> MemOps; @@ -3087,15 +3087,15 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization( return false; MachineFunction &MF = DAG.getMachineFunction(); - const Function *CallerF = MF.getFunction(); - CallingConv::ID CallerCC = CallerF->getCallingConv(); + const Function &CallerF = MF.getFunction(); + CallingConv::ID CallerCC = CallerF.getCallingConv(); bool CCMatch = CallerCC == CalleeCC; // Byval parameters hand the function a pointer directly into the stack area // we want to reuse during a tail call. Working around this *is* possible (see // X86) but less efficient and uglier in LowerCall. - for (Function::const_arg_iterator i = CallerF->arg_begin(), - e = CallerF->arg_end(); + for (Function::const_arg_iterator i = CallerF.arg_begin(), + e = CallerF.arg_end(); i != e; ++i) if (i->hasByValAttr()) return false; @@ -4185,7 +4185,7 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op, } SDValue AArch64TargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const { - if (DAG.getMachineFunction().getFunction()->hasFnAttribute( + if (DAG.getMachineFunction().getFunction().hasFnAttribute( Attribute::NoImplicitFloat)) return SDValue(); @@ -4668,7 +4668,7 @@ SDValue AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); - if (Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv())) + if (Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv())) return LowerWin64_VASTART(Op, DAG); else if (Subtarget->isTargetDarwin()) return LowerDarwin_VASTART(Op, DAG); @@ -7909,9 +7909,9 @@ EVT AArch64TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign, // instruction to materialize the v2i64 zero and one store (with restrictive // addressing mode). Just do two i64 store of zero-registers. 
bool Fast; - const Function *F = MF.getFunction(); + const Function &F = MF.getFunction(); if (Subtarget->hasFPARMv8() && !IsMemset && Size >= 16 && - !F->hasFnAttribute(Attribute::NoImplicitFloat) && + !F.hasFnAttribute(Attribute::NoImplicitFloat) && (memOpAlign(SrcAlign, DstAlign, 16) || (allowsMisalignedMemoryAccesses(MVT::f128, 0, 1, &Fast) && Fast))) return MVT::f128; @@ -8156,7 +8156,7 @@ SDValue AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, std::vector<SDNode *> *Created) const { - AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes(); + AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); if (isIntDivCheap(N->getValueType(0), Attr)) return SDValue(N,0); // Lower SDIV as SDIV @@ -9577,7 +9577,7 @@ static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, return SDValue(); // Don't split at -Oz. - if (DAG.getMachineFunction().getFunction()->optForMinSize()) + if (DAG.getMachineFunction().getFunction().optForMinSize()) return SDValue(); // Don't split v2i64 vectors. Memcpy lowering produces those and splitting @@ -10939,7 +10939,7 @@ void AArch64TargetLowering::insertCopiesSplitCSR( // fine for CXX_FAST_TLS since the C++-style TLS access functions should be // nounwind. If we want to generalize this later, we may need to emit // CFI pseudo-instructions. - assert(Entry->getParent()->getFunction()->hasFnAttribute( + assert(Entry->getParent()->getFunction().hasFnAttribute( Attribute::NoUnwind) && "Function should be nounwind in insertCopiesSplitCSR!"); Entry->addLiveIn(*I); diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h index f88c0ac6653..8d78b5b6b5b 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.h +++ b/lib/Target/AArch64/AArch64ISelLowering.h @@ -415,7 +415,7 @@ public: // Do not merge to float value size (128 bytes) if no implicit // float attribute is set. - bool NoFloat = DAG.getMachineFunction().getFunction()->hasFnAttribute( + bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute( Attribute::NoImplicitFloat); if (NoFloat) @@ -444,8 +444,8 @@ public: } bool supportSplitCSR(MachineFunction *MF) const override { - return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS && - MF->getFunction()->hasFnAttribute(Attribute::NoUnwind); + return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS && + MF->getFunction().hasFnAttribute(Attribute::NoUnwind); } void initializeSplitCSR(MachineBasicBlock *Entry) const override; void insertCopiesSplitCSR( diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp index 74aee126d45..e26f15bedb7 100644 --- a/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -4753,21 +4753,21 @@ AArch64InstrInfo::getOutlininingCandidateInfo( bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const { - const Function *F = MF.getFunction(); + const Function &F = MF.getFunction(); // If F uses a redzone, then don't outline from it because it might mess up // the stack. - if (!F->hasFnAttribute(Attribute::NoRedZone)) + if (!F.hasFnAttribute(Attribute::NoRedZone)) return false; // If anyone is using the address of this function, don't outline from it. - if (F->hasAddressTaken()) + if (F.hasAddressTaken()) return false; // Can F be deduplicated by the linker? If it can, don't outline from it. 
- if (!OutlineFromLinkOnceODRs && F->hasLinkOnceODRLinkage()) + if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage()) return false; - + return true; } diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td index 841265c3367..79826ca2ed8 100644 --- a/lib/Target/AArch64/AArch64InstrInfo.td +++ b/lib/Target/AArch64/AArch64InstrInfo.td @@ -328,10 +328,10 @@ def AArch64umaxv : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>; // the Function object through the <Target>Subtarget and objections were raised // to that (see post-commit review comments for r301750). let RecomputePerFunction = 1 in { - def ForCodeSize : Predicate<"MF->getFunction()->optForSize()">; - def NotForCodeSize : Predicate<"!MF->getFunction()->optForSize()">; + def ForCodeSize : Predicate<"MF->getFunction().optForSize()">; + def NotForCodeSize : Predicate<"!MF->getFunction().optForSize()">; // Avoid generating STRQro if it is slow, unless we're optimizing for code size. - def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction()->optForSize()">; + def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction().optForSize()">; } include "AArch64InstrFormats.td" diff --git a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp index c406228b7fe..8a29456430b 100644 --- a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp +++ b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp @@ -1759,7 +1759,7 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB, } bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) { - if (skipFunction(*Fn.getFunction())) + if (skipFunction(Fn.getFunction())) return false; Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget()); diff --git a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp index 98480835376..e5822b11432 100644 --- a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp +++ b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp @@ -485,7 +485,7 @@ bool AArch64RedundantCopyElimination::optimizeBlock(MachineBasicBlock *MBB) { bool AArch64RedundantCopyElimination::runOnMachineFunction( MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; TRI = MF.getSubtarget().getRegisterInfo(); MRI = &MF.getRegInfo(); diff --git a/lib/Target/AArch64/AArch64RegisterInfo.cpp b/lib/Target/AArch64/AArch64RegisterInfo.cpp index 1059bc37c8f..88dd297e007 100644 --- a/lib/Target/AArch64/AArch64RegisterInfo.cpp +++ b/lib/Target/AArch64/AArch64RegisterInfo.cpp @@ -42,22 +42,22 @@ AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT) const MCPhysReg * AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { assert(MF && "Invalid MachineFunction pointer."); - if (MF->getFunction()->getCallingConv() == CallingConv::GHC) + if (MF->getFunction().getCallingConv() == CallingConv::GHC) // GHC set of callee saved regs is empty as all those regs are // used for passing STG regs around return CSR_AArch64_NoRegs_SaveList; - if (MF->getFunction()->getCallingConv() == CallingConv::AnyReg) + if (MF->getFunction().getCallingConv() == CallingConv::AnyReg) return CSR_AArch64_AllRegs_SaveList; - if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS) + if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS) return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR() ? 
CSR_AArch64_CXX_TLS_Darwin_PE_SaveList : CSR_AArch64_CXX_TLS_Darwin_SaveList; if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering() ->supportSwiftError() && - MF->getFunction()->getAttributes().hasAttrSomewhere( + MF->getFunction().getAttributes().hasAttrSomewhere( Attribute::SwiftError)) return CSR_AArch64_AAPCS_SwiftError_SaveList; - if (MF->getFunction()->getCallingConv() == CallingConv::PreserveMost) + if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost) return CSR_AArch64_RT_MostRegs_SaveList; else return CSR_AArch64_AAPCS_SaveList; @@ -66,7 +66,7 @@ AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy( const MachineFunction *MF) const { assert(MF && "Invalid MachineFunction pointer."); - if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS && + if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS && MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()) return CSR_AArch64_CXX_TLS_Darwin_ViaCopy_SaveList; return nullptr; @@ -84,7 +84,7 @@ AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF, return CSR_AArch64_CXX_TLS_Darwin_RegMask; if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering() ->supportSwiftError() && - MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) + MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError)) return CSR_AArch64_AAPCS_SwiftError_RegMask; if (CC == CallingConv::PreserveMost) return CSR_AArch64_RT_MostRegs_RegMask; diff --git a/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp b/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp index 7d439058580..e1851875abc 100644 --- a/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp +++ b/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp @@ -690,7 +690,7 @@ unsigned AArch64SIMDInstrOpt::determineSrcReg(MachineInstr &MI) const { } bool AArch64SIMDInstrOpt::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; TII = MF.getSubtarget().getInstrInfo(); diff --git a/lib/Target/AArch64/AArch64StorePairSuppress.cpp b/lib/Target/AArch64/AArch64StorePairSuppress.cpp index 78fc322158b..571e61d7083 100644 --- a/lib/Target/AArch64/AArch64StorePairSuppress.cpp +++ b/lib/Target/AArch64/AArch64StorePairSuppress.cpp @@ -120,7 +120,7 @@ bool AArch64StorePairSuppress::isNarrowFPStore(const MachineInstr &MI) { } bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; const TargetSubtargetInfo &ST = MF.getSubtarget(); diff --git a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp index c53235db423..bb628b8c558 100644 --- a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp +++ b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp @@ -205,7 +205,7 @@ void AMDGPUAsmPrinter::EmitFunctionBodyStart() { if (TM.getTargetTriple().getOS() != Triple::AMDHSA) return; - HSAMetadataStream.emitKernel(*MF->getFunction(), + HSAMetadataStream.emitKernel(MF->getFunction(), getHSACodeProps(*MF, CurrentProgramInfo), getHSADebugProps(*MF, CurrentProgramInfo)); } @@ -215,14 +215,14 @@ void AMDGPUAsmPrinter::EmitFunctionEntryLabel() { const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>(); if (MFI->isEntryFunction() && STM.isAmdCodeObjectV2(*MF)) { SmallString<128> SymbolName; - getNameWithPrefix(SymbolName, MF->getFunction()), + getNameWithPrefix(SymbolName, &MF->getFunction()), 
getTargetStreamer()->EmitAMDGPUSymbolType( SymbolName, ELF::STT_AMDGPU_HSA_KERNEL); } const AMDGPUSubtarget &STI = MF->getSubtarget<AMDGPUSubtarget>(); if (STI.dumpCode()) { // Disassemble function name label to text. - DisasmLines.push_back(MF->getFunction()->getName().str() + ":"); + DisasmLines.push_back(MF->getName().str() + ":"); DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLines.back().size()); HexLines.push_back(""); } @@ -314,7 +314,7 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) { getSIProgramInfo(CurrentProgramInfo, MF); } else { auto I = CallGraphResourceInfo.insert( - std::make_pair(MF.getFunction(), SIFunctionResourceInfo())); + std::make_pair(&MF.getFunction(), SIFunctionResourceInfo())); SIFunctionResourceInfo &Info = I.first->second; assert(I.second && "should only be called once per function"); Info = analyzeResourceUsage(MF); @@ -343,7 +343,7 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) { if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) { if (!MFI->isEntryFunction()) { OutStreamer->emitRawComment(" Function info:", false); - SIFunctionResourceInfo &Info = CallGraphResourceInfo[MF.getFunction()]; + SIFunctionResourceInfo &Info = CallGraphResourceInfo[&MF.getFunction()]; emitCommonFunctionComments( Info.NumVGPR, Info.getTotalNumSGPRs(MF.getSubtarget<SISubtarget>()), @@ -469,7 +469,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) { unsigned RsrcReg; if (STM.getGeneration() >= R600Subtarget::EVERGREEN) { // Evergreen / Northern Islands - switch (MF.getFunction()->getCallingConv()) { + switch (MF.getFunction().getCallingConv()) { default: LLVM_FALLTHROUGH; case CallingConv::AMDGPU_CS: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break; case CallingConv::AMDGPU_GS: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break; @@ -478,7 +478,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) { } } else { // R600 / R700 - switch (MF.getFunction()->getCallingConv()) { + switch (MF.getFunction().getCallingConv()) { default: LLVM_FALLTHROUGH; case CallingConv::AMDGPU_GS: LLVM_FALLTHROUGH; case CallingConv::AMDGPU_CS: LLVM_FALLTHROUGH; @@ -493,7 +493,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) { OutStreamer->EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4); OutStreamer->EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4); - if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) { + if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) { OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4); OutStreamer->EmitIntValue(alignTo(MFI->getLDSSize(), 4) >> 2, 4); } @@ -787,9 +787,9 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo, ProgInfo.DynamicCallStack = Info.HasDynamicallySizedStack || Info.HasRecursion; if (!isUInt<32>(ProgInfo.ScratchSize)) { - DiagnosticInfoStackSize DiagStackSize(*MF.getFunction(), + DiagnosticInfoStackSize DiagStackSize(MF.getFunction(), ProgInfo.ScratchSize, DS_Error); - MF.getFunction()->getContext().diagnose(DiagStackSize); + MF.getFunction().getContext().diagnose(DiagStackSize); } const SISubtarget &STM = MF.getSubtarget<SISubtarget>(); @@ -808,8 +808,8 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo, unsigned MaxAddressableNumSGPRs = STM.getAddressableNumSGPRs(); if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) { // This can happen due to a compiler bug or when using inline asm. 
- LLVMContext &Ctx = MF.getFunction()->getContext(); - DiagnosticInfoResourceLimit Diag(*MF.getFunction(), + LLVMContext &Ctx = MF.getFunction().getContext(); + DiagnosticInfoResourceLimit Diag(MF.getFunction(), "addressable scalar registers", ProgInfo.NumSGPR, DS_Error, DK_ResourceLimit, @@ -836,8 +836,8 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo, if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) { // This can happen due to a compiler bug or when using inline asm to use // the registers which are usually reserved for vcc etc. - LLVMContext &Ctx = MF.getFunction()->getContext(); - DiagnosticInfoResourceLimit Diag(*MF.getFunction(), + LLVMContext &Ctx = MF.getFunction().getContext(); + DiagnosticInfoResourceLimit Diag(MF.getFunction(), "scalar registers", ProgInfo.NumSGPR, DS_Error, DK_ResourceLimit, @@ -856,15 +856,15 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo, } if (MFI->getNumUserSGPRs() > STM.getMaxNumUserSGPRs()) { - LLVMContext &Ctx = MF.getFunction()->getContext(); - DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "user SGPRs", + LLVMContext &Ctx = MF.getFunction().getContext(); + DiagnosticInfoResourceLimit Diag(MF.getFunction(), "user SGPRs", MFI->getNumUserSGPRs(), DS_Error); Ctx.diagnose(Diag); } if (MFI->getLDSSize() > static_cast<unsigned>(STM.getLocalMemorySize())) { - LLVMContext &Ctx = MF.getFunction()->getContext(); - DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "local memory", + LLVMContext &Ctx = MF.getFunction().getContext(); + DiagnosticInfoResourceLimit Diag(MF.getFunction(), "local memory", MFI->getLDSSize(), DS_Error); Ctx.diagnose(Diag); } @@ -977,9 +977,9 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF, const SIProgramInfo &CurrentProgramInfo) { const SISubtarget &STM = MF.getSubtarget<SISubtarget>(); const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); - unsigned RsrcReg = getRsrcReg(MF.getFunction()->getCallingConv()); + unsigned RsrcReg = getRsrcReg(MF.getFunction().getCallingConv()); - if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) { + if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) { OutStreamer->EmitIntValue(R_00B848_COMPUTE_PGM_RSRC1, 4); OutStreamer->EmitIntValue(CurrentProgramInfo.ComputePGMRSrc1, 4); @@ -997,13 +997,13 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF, OutStreamer->EmitIntValue(S_00B028_VGPRS(CurrentProgramInfo.VGPRBlocks) | S_00B028_SGPRS(CurrentProgramInfo.SGPRBlocks), 4); unsigned Rsrc2Val = 0; - if (STM.isVGPRSpillingEnabled(*MF.getFunction())) { + if (STM.isVGPRSpillingEnabled(MF.getFunction())) { OutStreamer->EmitIntValue(R_0286E8_SPI_TMPRING_SIZE, 4); OutStreamer->EmitIntValue(S_0286E8_WAVESIZE(CurrentProgramInfo.ScratchBlocks), 4); if (TM.getTargetTriple().getOS() == Triple::AMDPAL) Rsrc2Val = S_00B84C_SCRATCH_EN(CurrentProgramInfo.ScratchBlocks > 0); } - if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_PS) { + if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) { OutStreamer->EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4); OutStreamer->EmitIntValue(MFI->getPSInputEnable(), 4); OutStreamer->EmitIntValue(R_0286D0_SPI_PS_INPUT_ADDR, 4); @@ -1036,13 +1036,13 @@ void AMDGPUAsmPrinter::EmitPALMetadata(const MachineFunction &MF, // we can use the same fixed value that .AMDGPU.config has for Mesa. Note // that we use a register number rather than a byte offset, so we need to // divide by 4. 
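// A worked instance of the divide-by-4 noted above, as a sketch (0x00B848
// is the byte offset encoded in the R_00B848_COMPUTE_PGM_RSRC1 macro name;
// treated here as an assumed constant, illustrative only):
//
//   unsigned Rsrc1Reg = 0x00B848 / 4;   // byte offset -> register number 0x2E12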
- unsigned Rsrc1Reg = getRsrcReg(MF.getFunction()->getCallingConv()) / 4; + unsigned Rsrc1Reg = getRsrcReg(MF.getFunction().getCallingConv()) / 4; unsigned Rsrc2Reg = Rsrc1Reg + 1; // Also calculate the PAL metadata key for *S_SCRATCH_SIZE. It can be used // with a constant offset to access any non-register shader-specific PAL // metadata key. unsigned ScratchSizeKey = PALMD::Key::CS_SCRATCH_SIZE; - switch (MF.getFunction()->getCallingConv()) { + switch (MF.getFunction().getCallingConv()) { case CallingConv::AMDGPU_PS: ScratchSizeKey = PALMD::Key::PS_SCRATCH_SIZE; break; @@ -1068,7 +1068,7 @@ void AMDGPUAsmPrinter::EmitPALMetadata(const MachineFunction &MF, PALMD::Key::VS_NUM_USED_SGPRS - PALMD::Key::VS_SCRATCH_SIZE; PALMetadataMap[NumUsedVgprsKey] = CurrentProgramInfo.NumVGPRsForWavesPerEU; PALMetadataMap[NumUsedSgprsKey] = CurrentProgramInfo.NumSGPRsForWavesPerEU; - if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) { + if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) { PALMetadataMap[Rsrc1Reg] |= CurrentProgramInfo.ComputePGMRSrc1; PALMetadataMap[Rsrc2Reg] |= CurrentProgramInfo.ComputePGMRSrc2; // ScratchSize is in bytes, 16 aligned. @@ -1083,7 +1083,7 @@ void AMDGPUAsmPrinter::EmitPALMetadata(const MachineFunction &MF, PALMetadataMap[ScratchSizeKey] |= alignTo(CurrentProgramInfo.ScratchSize, 16); } - if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_PS) { + if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) { PALMetadataMap[Rsrc2Reg] |= S_00B02C_EXTRA_LDS_SIZE(CurrentProgramInfo.LDSBlocks); PALMetadataMap[R_0286CC_SPI_PS_INPUT_ENA / 4] |= MFI->getPSInputEnable(); diff --git a/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/lib/Target/AMDGPU/AMDGPUCallLowering.cpp index 6d6fccb10cb..5a913873193 100644 --- a/lib/Target/AMDGPU/AMDGPUCallLowering.cpp +++ b/lib/Target/AMDGPU/AMDGPUCallLowering.cpp @@ -43,7 +43,7 @@ unsigned AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder, MachineFunction &MF = MIRBuilder.getMF(); const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); MachineRegisterInfo &MRI = MF.getRegInfo(); - const Function &F = *MF.getFunction(); + const Function &F = MF.getFunction(); const DataLayout &DL = F.getParent()->getDataLayout(); PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUASI.CONSTANT_ADDRESS); LLT PtrType = getLLTForType(*PtrTy, DL); @@ -64,7 +64,7 @@ void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &MIRBuilder, Type *ParamTy, unsigned Offset, unsigned DstReg) const { MachineFunction &MF = MIRBuilder.getMF(); - const Function &F = *MF.getFunction(); + const Function &F = MF.getFunction(); const DataLayout &DL = F.getParent()->getDataLayout(); PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUASI.CONSTANT_ADDRESS); MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp index dd97c5ca974..49929441ef2 100644 --- a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -1069,7 +1069,7 @@ SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI, SDValue Callee = CLI.Callee; SelectionDAG &DAG = CLI.DAG; - const Function &Fn = *DAG.getMachineFunction().getFunction(); + const Function &Fn = DAG.getMachineFunction().getFunction(); StringRef FuncName("<unknown>"); @@ -1097,7 +1097,7 @@ SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI, SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { - const 
Function &Fn = *DAG.getMachineFunction().getFunction(); + const Function &Fn = DAG.getMachineFunction().getFunction(); DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca", SDLoc(Op).getDebugLoc()); @@ -1190,7 +1190,7 @@ SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI, } } - const Function &Fn = *DAG.getMachineFunction().getFunction(); + const Function &Fn = DAG.getMachineFunction().getFunction(); DiagnosticInfoUnsupported BadInit( Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc()); DAG.getContext()->diagnose(BadInit); diff --git a/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp b/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp index c15b37f9e9c..23fd8113932 100644 --- a/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp +++ b/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp @@ -153,7 +153,7 @@ void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const { int MCOpcode = TII->pseudoToMCOpcode(Opcode); if (MCOpcode == -1) { - LLVMContext &C = MI->getParent()->getParent()->getFunction()->getContext(); + LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext(); C.emitError("AMDGPUMCInstLower::lower - Pseudo instruction doesn't have " "a target-specific version: " + Twine(MI->getOpcode())); } @@ -205,7 +205,7 @@ void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) { StringRef Err; if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) { - LLVMContext &C = MI->getParent()->getParent()->getFunction()->getContext(); + LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext(); C.emitError("Illegal instruction detected: " + Err); MI->print(errs()); } diff --git a/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp b/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp index 9fb7f5f8892..b7c8c121353 100644 --- a/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp +++ b/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp @@ -19,7 +19,7 @@ AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) : MaxKernArgAlign(0), LDSSize(0), ABIArgOffset(0), - IsEntryFunction(AMDGPU::isEntryFunctionCC(MF.getFunction()->getCallingConv())), + IsEntryFunction(AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv())), NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath) { // FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset, // except reserved size is not correctly aligned. 
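The hunks above and below all apply one mechanical rewrite, so it is worth stating once: with getFunction() now returning const Function& rather than const Function*, call sites drop the leading '*' dereference, member access switches from '->' to '.', and the few consumers that still need a pointer (constant-pool values, map keys) take the address explicitly. A minimal sketch of the recurring patterns, assuming an LLVM MachineFunction &MF is in scope (illustrative only, not code from this commit):

    const Function &F = MF.getFunction();        // was: const Function *F = MF.getFunction();
    CallingConv::ID CC = F.getCallingConv();     // '->' becomes '.'
    bool Skip = skipFunction(MF.getFunction());  // was: skipFunction(*MF.getFunction())
    const Function *Key = &MF.getFunction();     // pointer consumers add an explicit '&'
    StringRef Name = MF.getName();               // shortcut replacing MF.getFunction().getName()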
diff --git a/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp b/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp index 8454dede0e1..5e4d33aaa69 100644 --- a/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp +++ b/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp @@ -43,7 +43,7 @@ unsigned AMDGPURegisterInfo::getSubRegFromChannel(unsigned Channel) const { // Forced to be here by one .inc const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs( const MachineFunction *MF) const { - CallingConv::ID CC = MF->getFunction()->getCallingConv(); + CallingConv::ID CC = MF->getFunction().getCallingConv(); switch (CC) { case CallingConv::C: case CallingConv::Fast: diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp index ca04097e1cb..80feaa44766 100644 --- a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp +++ b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp @@ -468,7 +468,7 @@ unsigned SISubtarget::getReservedNumSGPRs(const MachineFunction &MF) const { } unsigned SISubtarget::getMaxNumSGPRs(const MachineFunction &MF) const { - const Function &F = *MF.getFunction(); + const Function &F = MF.getFunction(); const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>(); // Compute maximum number of SGPRs function can use using default/requested @@ -518,7 +518,7 @@ unsigned SISubtarget::getMaxNumSGPRs(const MachineFunction &MF) const { } unsigned SISubtarget::getMaxNumVGPRs(const MachineFunction &MF) const { - const Function &F = *MF.getFunction(); + const Function &F = MF.getFunction(); const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>(); // Compute maximum number of VGPRs function can use using default/requested diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.h b/lib/Target/AMDGPU/AMDGPUSubtarget.h index 09ad88fca17..cf4a691d4b5 100644 --- a/lib/Target/AMDGPU/AMDGPUSubtarget.h +++ b/lib/Target/AMDGPU/AMDGPUSubtarget.h @@ -382,7 +382,7 @@ public: unsigned getOccupancyWithLocalMemSize(const MachineFunction &MF) const { const auto *MFI = MF.getInfo<SIMachineFunctionInfo>(); - return getOccupancyWithLocalMemSize(MFI->getLDSSize(), *MF.getFunction()); + return getOccupancyWithLocalMemSize(MFI->getLDSSize(), MF.getFunction()); } bool hasFP16Denormals() const { @@ -410,7 +410,7 @@ public: } bool enableIEEEBit(const MachineFunction &MF) const { - return AMDGPU::isCompute(MF.getFunction()->getCallingConv()); + return AMDGPU::isCompute(MF.getFunction().getCallingConv()); } bool useFlatForGlobal() const { @@ -482,12 +482,12 @@ public: } bool isMesaKernel(const MachineFunction &MF) const { - return isMesa3DOS() && !AMDGPU::isShader(MF.getFunction()->getCallingConv()); + return isMesa3DOS() && !AMDGPU::isShader(MF.getFunction().getCallingConv()); } // Covers VS/PS/CS graphics shaders bool isMesaGfxShader(const MachineFunction &MF) const { - return isMesa3DOS() && AMDGPU::isShader(MF.getFunction()->getCallingConv()); + return isMesa3DOS() && AMDGPU::isShader(MF.getFunction().getCallingConv()); } bool isAmdCodeObjectV2(const MachineFunction &MF) const { diff --git a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp index 223fdf77941..0a0e43123ae 100644 --- a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp +++ b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp @@ -1641,7 +1641,7 @@ AMDGPUCFGStructurizer::normalizeInfiniteLoopExit(MachineLoop* LoopRep) { FuncRep->push_back(DummyExitBlk); //insert to function SHOWNEWBLK(DummyExitBlk, "DummyExitBlock to normalize infiniteLoop: "); DEBUG(dbgs() << "Old branch instr: " << *BranchMI << "\n";); - LLVMContext &Ctx = 
LoopHeader->getParent()->getFunction()->getContext(); + LLVMContext &Ctx = LoopHeader->getParent()->getFunction().getContext(); Ctx.emitError("Extra register needed to handle CFG"); return nullptr; } diff --git a/lib/Target/AMDGPU/GCNIterativeScheduler.cpp b/lib/Target/AMDGPU/GCNIterativeScheduler.cpp index 178d993cb2a..a0e4f7ff24c 100644 --- a/lib/Target/AMDGPU/GCNIterativeScheduler.cpp +++ b/lib/Target/AMDGPU/GCNIterativeScheduler.cpp @@ -566,7 +566,7 @@ void GCNIterativeScheduler::scheduleILP( bool TryMaximizeOccupancy) { const auto &ST = MF.getSubtarget<SISubtarget>(); auto TgtOcc = std::min(ST.getOccupancyWithLocalMemSize(MF), - ST.getWavesPerEU(*MF.getFunction()).second); + ST.getWavesPerEU(MF.getFunction()).second); sortRegionsByPressure(TgtOcc); auto Occ = Regions.front()->MaxPressure.getOccupancy(ST); diff --git a/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/lib/Target/AMDGPU/GCNSchedStrategy.cpp index 0e80e936ab8..d414b899050 100644 --- a/lib/Target/AMDGPU/GCNSchedStrategy.cpp +++ b/lib/Target/AMDGPU/GCNSchedStrategy.cpp @@ -37,7 +37,7 @@ static unsigned getMaxWaves(unsigned SGPRs, unsigned VGPRs, ST.getOccupancyWithNumVGPRs(VGPRs)); return std::min(MinRegOccupancy, ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(), - *MF.getFunction())); + MF.getFunction())); } void GCNMaxOccupancySchedStrategy::initialize(ScheduleDAGMI *DAG) { @@ -315,7 +315,7 @@ GCNScheduleDAGMILive::GCNScheduleDAGMILive(MachineSchedContext *C, ST(MF.getSubtarget<SISubtarget>()), MFI(*MF.getInfo<SIMachineFunctionInfo>()), StartingOccupancy(ST.getOccupancyWithLocalMemSize(MFI.getLDSSize(), - *MF.getFunction())), + MF.getFunction())), MinOccupancy(StartingOccupancy), Stage(0), RegionIdx(0) { DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n"); diff --git a/lib/Target/AMDGPU/R600ClauseMergePass.cpp b/lib/Target/AMDGPU/R600ClauseMergePass.cpp index 8db66e600ec..5e1ba6b506d 100644 --- a/lib/Target/AMDGPU/R600ClauseMergePass.cpp +++ b/lib/Target/AMDGPU/R600ClauseMergePass.cpp @@ -180,7 +180,7 @@ bool R600ClauseMergePass::mergeIfPossible(MachineInstr &RootCFAlu, } bool R600ClauseMergePass::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>(); diff --git a/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp b/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp index be6a45da116..0e788df1c9c 100644 --- a/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp +++ b/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp @@ -512,14 +512,14 @@ public: R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>(); - CFStack CFStack(ST, MF.getFunction()->getCallingConv()); + CFStack CFStack(ST, MF.getFunction().getCallingConv()); for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME; ++MB) { MachineBasicBlock &MBB = *MB; unsigned CfCount = 0; std::vector<std::pair<unsigned, std::set<MachineInstr *>>> LoopStack; std::vector<MachineInstr * > IfThenElseStack; - if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_VS) { + if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_VS) { BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()), getHWInstrDesc(CF_CALL_FS)); CfCount++; diff --git a/lib/Target/AMDGPU/R600InstrInfo.cpp b/lib/Target/AMDGPU/R600InstrInfo.cpp index 21945c4cce1..23e646c8147 100644 --- a/lib/Target/AMDGPU/R600InstrInfo.cpp +++ b/lib/Target/AMDGPU/R600InstrInfo.cpp @@ -197,7 +197,7 @@ bool R600InstrInfo::usesVertexCache(unsigned Opcode) const { 
bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const { const MachineFunction *MF = MI.getParent()->getParent(); - return !AMDGPU::isCompute(MF->getFunction()->getCallingConv()) && + return !AMDGPU::isCompute(MF->getFunction().getCallingConv()) && usesVertexCache(MI.getOpcode()); } @@ -207,7 +207,7 @@ bool R600InstrInfo::usesTextureCache(unsigned Opcode) const { bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const { const MachineFunction *MF = MI.getParent()->getParent(); - return (AMDGPU::isCompute(MF->getFunction()->getCallingConv()) && + return (AMDGPU::isCompute(MF->getFunction().getCallingConv()) && usesVertexCache(MI.getOpcode())) || usesTextureCache(MI.getOpcode()); } diff --git a/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp index 95bc7ca564c..4a14d95f1cc 100644 --- a/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp +++ b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp @@ -336,7 +336,7 @@ void R600VectorRegMerger::trackRSI(const RegSeqInfo &RSI) { } bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) { - if (skipFunction(*Fn.getFunction())) + if (skipFunction(Fn.getFunction())) return false; const R600Subtarget &ST = Fn.getSubtarget<R600Subtarget>(); diff --git a/lib/Target/AMDGPU/SIFoldOperands.cpp b/lib/Target/AMDGPU/SIFoldOperands.cpp index 0766eba01fd..78318198034 100644 --- a/lib/Target/AMDGPU/SIFoldOperands.cpp +++ b/lib/Target/AMDGPU/SIFoldOperands.cpp @@ -926,7 +926,7 @@ bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) { } bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; MRI = &MF.getRegInfo(); diff --git a/lib/Target/AMDGPU/SIFrameLowering.cpp b/lib/Target/AMDGPU/SIFrameLowering.cpp index 08a7419612b..89bb98dbd02 100644 --- a/lib/Target/AMDGPU/SIFrameLowering.cpp +++ b/lib/Target/AMDGPU/SIFrameLowering.cpp @@ -394,7 +394,7 @@ void SIFrameLowering::emitEntryFunctionScratchSetup(const SISubtarget &ST, // We now have the GIT ptr - now get the scratch descriptor from the entry // at offset 0. 
PointerType *PtrTy = - PointerType::get(Type::getInt64Ty(MF.getFunction()->getContext()), + PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()), AMDGPUAS::CONSTANT_ADDRESS); MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM); @@ -425,7 +425,7 @@ void SIFrameLowering::emitEntryFunctionScratchSetup(const SISubtarget &ST, if (MFI->hasImplicitBufferPtr()) { unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1); - if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) { + if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) { const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64); BuildMI(MBB, I, DL, Mov64, Rsrc01) @@ -435,7 +435,7 @@ void SIFrameLowering::emitEntryFunctionScratchSetup(const SISubtarget &ST, const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM); PointerType *PtrTy = - PointerType::get(Type::getInt64Ty(MF.getFunction()->getContext()), + PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()), AMDGPUAS::CONSTANT_ADDRESS); MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); auto MMO = MF.getMachineMemOperand(PtrInfo, diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp index d3e2e11b721..50ee88fa635 100644 --- a/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/lib/Target/AMDGPU/SIISelLowering.cpp @@ -1460,14 +1460,14 @@ SDValue SITargetLowering::LowerFormalArguments( const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); MachineFunction &MF = DAG.getMachineFunction(); - FunctionType *FType = MF.getFunction()->getFunctionType(); + FunctionType *FType = MF.getFunction().getFunctionType(); SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); const SISubtarget &ST = MF.getSubtarget<SISubtarget>(); if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) { - const Function *Fn = MF.getFunction(); + const Function &Fn = MF.getFunction(); DiagnosticInfoUnsupported NoGraphicsHSA( - *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc()); + Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc()); DAG.getContext()->diagnose(NoGraphicsHSA); return DAG.getEntryNode(); } @@ -1696,7 +1696,7 @@ SDValue SITargetLowering::LowerFormalArguments( auto &ArgUsageInfo = DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); - ArgUsageInfo.setFuncArgInfo(*MF.getFunction(), Info->getArgInfo()); + ArgUsageInfo.setFuncArgInfo(MF.getFunction(), Info->getArgInfo()); unsigned StackArgSize = CCInfo.getNextStackOffset(); Info->setBytesInStackArgArea(StackArgSize); @@ -2032,8 +2032,8 @@ bool SITargetLowering::isEligibleForTailCallOptimization( return false; MachineFunction &MF = DAG.getMachineFunction(); - const Function *CallerF = MF.getFunction(); - CallingConv::ID CallerCC = CallerF->getCallingConv(); + const Function &CallerF = MF.getFunction(); + CallingConv::ID CallerCC = CallerF.getCallingConv(); const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); @@ -2054,7 +2054,7 @@ bool SITargetLowering::isEligibleForTailCallOptimization( if (IsVarArg) return false; - for (const Argument &Arg : CallerF->args()) { + for (const Argument &Arg : CallerF.args()) { if (Arg.hasByValAttr()) return false; } @@ -3594,11 +3594,11 @@ SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const { case SISubtarget::TrapIDLLVMTrap: return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain); case SISubtarget::TrapIDLLVMDebugTrap: { - 
DiagnosticInfoUnsupported NoTrap(*MF.getFunction(), + DiagnosticInfoUnsupported NoTrap(MF.getFunction(), "debugtrap handler not supported", Op.getDebugLoc(), DS_Warning); - LLVMContext &Ctx = MF.getFunction()->getContext(); + LLVMContext &Ctx = MF.getFunction().getContext(); Ctx.diagnose(NoTrap); return Chain; } @@ -3711,7 +3711,7 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, const MachineFunction &MF = DAG.getMachineFunction(); DiagnosticInfoUnsupported InvalidAddrSpaceCast( - *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); + MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); DAG.getContext()->diagnose(InvalidAddrSpaceCast); return DAG.getUNDEF(ASC->getValueType(0)); @@ -3913,7 +3913,7 @@ SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, EVT VT) { - DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(), + DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), "non-hsa intrinsic with hsa target", DL.getDebugLoc()); DAG.getContext()->diagnose(BadIntrin); @@ -3922,7 +3922,7 @@ static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, EVT VT) { - DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(), + DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), "intrinsic not supported on subtarget", DL.getDebugLoc()); DAG.getContext()->diagnose(BadIntrin); @@ -3951,7 +3951,7 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, case Intrinsic::amdgcn_queue_ptr: { if (!Subtarget->isAmdCodeObjectV2(MF)) { DiagnosticInfoUnsupported BadIntrin( - *MF.getFunction(), "unsupported hsa intrinsic without hsa target", + MF.getFunction(), "unsupported hsa intrinsic without hsa target", DL.getDebugLoc()); DAG.getContext()->diagnose(BadIntrin); return DAG.getUNDEF(VT); @@ -4129,7 +4129,7 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, return SDValue(); DiagnosticInfoUnsupported BadIntrin( - *MF.getFunction(), "intrinsic not supported on subtarget", + MF.getFunction(), "intrinsic not supported on subtarget", DL.getDebugLoc()); DAG.getContext()->diagnose(BadIntrin); return DAG.getUNDEF(VT); @@ -4559,7 +4559,7 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, case Intrinsic::amdgcn_s_barrier: { if (getTargetMachine().getOptLevel() > CodeGenOpt::None) { const SISubtarget &ST = MF.getSubtarget<SISubtarget>(); - unsigned WGSize = ST.getFlatWorkGroupSizes(*MF.getFunction()).second; + unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second; if (WGSize <= ST.getWavefrontSize()) return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other, Op.getOperand(0)), 0); diff --git a/lib/Target/AMDGPU/SIInsertSkips.cpp b/lib/Target/AMDGPU/SIInsertSkips.cpp index 1b8c9f27712..a2f844d7854 100644 --- a/lib/Target/AMDGPU/SIInsertSkips.cpp +++ b/lib/Target/AMDGPU/SIInsertSkips.cpp @@ -166,7 +166,7 @@ bool SIInsertSkips::skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB) { MachineBasicBlock &MBB = *MI.getParent(); MachineFunction *MF = MBB.getParent(); - if (MF->getFunction()->getCallingConv() != CallingConv::AMDGPU_PS || + if (MF->getFunction().getCallingConv() != CallingConv::AMDGPU_PS || !shouldSkip(MBB, MBB.getParent()->back())) return false; diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp index 6ec5667cece..61967605432 
100644 --- a/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -375,7 +375,7 @@ static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, unsigned BaseReg1, if (!Base1 || !Base2) return false; const MachineFunction &MF = *MI1.getParent()->getParent(); - const DataLayout &DL = MF.getFunction()->getParent()->getDataLayout(); + const DataLayout &DL = MF.getFunction().getParent()->getDataLayout(); Base1 = GetUnderlyingObject(Base1, DL); Base2 = GetUnderlyingObject(Base1, DL); @@ -442,10 +442,10 @@ static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) { MachineFunction *MF = MBB.getParent(); - DiagnosticInfoUnsupported IllegalCopy(*MF->getFunction(), + DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), "illegal SGPR to VGPR copy", DL, DS_Error); - LLVMContext &C = MF->getFunction()->getContext(); + LLVMContext &C = MF->getFunction().getContext(); C.diagnose(IllegalCopy); BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg) @@ -873,8 +873,8 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, return; } - if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) { - LLVMContext &Ctx = MF->getFunction()->getContext(); + if (!ST.isVGPRSpillingEnabled(MF->getFunction())) { + LLVMContext &Ctx = MF->getFunction().getContext(); Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to" " spill register"); BuildMI(MBB, MI, DL, get(AMDGPU::KILL)) @@ -975,8 +975,8 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, return; } - if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) { - LLVMContext &Ctx = MF->getFunction()->getContext(); + if (!ST.isVGPRSpillingEnabled(MF->getFunction())) { + LLVMContext &Ctx = MF->getFunction().getContext(); Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to" " restore register"); BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg); @@ -1017,7 +1017,7 @@ unsigned SIInstrInfo::calculateLDSSpillAddress( if (TIDReg == AMDGPU::NoRegister) return TIDReg; - if (!AMDGPU::isShader(MF->getFunction()->getCallingConv()) && + if (!AMDGPU::isShader(MF->getFunction().getCallingConv()) && WorkGroupSize > WavefrontSize) { unsigned TIDIGXReg = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X); @@ -3444,7 +3444,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI) const { // scratch memory access. In both cases, the legalization never involves // conversion to the addr64 form. 
if (isMIMG(MI) || - (AMDGPU::isShader(MF.getFunction()->getCallingConv()) && + (AMDGPU::isShader(MF.getFunction().getCallingConv()) && (isMUBUF(MI) || isMTBUF(MI)))) { MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) { diff --git a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp index d9fdb81b8f4..84cd47a101a 100644 --- a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp +++ b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp @@ -913,7 +913,7 @@ bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) { } bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; STM = &MF.getSubtarget<SISubtarget>(); diff --git a/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp index 0a92cd17654..6013ebc81d9 100644 --- a/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp +++ b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp @@ -51,9 +51,9 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF) ImplicitArgPtr(false), GITPtrHigh(0xffffffff) { const SISubtarget &ST = MF.getSubtarget<SISubtarget>(); - const Function *F = MF.getFunction(); - FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(*F); - WavesPerEU = ST.getWavesPerEU(*F); + const Function &F = MF.getFunction(); + FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(F); + WavesPerEU = ST.getWavesPerEU(F); if (!isEntryFunction()) { // Non-entry functions have no special inputs for now, other registers @@ -68,21 +68,21 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF) ArgInfo.PrivateSegmentWaveByteOffset = ArgDescriptor::createRegister(ScratchWaveOffsetReg); - if (F->hasFnAttribute("amdgpu-implicitarg-ptr")) + if (F.hasFnAttribute("amdgpu-implicitarg-ptr")) ImplicitArgPtr = true; } else { - if (F->hasFnAttribute("amdgpu-implicitarg-ptr")) + if (F.hasFnAttribute("amdgpu-implicitarg-ptr")) KernargSegmentPtr = true; } - CallingConv::ID CC = F->getCallingConv(); + CallingConv::ID CC = F.getCallingConv(); if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL) { - if (!F->arg_empty()) + if (!F.arg_empty()) KernargSegmentPtr = true; WorkGroupIDX = true; WorkItemIDX = true; } else if (CC == CallingConv::AMDGPU_PS) { - PSInputAddr = AMDGPU::getInitialPSInputAddr(*F); + PSInputAddr = AMDGPU::getInitialPSInputAddr(F); } if (ST.debuggerEmitPrologue()) { @@ -94,27 +94,27 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF) WorkItemIDY = true; WorkItemIDZ = true; } else { - if (F->hasFnAttribute("amdgpu-work-group-id-x")) + if (F.hasFnAttribute("amdgpu-work-group-id-x")) WorkGroupIDX = true; - if (F->hasFnAttribute("amdgpu-work-group-id-y")) + if (F.hasFnAttribute("amdgpu-work-group-id-y")) WorkGroupIDY = true; - if (F->hasFnAttribute("amdgpu-work-group-id-z")) + if (F.hasFnAttribute("amdgpu-work-group-id-z")) WorkGroupIDZ = true; - if (F->hasFnAttribute("amdgpu-work-item-id-x")) + if (F.hasFnAttribute("amdgpu-work-item-id-x")) WorkItemIDX = true; - if (F->hasFnAttribute("amdgpu-work-item-id-y")) + if (F.hasFnAttribute("amdgpu-work-item-id-y")) WorkItemIDY = true; - if (F->hasFnAttribute("amdgpu-work-item-id-z")) + if (F.hasFnAttribute("amdgpu-work-item-id-z")) WorkItemIDZ = true; } const MachineFrameInfo &FrameInfo = MF.getFrameInfo(); - bool MaySpill = ST.isVGPRSpillingEnabled(*F); + bool MaySpill = ST.isVGPRSpillingEnabled(F); bool HasStackObjects 
= FrameInfo.hasStackObjects(); if (isEntryFunction()) { @@ -139,30 +139,30 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF) if (HasStackObjects || MaySpill) PrivateSegmentBuffer = true; - if (F->hasFnAttribute("amdgpu-dispatch-ptr")) + if (F.hasFnAttribute("amdgpu-dispatch-ptr")) DispatchPtr = true; - if (F->hasFnAttribute("amdgpu-queue-ptr")) + if (F.hasFnAttribute("amdgpu-queue-ptr")) QueuePtr = true; - if (F->hasFnAttribute("amdgpu-dispatch-id")) + if (F.hasFnAttribute("amdgpu-dispatch-id")) DispatchID = true; } else if (ST.isMesaGfxShader(MF)) { if (HasStackObjects || MaySpill) ImplicitBufferPtr = true; } - if (F->hasFnAttribute("amdgpu-kernarg-segment-ptr")) + if (F.hasFnAttribute("amdgpu-kernarg-segment-ptr")) KernargSegmentPtr = true; if (ST.hasFlatAddressSpace() && isEntryFunction() && IsCOV2) { // TODO: This could be refined a lot. The attribute is a poor way of // detecting calls that may require it before argument lowering. - if (HasStackObjects || F->hasFnAttribute("amdgpu-flat-scratch")) + if (HasStackObjects || F.hasFnAttribute("amdgpu-flat-scratch")) FlatScratchInit = true; } - Attribute A = F->getFnAttribute("amdgpu-git-ptr-high"); + Attribute A = F.getFnAttribute("amdgpu-git-ptr-high"); StringRef S = A.getValueAsString(); if (!S.empty()) S.consumeInteger(0, GITPtrHigh); diff --git a/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/lib/Target/AMDGPU/SIMemoryLegalizer.cpp index c66aed9ef75..c73fb10b7ea 100644 --- a/lib/Target/AMDGPU/SIMemoryLegalizer.cpp +++ b/lib/Target/AMDGPU/SIMemoryLegalizer.cpp @@ -340,9 +340,9 @@ Optional<SIMemOpInfo> SIMemOpInfo::getAtomicRmwInfo( /* static */ void SIMemOpInfo::reportUnknownSyncScope( const MachineBasicBlock::iterator &MI) { - DiagnosticInfoUnsupported Diag(*MI->getParent()->getParent()->getFunction(), + DiagnosticInfoUnsupported Diag(MI->getParent()->getParent()->getFunction(), "Unsupported synchronization scope"); - LLVMContext *CTX = &MI->getParent()->getParent()->getFunction()->getContext(); + LLVMContext *CTX = &MI->getParent()->getParent()->getFunction().getContext(); CTX->diagnose(Diag); } diff --git a/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp b/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp index aa95161c1b6..2dc6f2702b3 100644 --- a/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp +++ b/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp @@ -205,7 +205,7 @@ static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg) { } bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; const SISubtarget &ST = MF.getSubtarget<SISubtarget>(); diff --git a/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp b/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp index 5533ba1d9d0..83074773c49 100644 --- a/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp +++ b/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp @@ -103,7 +103,7 @@ static MachineInstr* getOrExecSource(const MachineInstr &MI, } bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; const SISubtarget &ST = MF.getSubtarget<SISubtarget>(); diff --git a/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/lib/Target/AMDGPU/SIPeepholeSDWA.cpp index 7b4652e8aa6..5ed7fdf220b 100644 --- a/lib/Target/AMDGPU/SIPeepholeSDWA.cpp +++ b/lib/Target/AMDGPU/SIPeepholeSDWA.cpp @@ -1050,7 +1050,7 @@ void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI, const SISubtarget bool 
SIPeepholeSDWA::runOnMachineFunction(MachineFunction &MF) { const SISubtarget &ST = MF.getSubtarget<SISubtarget>(); - if (!ST.hasSDWA() || skipFunction(*MF.getFunction())) + if (!ST.hasSDWA() || skipFunction(MF.getFunction())) return false; MRI = &MF.getRegInfo(); diff --git a/lib/Target/AMDGPU/SIRegisterInfo.cpp b/lib/Target/AMDGPU/SIRegisterInfo.cpp index 1b813a334ef..65cdc13e03c 100644 --- a/lib/Target/AMDGPU/SIRegisterInfo.cpp +++ b/lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -1514,7 +1514,7 @@ unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC, const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(), - *MF.getFunction()); + MF.getFunction()); switch (RC->getID()) { default: return AMDGPURegisterInfo::getRegPressureLimit(RC, MF); diff --git a/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/lib/Target/AMDGPU/SIShrinkInstructions.cpp index 874fbadca7f..41f989ad322 100644 --- a/lib/Target/AMDGPU/SIShrinkInstructions.cpp +++ b/lib/Target/AMDGPU/SIShrinkInstructions.cpp @@ -286,7 +286,7 @@ static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI) { } bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; MachineRegisterInfo &MRI = MF.getRegInfo(); diff --git a/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/lib/Target/AMDGPU/SIWholeQuadMode.cpp index 23464e0c6fd..53aefe82973 100644 --- a/lib/Target/AMDGPU/SIWholeQuadMode.cpp +++ b/lib/Target/AMDGPU/SIWholeQuadMode.cpp @@ -307,7 +307,7 @@ void SIWholeQuadMode::markInstructionUses(const MachineInstr &MI, char Flag, char SIWholeQuadMode::scanInstructions(MachineFunction &MF, std::vector<WorkItem> &Worklist) { char GlobalFlags = 0; - bool WQMOutputs = MF.getFunction()->hasFnAttribute("amdgpu-ps-wqm-outputs"); + bool WQMOutputs = MF.getFunction().hasFnAttribute("amdgpu-ps-wqm-outputs"); SmallVector<MachineInstr *, 4> SetInactiveInstrs; // We need to visit the basic blocks in reverse post-order so that we visit @@ -842,7 +842,7 @@ bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) { Blocks.clear(); LiveMaskQueries.clear(); LowerToCopyInstrs.clear(); - CallingConv = MF.getFunction()->getCallingConv(); + CallingConv = MF.getFunction().getCallingConv(); const SISubtarget &ST = MF.getSubtarget<SISubtarget>(); diff --git a/lib/Target/ARC/ARCBranchFinalize.cpp b/lib/Target/ARC/ARCBranchFinalize.cpp index e5b0f8f3208..9341e7bdda4 100644 --- a/lib/Target/ARC/ARCBranchFinalize.cpp +++ b/lib/Target/ARC/ARCBranchFinalize.cpp @@ -142,7 +142,7 @@ void ARCBranchFinalize::replaceWithCmpBcc(MachineInstr *MI) const { bool ARCBranchFinalize::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << "Running ARC Branch Finalize on " - << MF.getFunction()->getName() << "\n"); + << MF.getName() << "\n"); std::vector<MachineInstr *> Branches; bool Changed = false; unsigned MaxSize = 0; @@ -172,7 +172,7 @@ bool ARCBranchFinalize::runOnMachineFunction(MachineFunction &MF) { isInt<9>(MaxSize) ? 
replaceWithBRcc(P.first) : replaceWithCmpBcc(P.first); } - DEBUG(dbgs() << "Estimated function size for " << MF.getFunction()->getName() + DEBUG(dbgs() << "Estimated function size for " << MF.getName() << ": " << MaxSize << "\n"); return Changed; diff --git a/lib/Target/ARC/ARCFrameLowering.cpp b/lib/Target/ARC/ARCFrameLowering.cpp index 2976798eedf..195a781950b 100644 --- a/lib/Target/ARC/ARCFrameLowering.cpp +++ b/lib/Target/ARC/ARCFrameLowering.cpp @@ -88,7 +88,7 @@ determineLastCalleeSave(const std::vector<CalleeSavedInfo> &CSI) { void ARCFrameLowering::determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const { - DEBUG(dbgs() << "Determine Callee Saves: " << MF.getFunction()->getName() + DEBUG(dbgs() << "Determine Callee Saves: " << MF.getName() << "\n"); TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); SavedRegs.set(ARC::BLINK); @@ -115,7 +115,7 @@ void ARCFrameLowering::adjustStackToMatchRecords( /// registers onto the stack, when enough callee saved registers are required. void ARCFrameLowering::emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const { - DEBUG(dbgs() << "Emit Prologue: " << MF.getFunction()->getName() << "\n"); + DEBUG(dbgs() << "Emit Prologue: " << MF.getName() << "\n"); auto *AFI = MF.getInfo<ARCFunctionInfo>(); MachineModuleInfo &MMI = MF.getMMI(); MCContext &Context = MMI.getContext(); @@ -131,7 +131,7 @@ void ARCFrameLowering::emitPrologue(MachineFunction &MF, unsigned StackSlotsUsedByFunclet = 0; bool SavedBlink = false; unsigned AlreadyAdjusted = 0; - if (MF.getFunction()->isVarArg()) { + if (MF.getFunction().isVarArg()) { // Add in the varargs area here first. DEBUG(dbgs() << "Varargs\n"); unsigned VarArgsBytes = MFI.getObjectSize(AFI->getVarArgsFrameIndex()); @@ -235,7 +235,7 @@ void ARCFrameLowering::emitPrologue(MachineFunction &MF, /// registers onto the stack, when enough callee saved registers are required. void ARCFrameLowering::emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const { - DEBUG(dbgs() << "Emit Epilogue: " << MF.getFunction()->getName() << "\n"); + DEBUG(dbgs() << "Emit Epilogue: " << MF.getName() << "\n"); auto *AFI = MF.getInfo<ARCFunctionInfo>(); const ARCInstrInfo *TII = MF.getSubtarget<ARCSubtarget>().getInstrInfo(); MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator(); @@ -302,7 +302,7 @@ void ARCFrameLowering::emitEpilogue(MachineFunction &MF, } // Relieve the varargs area if necessary. - if (MF.getFunction()->isVarArg()) { + if (MF.getFunction().isVarArg()) { // Add in the varargs area here first. DEBUG(dbgs() << "Varargs\n"); unsigned VarArgsBytes = MFI.getObjectSize(AFI->getVarArgsFrameIndex()); @@ -383,7 +383,7 @@ bool ARCFrameLowering::spillCalleeSavedRegisters( const std::vector<CalleeSavedInfo> &CSI, const TargetRegisterInfo *TRI) const { DEBUG(dbgs() << "Spill callee saved registers: " - << MBB.getParent()->getFunction()->getName() << "\n"); + << MBB.getParent()->getName() << "\n"); // There are routines for saving at least 3 registers (r13 to r15, etc.) 
unsigned Last = determineLastCalleeSave(CSI); if (UseSaveRestoreFunclet && Last > ARC::R14) { @@ -400,7 +400,7 @@ bool ARCFrameLowering::restoreCalleeSavedRegisters( MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, std::vector<CalleeSavedInfo> &CSI, const TargetRegisterInfo *TRI) const { DEBUG(dbgs() << "Restore callee saved registers: " - << MBB.getParent()->getFunction()->getName() << "\n"); + << MBB.getParent()->getName() << "\n"); // There are routines for saving at least 3 registers (r13 to r15, etc.) unsigned Last = determineLastCalleeSave(CSI); if (UseSaveRestoreFunclet && Last > ARC::R14) { @@ -415,7 +415,7 @@ void ARCFrameLowering::processFunctionBeforeFrameFinalized( MachineFunction &MF, RegScavenger *RS) const { const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo(); DEBUG(dbgs() << "Process function before frame finalized: " - << MF.getFunction()->getName() << "\n"); + << MF.getName() << "\n"); MachineFrameInfo &MFI = MF.getFrameInfo(); DEBUG(dbgs() << "Current stack size: " << MFI.getStackSize() << "\n"); const TargetRegisterClass *RC = &ARC::GPR32RegClass; @@ -440,8 +440,7 @@ static void emitRegUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator ARCFrameLowering::eliminateCallFramePseudoInstr( MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const { - DEBUG(dbgs() << "EmitCallFramePseudo: " << MF.getFunction()->getName() - << "\n"); + DEBUG(dbgs() << "EmitCallFramePseudo: " << MF.getName() << "\n"); const ARCInstrInfo *TII = MF.getSubtarget<ARCSubtarget>().getInstrInfo(); MachineInstr &Old = *I; DebugLoc dl = Old.getDebugLoc(); diff --git a/lib/Target/ARC/ARCRegisterInfo.cpp b/lib/Target/ARC/ARCRegisterInfo.cpp index 59b22c559f2..cb9f89d3499 100644 --- a/lib/Target/ARC/ARCRegisterInfo.cpp +++ b/lib/Target/ARC/ARCRegisterInfo.cpp @@ -125,8 +125,7 @@ static void ReplaceFrameIndex(MachineBasicBlock::iterator II, ARCRegisterInfo::ARCRegisterInfo() : ARCGenRegisterInfo(ARC::BLINK) {} bool ARCRegisterInfo::needsFrameMoves(const MachineFunction &MF) { - return MF.getMMI().hasDebugInfo() || - MF.getFunction()->needsUnwindTableEntry(); + return MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry(); } const MCPhysReg * diff --git a/lib/Target/ARM/A15SDOptimizer.cpp b/lib/Target/ARM/A15SDOptimizer.cpp index 34e41ba5410..16d5f74d19e 100644 --- a/lib/Target/ARM/A15SDOptimizer.cpp +++ b/lib/Target/ARM/A15SDOptimizer.cpp @@ -655,7 +655,7 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) { } bool A15SDOptimizer::runOnMachineFunction(MachineFunction &Fn) { - if (skipFunction(*Fn.getFunction())) + if (skipFunction(Fn.getFunction())) return false; const ARMSubtarget &STI = Fn.getSubtarget<ARMSubtarget>(); diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp index 1779c893ec9..d3d79fe975b 100644 --- a/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/lib/Target/ARM/ARMAsmPrinter.cpp @@ -109,7 +109,7 @@ bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) { Subtarget = &MF.getSubtarget<ARMSubtarget>(); SetupMachineFunction(MF); - const Function* F = MF.getFunction(); + const Function &F = MF.getFunction(); const TargetMachine& TM = MF.getTarget(); // Collect all globals that had their storage promoted to a constant pool. @@ -120,13 +120,13 @@ bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) { // Calculate this function's optimization goal. 
unsigned OptimizationGoal; - if (F->hasFnAttribute(Attribute::OptimizeNone)) + if (F.hasFnAttribute(Attribute::OptimizeNone)) // For best debugging illusion, speed and small size sacrificed OptimizationGoal = 6; - else if (F->optForMinSize()) + else if (F.optForMinSize()) // Aggressively for small size, speed and debug illusion sacrificed OptimizationGoal = 4; - else if (F->optForSize()) + else if (F.optForSize()) // For small size, but speed and debugging illusion preserved OptimizationGoal = 3; else if (TM.getOptLevel() == CodeGenOpt::Aggressive) @@ -146,7 +146,7 @@ bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) { OptimizationGoals = 0; if (Subtarget->isTargetCOFF()) { - bool Internal = F->hasInternalLinkage(); + bool Internal = F.hasInternalLinkage(); COFF::SymbolStorageClass Scl = Internal ? COFF::IMAGE_SYM_CLASS_STATIC : COFF::IMAGE_SYM_CLASS_EXTERNAL; int Type = COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT; diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index a92a9165f27..8c1727724a9 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -1512,18 +1512,18 @@ static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) { 4, ACPV->getModifier(), ACPV->mustAddCurrentAddress()); else if (ACPV->isExtSymbol()) NewCPV = ARMConstantPoolSymbol:: - Create(MF.getFunction()->getContext(), + Create(MF.getFunction().getContext(), cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4); else if (ACPV->isBlockAddress()) NewCPV = ARMConstantPoolConstant:: Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId, ARMCP::CPBlockAddress, 4); else if (ACPV->isLSDA()) - NewCPV = ARMConstantPoolConstant::Create(MF.getFunction(), PCLabelId, + NewCPV = ARMConstantPoolConstant::Create(&MF.getFunction(), PCLabelId, ARMCP::CPLSDA, 4); else if (ACPV->isMachineBasicBlock()) NewCPV = ARMConstantPoolMBB:: - Create(MF.getFunction()->getContext(), + Create(MF.getFunction().getContext(), cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4); else llvm_unreachable("Unexpected ARM constantpool value type!!"); @@ -1843,7 +1843,7 @@ isProfitableToIfCvt(MachineBasicBlock &MBB, // If we are optimizing for size, see if the branch in the predecessor can be // lowered to cbn?z by the constant island lowering pass, and return false if // so. This results in a shorter instruction sequence. - if (MBB.getParent()->getFunction()->optForSize()) { + if (MBB.getParent()->getFunction().optForSize()) { MachineBasicBlock *Pred = *MBB.pred_begin(); if (!Pred->empty()) { MachineInstr *LastMI = &*Pred->rbegin(); @@ -2210,7 +2210,7 @@ bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget, unsigned NumBytes) { // This optimisation potentially adds lots of load and store // micro-operations, it's only really a great benefit to code-size. - if (!MF.getFunction()->optForMinSize()) + if (!MF.getFunction().optForMinSize()) return false; // If only one register is pushed/popped, LLVM can use an LDR/STR @@ -3982,7 +3982,7 @@ int ARMBaseInstrInfo::getOperandLatencyImpl( if (Latency > 0 && Subtarget.isThumb2()) { const MachineFunction *MF = DefMI.getParent()->getParent(); // FIXME: Use Function::optForSize(). 
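// (Expanding the FIXME above as a sketch: Function::optForSize() is the
// broader predicate -- roughly
//
//   bool optForSize() const {
//     return hasFnAttribute(Attribute::OptimizeForSize) || optForMinSize();
//   }
//
// so it also fires for minsize functions, unlike the raw attribute test in
// the hunk below.)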
- if (MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize)) + if (MF->getFunction().hasFnAttribute(Attribute::OptimizeForSize)) --Latency; } return Latency; diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp index 0aec874e5dd..4b9a4376adf 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -71,17 +71,17 @@ ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { ? CSR_iOS_SaveList : (UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList); - const Function *F = MF->getFunction(); - if (F->getCallingConv() == CallingConv::GHC) { + const Function &F = MF->getFunction(); + if (F.getCallingConv() == CallingConv::GHC) { // GHC set of callee saved regs is empty as all those regs are // used for passing STG regs around return CSR_NoRegs_SaveList; - } else if (F->hasFnAttribute("interrupt")) { + } else if (F.hasFnAttribute("interrupt")) { if (STI.isMClass()) { // M-class CPUs have hardware which saves the registers needed to allow a // function conforming to the AAPCS to function as a handler. return UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList; - } else if (F->getFnAttribute("interrupt").getValueAsString() == "FIQ") { + } else if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") { // Fast interrupt mode gives the handler a private copy of R8-R14, so less // need to be saved to restore user-mode state. return CSR_FIQ_SaveList; @@ -93,7 +93,7 @@ ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { } if (STI.getTargetLowering()->supportSwiftError() && - F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) { + F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) { if (STI.isTargetDarwin()) return CSR_iOS_SwiftError_SaveList; @@ -101,7 +101,7 @@ ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { CSR_AAPCS_SwiftError_SaveList; } - if (STI.isTargetDarwin() && F->getCallingConv() == CallingConv::CXX_FAST_TLS) + if (STI.isTargetDarwin() && F.getCallingConv() == CallingConv::CXX_FAST_TLS) return MF->getInfo<ARMFunctionInfo>()->isSplitCSR() ? CSR_iOS_CXX_TLS_PE_SaveList : CSR_iOS_CXX_TLS_SaveList; @@ -111,7 +111,7 @@ ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy( const MachineFunction *MF) const { assert(MF && "Invalid MachineFunction pointer."); - if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS && + if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS && MF->getInfo<ARMFunctionInfo>()->isSplitCSR()) return CSR_iOS_CXX_TLS_ViaCopy_SaveList; return nullptr; @@ -126,7 +126,7 @@ ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF, return CSR_NoRegs_RegMask; if (STI.getTargetLowering()->supportSwiftError() && - MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) + MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError)) return STI.isTargetDarwin() ? 
CSR_iOS_SwiftError_RegMask : CSR_AAPCS_SwiftError_RegMask; @@ -440,7 +440,7 @@ void ARMBaseRegisterInfo::emitLoadConstPool( const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); MachineConstantPool *ConstantPool = MF.getConstantPool(); const Constant *C = - ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val); + ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), Val); unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4); BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp)) diff --git a/lib/Target/ARM/ARMCallLowering.cpp b/lib/Target/ARM/ARMCallLowering.cpp index 7338ac8bcb5..eab4b3b13f3 100644 --- a/lib/Target/ARM/ARMCallLowering.cpp +++ b/lib/Target/ARM/ARMCallLowering.cpp @@ -190,7 +190,7 @@ void ARMCallLowering::splitToValueTypes( LLVMContext &Ctx = OrigArg.Ty->getContext(); const DataLayout &DL = MF.getDataLayout(); MachineRegisterInfo &MRI = MF.getRegInfo(); - const Function *F = MF.getFunction(); + const Function &F = MF.getFunction(); SmallVector<EVT, 4> SplitVTs; SmallVector<uint64_t, 4> Offsets; @@ -218,7 +218,7 @@ void ARMCallLowering::splitToValueTypes( bool NeedsConsecutiveRegisters = TLI.functionArgumentNeedsConsecutiveRegisters( - SplitTy, F->getCallingConv(), F->isVarArg()); + SplitTy, F.getCallingConv(), F.isVarArg()); if (NeedsConsecutiveRegisters) { Flags.setInConsecutiveRegs(); if (i == e - 1) @@ -244,7 +244,7 @@ bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder, return true; auto &MF = MIRBuilder.getMF(); - const auto &F = *MF.getFunction(); + const auto &F = MF.getFunction(); auto DL = MF.getDataLayout(); auto &TLI = *getTLI<ARMTargetLowering>(); diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp index f1def9822c2..b14b2c6a813 100644 --- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp +++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp @@ -1259,7 +1259,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB, MachineConstantPool *MCP = MF->getConstantPool(); unsigned PCLabelID = AFI->createPICLabelUId(); MachineConstantPoolValue *CPV = - ARMConstantPoolSymbol::Create(MF->getFunction()->getContext(), + ARMConstantPoolSymbol::Create(MF->getFunction().getContext(), "__aeabi_read_tp", PCLabelID, 0); unsigned Reg = MI.getOperand(0).getReg(); MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp index 1090f62106f..0ea435062ec 100644 --- a/lib/Target/ARM/ARMFastISel.cpp +++ b/lib/Target/ARM/ARMFastISel.cpp @@ -2958,7 +2958,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT) { bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV); - LLVMContext *Context = &MF->getFunction()->getContext(); + LLVMContext *Context = &MF->getFunction().getContext(); unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create( diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp index e9a13b9802b..4ff864ac6cc 100644 --- a/lib/Target/ARM/ARMFrameLowering.cpp +++ b/lib/Target/ARM/ARMFrameLowering.cpp @@ -203,10 +203,10 @@ static int sizeOfSPAdjustment(const MachineInstr &MI) { static bool WindowsRequiresStackProbe(const MachineFunction &MF, size_t StackSizeInBytes) { const MachineFrameInfo &MFI = MF.getFrameInfo(); - const Function *F = MF.getFunction(); + const Function &F = MF.getFunction(); unsigned StackProbeSize = (MFI.getStackProtectorIndex() > 0) ? 
4080 : 4096; - if (F->hasFnAttribute("stack-probe-size")) - F->getFnAttribute("stack-probe-size") + if (F.hasFnAttribute("stack-probe-size")) + F.getFnAttribute("stack-probe-size") .getValueAsString() .getAsInteger(0, StackProbeSize); return StackSizeInBytes >= StackProbeSize; @@ -370,7 +370,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF, // All calls are tail calls in GHC calling conv, and functions have no // prologue/epilogue. - if (MF.getFunction()->getCallingConv() == CallingConv::GHC) + if (MF.getFunction().getCallingConv() == CallingConv::GHC) return; StackAdjustingInsts DefCFAOffsetCandidates; @@ -448,7 +448,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF, int FramePtrOffsetInPush = 0; if (HasFP) { int FPOffset = MFI.getObjectOffset(FramePtrSpillFI); - assert(getMaxFPOffset(*MF.getFunction(), *AFI) <= FPOffset && + assert(getMaxFPOffset(MF.getFunction(), *AFI) <= FPOffset && "Max FP estimation is wrong"); FramePtrOffsetInPush = FPOffset + ArgRegsSaveSize; AFI->setFramePtrSpillOffset(MFI.getObjectOffset(FramePtrSpillFI) + @@ -766,7 +766,7 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF, // All calls are tail calls in GHC calling conv, and functions have no // prologue/epilogue. - if (MF.getFunction()->getCallingConv() == CallingConv::GHC) + if (MF.getFunction().getCallingConv() == CallingConv::GHC) return; // First put ourselves on the first (from top) terminator instructions. @@ -1533,7 +1533,7 @@ checkNumAlignedDPRCS2Regs(MachineFunction &MF, BitVector &SavedRegs) { return; // Naked functions don't spill callee-saved registers. - if (MF.getFunction()->hasFnAttribute(Attribute::Naked)) + if (MF.getFunction().hasFnAttribute(Attribute::Naked)) return; // We are planning to use NEON instructions vst1 / vld1. @@ -1744,7 +1744,7 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF, EstimatedStackSize += 16; // For possible paddings. unsigned EstimatedRSStackSizeLimit = estimateRSStackSizeLimit(MF, this); - int MaxFPOffset = getMaxFPOffset(*MF.getFunction(), *AFI); + int MaxFPOffset = getMaxFPOffset(MF.getFunction(), *AFI); bool BigFrameOffsets = EstimatedStackSize >= EstimatedRSStackSizeLimit || MFI.hasVarSizedObjects() || (MFI.adjustsStack() && !canSimplifyCallFramePseudos(MF)) || @@ -2102,7 +2102,7 @@ void ARMFrameLowering::adjustForSegmentedStacks( // Sadly, this currently doesn't support varargs or platforms other than // android/linux. Note that thumb1/thumb2 are supported for android/linux. - if (MF.getFunction()->isVarArg()) + if (MF.getFunction().isVarArg()) report_fatal_error("Segmented stacks do not support vararg functions."); if (!ST->isTargetAndroid() && !ST->isTargetLinux()) report_fatal_error("Segmented stacks not supported on this platform."); @@ -2250,7 +2250,7 @@ void ARMFrameLowering::adjustForSegmentedStacks( if (Thumb && ST->isThumb1Only()) { unsigned PCLabelId = ARMFI->createPICLabelUId(); ARMConstantPoolValue *NewCPV = ARMConstantPoolSymbol::Create( - MF.getFunction()->getContext(), "__STACK_LIMIT", PCLabelId, 0); + MF.getFunction().getContext(), "__STACK_LIMIT", PCLabelId, 0); MachineConstantPool *MCP = MF.getConstantPool(); unsigned CPI = MCP->getConstantPoolIndex(NewCPV, 4); diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index f60500d4819..1b4d7ff5084 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -1773,7 +1773,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, bool isStructRet = (Outs.empty()) ?
false : Outs[0].Flags.isSRet(); bool isThisReturn = false; bool isSibCall = false; - auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls"); + auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls"); // Disable tail calls if they're not supported. if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true") @@ -1782,7 +1782,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, if (isTailCall) { // Check if it's really possible to do a tail call. isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, - isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(), + isVarArg, isStructRet, MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG); if (!isTailCall && CLI.CS && CLI.CS.isMustTailCall()) report_fatal_error("failed to perform tail call elimination on a call " @@ -1981,7 +1981,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, bool isDirect = false; const TargetMachine &TM = getTargetMachine(); - const Module *Mod = MF.getFunction()->getParent(); + const Module *Mod = MF.getFunction().getParent(); const GlobalValue *GV = nullptr; if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) GV = G->getGlobal(); @@ -2033,7 +2033,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); auto *BB = CLI.CS.getParent(); bool PreferIndirect = - Subtarget->isThumb() && MF.getFunction()->optForMinSize() && + Subtarget->isThumb() && MF.getFunction().optForMinSize() && count_if(GV->users(), [&BB](const User *U) { return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB; }) > 2; @@ -2105,7 +2105,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, CallOpc = ARMISD::CALL_NOLINK; else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() && // Emit regular call when code size is the priority - !MF.getFunction()->optForMinSize()) + !MF.getFunction().optForMinSize()) // "mov lr, pc; b _foo" to avoid confusing the RSP CallOpc = ARMISD::CALL_NOLINK; else @@ -2280,8 +2280,8 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG& DAG) const { MachineFunction &MF = DAG.getMachineFunction(); - const Function *CallerF = MF.getFunction(); - CallingConv::ID CallerCC = CallerF->getCallingConv(); + const Function &CallerF = MF.getFunction(); + CallingConv::ID CallerCC = CallerF.getCallingConv(); assert(Subtarget->supportsTailCall()); @@ -2298,7 +2298,7 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, // Exception-handling functions need a special set of instructions to indicate // a return to the hardware. Tail-calling another function would probably // break this. - if (CallerF->hasFnAttribute("interrupt")) + if (CallerF.hasFnAttribute("interrupt")) return false; // Also avoid sibcall optimization if either caller or callee uses struct @@ -2410,9 +2410,9 @@ ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, const SDLoc &DL, SelectionDAG &DAG) { const MachineFunction &MF = DAG.getMachineFunction(); - const Function *F = MF.getFunction(); + const Function &F = MF.getFunction(); - StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString(); + StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString(); // See ARM ARM v7 B1.8.3. 
On exception entry LR is set to a possibly offset // version of the "preferred return address". These offsets affect the return @@ -2553,7 +2553,7 @@ ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, // // M-class CPUs actually use a normal return sequence with a special // (hardware-provided) value in LR, so the normal code path works. - if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") && + if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") && !Subtarget->isMClass()) { if (Subtarget->isThumb1Only()) report_fatal_error("interrupt attribute is not supported in Thumb1"); @@ -2691,7 +2691,7 @@ SDValue ARMTargetLowering::LowerConstantPool(SDValue Op, auto T = const_cast<Type*>(CP->getType()); auto C = const_cast<Constant*>(CP->getConstVal()); auto M = const_cast<Module*>(DAG.getMachineFunction(). - getFunction()->getParent()); + getFunction().getParent()); auto GV = new GlobalVariable( *M, T, /*isConst=*/true, GlobalVariable::InternalLinkage, C, Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + @@ -2800,7 +2800,7 @@ ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op, // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be // silly). auto TRI = - getTargetMachine().getSubtargetImpl(*F.getFunction())->getRegisterInfo(); + getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo(); auto ARI = static_cast<const ARMRegisterInfo *>(TRI); const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction()); @@ -3055,7 +3055,7 @@ static SDValue promoteToConstantPool(const GlobalValue *GV, SelectionDAG &DAG, // This is a win if the constant is only used in one function (so it doesn't // need to be duplicated) or duplicating the constant wouldn't increase code // size (implying the constant is no larger than 4 bytes). - const Function *F = DAG.getMachineFunction().getFunction(); + const Function &F = DAG.getMachineFunction().getFunction(); // We rely on this decision to inline being idempotent and unrelated to the // use-site. We know that if we inline a variable at one use site, we'll @@ -3113,7 +3113,7 @@ static SDValue promoteToConstantPool(const GlobalValue *GV, SelectionDAG &DAG, // in multiple functions but it is no larger than a pointer. We also check if // GVar has constant (non-ConstantExpr) users. If so, it essentially has its // address taken. - if (!allUsersAreInFunction(GVar, F) && + if (!allUsersAreInFunction(GVar, &F) && !(Size <= 4 && allUsersAreInFunctions(GVar))) return SDValue(); @@ -3322,7 +3322,7 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, bool IsPositionIndependent = isPositionIndependent(); unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0; ARMConstantPoolValue *CPV = - ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex, + ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex, ARMCP::CPLSDA, PCAdj); CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); @@ -3598,7 +3598,7 @@ SDValue ARMTargetLowering::LowerFormalArguments( SmallVector<SDValue, 16> ArgValues; SDValue ArgValue; - Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin(); + Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin(); unsigned CurArgIdx = 0; // Initially ArgRegsSaveSize is zero.
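The hunks above and below all apply one mechanical rewrite: the machine function's IR-function accessor now yields a reference, so call sites swap -> for ., drop a leading * where the old pointer was dereferenced, and add & where a pointer-taking interface remains. A minimal self-contained C++ sketch of those before/after shapes, using toy stand-ins for llvm::Function and llvm::MachineFunction rather than the real headers:

// Toy stand-ins; only the accessor shape matters here, not the real LLVM API.
struct Function {
  bool hasFnAttribute(const char *) const { return false; }
};

struct MachineFunction {
  Function F;
  const Function *getFunctionOld() const { return &F; } // pointer-era accessor
  const Function &getFunction() const { return F; }     // reference accessor
};

int main() {
  MachineFunction MF;
  bool Old = MF.getFunctionOld()->hasFnAttribute("interrupt"); // before: ->
  bool New = MF.getFunction().hasFnAttribute("interrupt");     // after:  .
  const Function *P = &MF.getFunction(); // where a pointer is still required
  return (Old == New && P == &MF.getFunction()) ? 0 : 1;
}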
@@ -7754,9 +7754,9 @@ static SDValue LowerFPOWI(SDValue Op, const ARMSubtarget &Subtarget, SDValue InChain = DAG.getEntryNode(); SDValue TCChain = InChain; - const auto *F = DAG.getMachineFunction().getFunction(); + const Function &F = DAG.getMachineFunction().getFunction(); bool IsTC = TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) && - F->getReturnType() == LCRTy; + F.getReturnType() == LCRTy; if (IsTC) InChain = TCChain; @@ -7954,7 +7954,7 @@ void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, MachineRegisterInfo *MRI = &MF->getRegInfo(); MachineConstantPool *MCP = MF->getConstantPool(); ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); - const Function *F = MF->getFunction(); + const Function &F = MF->getFunction(); bool isThumb = Subtarget->isThumb(); bool isThumb2 = Subtarget->isThumb2(); @@ -7962,7 +7962,7 @@ void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, unsigned PCLabelId = AFI->createPICLabelUId(); unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; ARMConstantPoolValue *CPV = - ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj); + ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj); unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass @@ -8248,7 +8248,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, .add(predOps(ARMCC::AL)); } else { MachineConstantPool *ConstantPool = MF->getConstantPool(); - Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); + Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); const Constant *C = ConstantInt::get(Int32Ty, NumLPads); // MachineConstantPool wants an explicit alignment. @@ -8349,7 +8349,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, .add(predOps(ARMCC::AL)); } else { MachineConstantPool *ConstantPool = MF->getConstantPool(); - Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); + Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); const Constant *C = ConstantInt::get(Int32Ty, NumLPads); // MachineConstantPool wants an explicit alignment. @@ -8645,7 +8645,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr &MI, UnitSize = 2; } else { // Check whether we can use NEON instructions. - if (!MF->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) && + if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) && Subtarget->hasNEON()) { if ((Align % 16 == 0) && SizeVal >= 16) UnitSize = 16; @@ -8751,7 +8751,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr &MI, .add(predOps(ARMCC::AL)); } else { MachineConstantPool *ConstantPool = MF->getConstantPool(); - Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); + Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); const Constant *C = ConstantInt::get(Int32Ty, LoopSize); // MachineConstantPool wants an explicit alignment. @@ -12417,11 +12417,11 @@ EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const { - const Function *F = MF.getFunction(); + const Function &F = MF.getFunction(); // See if we can use NEON instructions for this... 
if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() && - !F->hasFnAttribute(Attribute::NoImplicitFloat)) { + !F.hasFnAttribute(Attribute::NoImplicitFloat)) { bool Fast; if (Size >= 16 && (memOpAlign(SrcAlign, DstAlign, 16) || @@ -14364,7 +14364,7 @@ void ARMTargetLowering::insertCopiesSplitCSR( // fine for CXX_FAST_TLS since the C++-style TLS access functions should be // nounwind. If we want to generalize this later, we may need to emit // CFI pseudo-instructions. - assert(Entry->getParent()->getFunction()->hasFnAttribute( + assert(Entry->getParent()->getFunction().hasFnAttribute( Attribute::NoUnwind) && "Function should be nounwind in insertCopiesSplitCSR!"); Entry->addLiveIn(*I); diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h index 5cffed2a3d4..0a1af8d89f9 100644 --- a/lib/Target/ARM/ARMISelLowering.h +++ b/lib/Target/ARM/ARMISelLowering.h @@ -692,8 +692,8 @@ class VectorType; SDValue ThisVal) const; bool supportSplitCSR(MachineFunction *MF) const override { - return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS && - MF->getFunction()->hasFnAttribute(Attribute::NoUnwind); + return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS && + MF->getFunction().hasFnAttribute(Attribute::NoUnwind); } void initializeSplitCSR(MachineBasicBlock *Entry) const override; diff --git a/lib/Target/ARM/ARMLegalizerInfo.cpp b/lib/Target/ARM/ARMLegalizerInfo.cpp index 6a541991adb..ddcdb1ff42f 100644 --- a/lib/Target/ARM/ARMLegalizerInfo.cpp +++ b/lib/Target/ARM/ARMLegalizerInfo.cpp @@ -318,7 +318,7 @@ bool ARMLegalizerInfo::legalizeCustom(MachineInstr &MI, // Our divmod libcalls return a struct containing the quotient and the // remainder. We need to create a virtual register for it. - auto &Ctx = MIRBuilder.getMF().getFunction()->getContext(); + auto &Ctx = MIRBuilder.getMF().getFunction().getContext(); Type *ArgTy = Type::getInt32Ty(Ctx); StructType *RetTy = StructType::get(Ctx, {ArgTy, ArgTy}, /* Packed */ true); auto RetVal = MRI.createGenericVirtualRegister( @@ -359,7 +359,7 @@ bool ARMLegalizerInfo::legalizeCustom(MachineInstr &MI, return true; } - auto &Ctx = MIRBuilder.getMF().getFunction()->getContext(); + auto &Ctx = MIRBuilder.getMF().getFunction().getContext(); assert((OpSize == 32 || OpSize == 64) && "Unsupported operand size"); auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx); auto *RetTy = Type::getInt32Ty(Ctx); diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index c61e72ebb21..8b3a2e22379 100644 --- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -1273,7 +1273,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineInstr *MI) { // can still change to a writeback form as that will save us 2 bytes // of code size. It can create WAW hazards though, so only do it if // we're minimizing code size. 
- if (!MBB.getParent()->getFunction()->optForMinSize() || !BaseKill) + if (!MBB.getParent()->getFunction().optForMinSize() || !BaseKill) return false; bool HighRegsUsed = false; @@ -1953,7 +1953,7 @@ bool ARMLoadStoreOpt::CombineMovBx(MachineBasicBlock &MBB) { } bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) { - if (skipFunction(*Fn.getFunction())) + if (skipFunction(Fn.getFunction())) return false; MF = &Fn; @@ -2035,7 +2035,7 @@ INITIALIZE_PASS(ARMPreAllocLoadStoreOpt, "arm-prera-ldst-opt", ARM_PREALLOC_LOAD_STORE_OPT_NAME, false, false) bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) { - if (AssumeMisalignedLoadStores || skipFunction(*Fn.getFunction())) + if (AssumeMisalignedLoadStores || skipFunction(Fn.getFunction())) return false; TD = &Fn.getDataLayout(); @@ -2130,9 +2130,9 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1, return false; unsigned Align = (*Op0->memoperands_begin())->getAlignment(); - const Function *Func = MF->getFunction(); + const Function &Func = MF->getFunction(); unsigned ReqAlign = STI->hasV6Ops() - ? TD->getABITypeAlignment(Type::getInt64Ty(Func->getContext())) + ? TD->getABITypeAlignment(Type::getInt64Ty(Func.getContext())) : 8; // Pre-v6 need 8-byte align if (Align < ReqAlign) return false; diff --git a/lib/Target/ARM/ARMOptimizeBarriersPass.cpp b/lib/Target/ARM/ARMOptimizeBarriersPass.cpp index 7e4d598a6e0..cff4a256100 100644 --- a/lib/Target/ARM/ARMOptimizeBarriersPass.cpp +++ b/lib/Target/ARM/ARMOptimizeBarriersPass.cpp @@ -49,7 +49,7 @@ static bool CanMovePastDMB(const MachineInstr *MI) { } bool ARMOptimizeBarriersPass::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; // Vector to store the DMBs we will remove after the first iteration diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/lib/Target/ARM/ARMSelectionDAGInfo.cpp index 33dcf9b8fef..d4fbf76f299 100644 --- a/lib/Target/ARM/ARMSelectionDAGInfo.cpp +++ b/lib/Target/ARM/ARMSelectionDAGInfo.cpp @@ -171,7 +171,7 @@ SDValue ARMSelectionDAGInfo::EmitTargetCodeForMemcpy( // Code size optimisation: do not inline memcpy if expansion results in // more instructions than the library call. - if (NumMEMCPYs > 1 && DAG.getMachineFunction().getFunction()->optForMinSize()) { + if (NumMEMCPYs > 1 && DAG.getMachineFunction().getFunction().optForMinSize()) { return SDValue(); } diff --git a/lib/Target/ARM/ARMSubtarget.cpp b/lib/Target/ARM/ARMSubtarget.cpp index b6875e71c1c..4d4a88126ce 100644 --- a/lib/Target/ARM/ARMSubtarget.cpp +++ b/lib/Target/ARM/ARMSubtarget.cpp @@ -373,7 +373,7 @@ bool ARMSubtarget::useStride4VFPs(const MachineFunction &MF) const { // For general targets, the prologue can grow when VFPs are allocated with // stride 4 (more vpush instructions). But WatchOS uses a compact unwind // format, which is more important to get right. - return isTargetWatchABI() || (isSwift() && !MF.getFunction()->optForMinSize()); + return isTargetWatchABI() || (isSwift() && !MF.getFunction().optForMinSize()); } bool ARMSubtarget::useMovt(const MachineFunction &MF) const { @@ -381,7 +381,7 @@ bool ARMSubtarget::useMovt(const MachineFunction &MF) const { // immediates as it is inherently position independent, and may be out of // range otherwise.
return !NoMovt && hasV8MBaselineOps() && - (isTargetWindows() || !MF.getFunction()->optForMinSize() || genExecuteOnly()); + (isTargetWindows() || !MF.getFunction().optForMinSize() || genExecuteOnly()); } bool ARMSubtarget::useFastISel() const { diff --git a/lib/Target/ARM/MLxExpansionPass.cpp b/lib/Target/ARM/MLxExpansionPass.cpp index 00c41c403f6..153e7b1e219 100644 --- a/lib/Target/ARM/MLxExpansionPass.cpp +++ b/lib/Target/ARM/MLxExpansionPass.cpp @@ -371,7 +371,7 @@ bool MLxExpansion::ExpandFPMLxInstructions(MachineBasicBlock &MBB) { } bool MLxExpansion::runOnMachineFunction(MachineFunction &Fn) { - if (skipFunction(*Fn.getFunction())) + if (skipFunction(Fn.getFunction())) return false; TII = static_cast<const ARMBaseInstrInfo *>(Fn.getSubtarget().getInstrInfo()); diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp index a0b98a43108..3920c73fba6 100644 --- a/lib/Target/ARM/Thumb2SizeReduction.cpp +++ b/lib/Target/ARM/Thumb2SizeReduction.cpp @@ -449,7 +449,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI, break; case ARM::t2LDR_POST: case ARM::t2STR_POST: { - if (!MBB.getParent()->getFunction()->optForMinSize()) + if (!MBB.getParent()->getFunction().optForMinSize()) return false; if (!MI->hasOneMemOperand() || @@ -1084,7 +1084,7 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) { } bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) { - if (PredicateFtor && !PredicateFtor(*MF.getFunction())) + if (PredicateFtor && !PredicateFtor(MF.getFunction())) return false; STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget()); @@ -1094,8 +1094,8 @@ bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) { TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo()); // Optimizing / minimizing size? Minimizing size implies optimizing for size. 
- OptimizeSize = MF.getFunction()->optForSize(); - MinimizeSize = MF.getFunction()->optForMinSize(); + OptimizeSize = MF.getFunction().optForSize(); + MinimizeSize = MF.getFunction().optForMinSize(); BlockInfo.clear(); BlockInfo.resize(MF.getNumBlockIDs()); diff --git a/lib/Target/ARM/ThumbRegisterInfo.cpp b/lib/Target/ARM/ThumbRegisterInfo.cpp index d2bebb9eeec..d190edf5913 100644 --- a/lib/Target/ARM/ThumbRegisterInfo.cpp +++ b/lib/Target/ARM/ThumbRegisterInfo.cpp @@ -70,7 +70,7 @@ static void emitThumb1LoadConstPool(MachineBasicBlock &MBB, const TargetInstrInfo &TII = *STI.getInstrInfo(); MachineConstantPool *ConstantPool = MF.getConstantPool(); const Constant *C = ConstantInt::get( - Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val); + Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val); unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4); BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRpci)) @@ -89,7 +89,7 @@ static void emitThumb2LoadConstPool(MachineBasicBlock &MBB, const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); MachineConstantPool *ConstantPool = MF.getConstantPool(); const Constant *C = ConstantInt::get( - Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val); + Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val); unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4); BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci)) diff --git a/lib/Target/AVR/AVRFrameLowering.cpp b/lib/Target/AVR/AVRFrameLowering.cpp index 5101cf586f1..3b732236577 100644 --- a/lib/Target/AVR/AVRFrameLowering.cpp +++ b/lib/Target/AVR/AVRFrameLowering.cpp @@ -53,7 +53,7 @@ bool AVRFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const { void AVRFrameLowering::emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const { MachineBasicBlock::iterator MBBI = MBB.begin(); - CallingConv::ID CallConv = MF.getFunction()->getCallingConv(); + CallingConv::ID CallConv = MF.getFunction().getCallingConv(); DebugLoc DL = (MBBI != MBB.end()) ? MBBI->getDebugLoc() : DebugLoc(); const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>(); const AVRInstrInfo &TII = *STI.getInstrInfo(); @@ -143,7 +143,7 @@ void AVRFrameLowering::emitPrologue(MachineFunction &MF, void AVRFrameLowering::emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const { - CallingConv::ID CallConv = MF.getFunction()->getCallingConv(); + CallingConv::ID CallConv = MF.getFunction().getCallingConv(); bool isHandler = (CallConv == CallingConv::AVR_INTR || CallConv == CallingConv::AVR_SIGNAL); diff --git a/lib/Target/AVR/AVRISelLowering.cpp b/lib/Target/AVR/AVRISelLowering.cpp index 69d97487878..d9e27e91405 100644 --- a/lib/Target/AVR/AVRISelLowering.cpp +++ b/lib/Target/AVR/AVRISelLowering.cpp @@ -1039,7 +1039,7 @@ SDValue AVRTargetLowering::LowerFormalArguments( CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, *DAG.getContext()); - analyzeArguments(nullptr, MF.getFunction(), &DL, 0, &Ins, CallConv, ArgLocs, CCInfo, + analyzeArguments(nullptr, &MF.getFunction(), &DL, 0, &Ins, CallConv, ArgLocs, CCInfo, false, isVarArg); SDValue ArgValue; @@ -1391,7 +1391,7 @@ AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, // Don't emit the ret/reti instruction when the naked attribute is present in // the function being compiled. 
- if (MF.getFunction()->getAttributes().hasAttribute( + if (MF.getFunction().getAttributes().hasAttribute( AttributeList::FunctionIndex, Attribute::Naked)) { return Chain; } diff --git a/lib/Target/AVR/AVRRegisterInfo.cpp b/lib/Target/AVR/AVRRegisterInfo.cpp index b6ac93452cb..d171a620760 100644 --- a/lib/Target/AVR/AVRRegisterInfo.cpp +++ b/lib/Target/AVR/AVRRegisterInfo.cpp @@ -34,7 +34,7 @@ AVRRegisterInfo::AVRRegisterInfo() : AVRGenRegisterInfo(0) {} const uint16_t * AVRRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { - CallingConv::ID CC = MF->getFunction()->getCallingConv(); + CallingConv::ID CC = MF->getFunction().getCallingConv(); return ((CC == CallingConv::AVR_INTR || CC == CallingConv::AVR_SIGNAL) ? CSR_Interrupts_SaveList diff --git a/lib/Target/BPF/BPFISelLowering.cpp b/lib/Target/BPF/BPFISelLowering.cpp index 7d535563c75..3ea96e3148f 100644 --- a/lib/Target/BPF/BPFISelLowering.cpp +++ b/lib/Target/BPF/BPFISelLowering.cpp @@ -36,7 +36,7 @@ using namespace llvm; static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg) { MachineFunction &MF = DAG.getMachineFunction(); DAG.getContext()->diagnose( - DiagnosticInfoUnsupported(*MF.getFunction(), Msg, DL.getDebugLoc())); + DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc())); } static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg, @@ -48,7 +48,7 @@ static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg, Val->print(OS); OS.flush(); DAG.getContext()->diagnose( - DiagnosticInfoUnsupported(*MF.getFunction(), Str, DL.getDebugLoc())); + DiagnosticInfoUnsupported(MF.getFunction(), Str, DL.getDebugLoc())); } BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM, @@ -227,7 +227,7 @@ SDValue BPFTargetLowering::LowerFormalArguments( } } - if (IsVarArg || MF.getFunction()->hasStructRetAttr()) { + if (IsVarArg || MF.getFunction().hasStructRetAttr()) { fail(DL, DAG, "functions with VarArgs or StructRet are not supported"); } @@ -382,7 +382,7 @@ BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, // CCState - Info about the registers and stack slot. CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); - if (MF.getFunction()->getReturnType()->isAggregateType()) { + if (MF.getFunction().getReturnType()->isAggregateType()) { fail(DL, DAG, "only integer returns supported"); return DAG.getNode(Opc, DL, MVT::Other, Chain); } diff --git a/lib/Target/BPF/BPFRegisterInfo.cpp b/lib/Target/BPF/BPFRegisterInfo.cpp index 00d609e8960..6f706781609 100644 --- a/lib/Target/BPF/BPFRegisterInfo.cpp +++ b/lib/Target/BPF/BPFRegisterInfo.cpp @@ -45,12 +45,12 @@ BitVector BPFRegisterInfo::getReservedRegs(const MachineFunction &MF) const { static void WarnSize(int Offset, MachineFunction &MF, DebugLoc& DL) { if (Offset <= -512) { - auto F = MF.getFunction(); - DiagnosticInfoUnsupported DiagStackSize(*F, + const Function &F = MF.getFunction(); + DiagnosticInfoUnsupported DiagStackSize(F, "Looks like the BPF stack limit of 512 bytes is exceeded. 
" "Please move large on stack variables into BPF per-cpu array map.\n", DL); - F->getContext().diagnose(DiagStackSize); + F.getContext().diagnose(DiagStackSize); } } diff --git a/lib/Target/Hexagon/HexagonBitSimplify.cpp b/lib/Target/Hexagon/HexagonBitSimplify.cpp index f14beaad339..9e73766b6fd 100644 --- a/lib/Target/Hexagon/HexagonBitSimplify.cpp +++ b/lib/Target/Hexagon/HexagonBitSimplify.cpp @@ -2631,7 +2631,7 @@ bool BitSimplification::processBlock(MachineBasicBlock &B, } bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; auto &HST = MF.getSubtarget<HexagonSubtarget>(); @@ -3181,7 +3181,7 @@ bool HexagonLoopRescheduling::processLoop(LoopCand &C) { } bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; auto &HST = MF.getSubtarget<HexagonSubtarget>(); diff --git a/lib/Target/Hexagon/HexagonBitTracker.cpp b/lib/Target/Hexagon/HexagonBitTracker.cpp index 8297c474b8f..b6e220beb0c 100644 --- a/lib/Target/Hexagon/HexagonBitTracker.cpp +++ b/lib/Target/Hexagon/HexagonBitTracker.cpp @@ -61,7 +61,7 @@ HexagonEvaluator::HexagonEvaluator(const HexagonRegisterInfo &tri, // passed via registers. unsigned InVirtReg, InPhysReg = 0; - for (const Argument &Arg : MF.getFunction()->args()) { + for (const Argument &Arg : MF.getFunction().args()) { Type *ATy = Arg.getType(); unsigned Width = 0; if (ATy->isIntegerTy()) diff --git a/lib/Target/Hexagon/HexagonCFGOptimizer.cpp b/lib/Target/Hexagon/HexagonCFGOptimizer.cpp index 6e43574ecb1..a22ac8c9fdf 100644 --- a/lib/Target/Hexagon/HexagonCFGOptimizer.cpp +++ b/lib/Target/Hexagon/HexagonCFGOptimizer.cpp @@ -114,7 +114,7 @@ bool HexagonCFGOptimizer::isOnFallThroughPath(MachineBasicBlock *MBB) { } bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) { - if (skipFunction(*Fn.getFunction())) + if (skipFunction(Fn.getFunction())) return false; // Loop over all of the basic blocks. 
diff --git a/lib/Target/Hexagon/HexagonConstExtenders.cpp b/lib/Target/Hexagon/HexagonConstExtenders.cpp index 1e55c4b038e..294a6da69f5 100644 --- a/lib/Target/Hexagon/HexagonConstExtenders.cpp +++ b/lib/Target/Hexagon/HexagonConstExtenders.cpp @@ -1831,7 +1831,7 @@ const MachineOperand &HCE::getStoredValueOp(const MachineInstr &MI) const { } bool HCE::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; DEBUG(MF.print(dbgs() << "Before " << getPassName() << '\n', nullptr)); diff --git a/lib/Target/Hexagon/HexagonConstPropagation.cpp b/lib/Target/Hexagon/HexagonConstPropagation.cpp index c59cc50c037..8ac96f3a4bf 100644 --- a/lib/Target/Hexagon/HexagonConstPropagation.cpp +++ b/lib/Target/Hexagon/HexagonConstPropagation.cpp @@ -280,7 +280,7 @@ namespace { public: MachineConstEvaluator(MachineFunction &Fn) : TRI(*Fn.getSubtarget().getRegisterInfo()), - MF(Fn), CX(Fn.getFunction()->getContext()) {} + MF(Fn), CX(Fn.getFunction().getContext()) {} virtual ~MachineConstEvaluator() = default; // The required interface: @@ -1890,10 +1890,8 @@ namespace { } bool runOnMachineFunction(MachineFunction &MF) override { - const Function *F = MF.getFunction(); - if (!F) - return false; - if (skipFunction(*F)) + const Function &F = MF.getFunction(); + if (skipFunction(F)) return false; HexagonConstEvaluator HCE(MF); @@ -2925,7 +2923,7 @@ bool HexagonConstEvaluator::rewriteHexConstDefs(MachineInstr &MI, DEBUG({ if (!NewInstrs.empty()) { MachineFunction &MF = *MI.getParent()->getParent(); - dbgs() << "In function: " << MF.getFunction()->getName() << "\n"; + dbgs() << "In function: " << MF.getName() << "\n"; dbgs() << "Rewrite: for " << MI << " created " << *NewInstrs[0]; for (unsigned i = 1; i < NewInstrs.size(); ++i) dbgs() << " " << *NewInstrs[i]; diff --git a/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/lib/Target/Hexagon/HexagonCopyToCombine.cpp index d8135e95fba..087a77203fc 100644 --- a/lib/Target/Hexagon/HexagonCopyToCombine.cpp +++ b/lib/Target/Hexagon/HexagonCopyToCombine.cpp @@ -459,7 +459,7 @@ HexagonCopyToCombine::findPotentialNewifiableTFRs(MachineBasicBlock &BB) { } bool HexagonCopyToCombine::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; if (IsCombinesDisabled) return false; @@ -471,8 +471,8 @@ bool HexagonCopyToCombine::runOnMachineFunction(MachineFunction &MF) { TRI = ST->getRegisterInfo(); TII = ST->getInstrInfo(); - const Function *F = MF.getFunction(); - bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize); + const Function &F = MF.getFunction(); + bool OptForSize = F.hasFnAttribute(Attribute::OptimizeForSize); // Combine aggressively (for code size) ShouldCombineAggressively = diff --git a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp index 93ad2e7b5eb..0f1b9a4733c 100644 --- a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp +++ b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp @@ -1047,7 +1047,7 @@ void HexagonEarlyIfConversion::simplifyFlowGraph(const FlowPattern &FP) { } bool HexagonEarlyIfConversion::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; auto &ST = MF.getSubtarget<HexagonSubtarget>(); diff --git a/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/lib/Target/Hexagon/HexagonExpandCondsets.cpp index 458a48e3308..c2feaf5737b 100644 --- a/lib/Target/Hexagon/HexagonExpandCondsets.cpp +++ 
b/lib/Target/Hexagon/HexagonExpandCondsets.cpp @@ -1243,7 +1243,7 @@ bool HexagonExpandCondsets::coalesceSegments( } bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; HII = static_cast<const HexagonInstrInfo*>(MF.getSubtarget().getInstrInfo()); @@ -1253,7 +1253,7 @@ bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) { MRI = &MF.getRegInfo(); DEBUG(LIS->print(dbgs() << "Before expand-condsets\n", - MF.getFunction()->getParent())); + MF.getFunction().getParent())); bool Changed = false; std::set<unsigned> CoalUpd, PredUpd; @@ -1281,7 +1281,7 @@ bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) { KillUpd.insert(Op.getReg()); updateLiveness(KillUpd, false, true, false); DEBUG(LIS->print(dbgs() << "After coalescing\n", - MF.getFunction()->getParent())); + MF.getFunction().getParent())); // First, simply split all muxes into a pair of conditional transfers // and update the live intervals to reflect the new arrangement. The @@ -1298,7 +1298,7 @@ bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) { // (because of predicated defs), so make sure they are left untouched. // Predication does not use live intervals. DEBUG(LIS->print(dbgs() << "After splitting\n", - MF.getFunction()->getParent())); + MF.getFunction().getParent())); // Traverse all blocks and collapse predicable instructions feeding // conditional transfers into predicated instructions. @@ -1307,7 +1307,7 @@ bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) { for (auto &B : MF) Changed |= predicateInBlock(B, PredUpd); DEBUG(LIS->print(dbgs() << "After predicating\n", - MF.getFunction()->getParent())); + MF.getFunction().getParent())); PredUpd.insert(CoalUpd.begin(), CoalUpd.end()); updateLiveness(PredUpd, true, true, true); @@ -1315,7 +1315,7 @@ bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) { DEBUG({ if (Changed) LIS->print(dbgs() << "After expand-condsets\n", - MF.getFunction()->getParent()); + MF.getFunction().getParent()); }); return Changed; diff --git a/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/lib/Target/Hexagon/HexagonFixupHwLoops.cpp index 6336075917e..a842b672736 100644 --- a/lib/Target/Hexagon/HexagonFixupHwLoops.cpp +++ b/lib/Target/Hexagon/HexagonFixupHwLoops.cpp @@ -89,7 +89,7 @@ static bool isHardwareLoop(const MachineInstr &MI) { } bool HexagonFixupHwLoops::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; return fixupLoopInstrs(MF); } diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp index 3d8d561e31c..65a2fc35b11 100644 --- a/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -225,7 +225,7 @@ namespace { bool HexagonCallFrameInformation::runOnMachineFunction(MachineFunction &MF) { auto &HFI = *MF.getSubtarget<HexagonSubtarget>().getFrameLowering(); bool NeedCFI = MF.getMMI().hasDebugInfo() || - MF.getFunction()->needsUnwindTableEntry(); + MF.getFunction().needsUnwindTableEntry(); if (!NeedCFI) return false; @@ -375,17 +375,17 @@ static bool isRestoreCall(unsigned Opc) { } static inline bool isOptNone(const MachineFunction &MF) { - return MF.getFunction()->hasFnAttribute(Attribute::OptimizeNone) || + return MF.getFunction().hasFnAttribute(Attribute::OptimizeNone) || MF.getTarget().getOptLevel() == CodeGenOpt::None; } static 
inline bool isOptSize(const MachineFunction &MF) { - const Function &F = *MF.getFunction(); + const Function &F = MF.getFunction(); return F.optForSize() && !F.optForMinSize(); } static inline bool isMinSize(const MachineFunction &MF) { - return MF.getFunction()->optForMinSize(); + return MF.getFunction().optForMinSize(); } /// Implements shrink-wrapping of the stack frame. By default, stack frame @@ -960,7 +960,7 @@ void HexagonFrameLowering::insertCFIInstructionsAt(MachineBasicBlock &MBB, } bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const { - if (MF.getFunction()->hasFnAttribute(Attribute::Naked)) + if (MF.getFunction().hasFnAttribute(Attribute::Naked)) return false; auto &MFI = MF.getFrameInfo(); @@ -1396,8 +1396,7 @@ static void dump_registers(BitVector &Regs, const TargetRegisterInfo &TRI) { bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector<CalleeSavedInfo> &CSI) const { - DEBUG(dbgs() << __func__ << " on " - << MF.getFunction()->getName() << '\n'); + DEBUG(dbgs() << __func__ << " on " << MF.getName() << '\n'); MachineFrameInfo &MFI = MF.getFrameInfo(); BitVector SRegs(Hexagon::NUM_TARGET_REGS); diff --git a/lib/Target/Hexagon/HexagonGenInsert.cpp b/lib/Target/Hexagon/HexagonGenInsert.cpp index 99f3a2e9e88..c1841d735b8 100644 --- a/lib/Target/Hexagon/HexagonGenInsert.cpp +++ b/lib/Target/Hexagon/HexagonGenInsert.cpp @@ -1482,7 +1482,7 @@ bool HexagonGenInsert::removeDeadCode(MachineDomTreeNode *N) { } bool HexagonGenInsert::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; bool Timing = OptTiming, TimingDetail = Timing && OptTimingDetail; diff --git a/lib/Target/Hexagon/HexagonGenMux.cpp b/lib/Target/Hexagon/HexagonGenMux.cpp index dc1cdc8d096..5a001d6ed9c 100644 --- a/lib/Target/Hexagon/HexagonGenMux.cpp +++ b/lib/Target/Hexagon/HexagonGenMux.cpp @@ -368,7 +368,7 @@ bool HexagonGenMux::genMuxInBlock(MachineBasicBlock &B) { } bool HexagonGenMux::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; HII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo(); HRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo(); diff --git a/lib/Target/Hexagon/HexagonGenPredicate.cpp b/lib/Target/Hexagon/HexagonGenPredicate.cpp index 4eb24e07be4..9288ed03d4d 100644 --- a/lib/Target/Hexagon/HexagonGenPredicate.cpp +++ b/lib/Target/Hexagon/HexagonGenPredicate.cpp @@ -492,7 +492,7 @@ bool HexagonGenPredicate::eliminatePredCopies(MachineFunction &MF) { } bool HexagonGenPredicate::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; TII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo(); diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp index d814fa79ea2..715fd52f3ac 100644 --- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -377,7 +377,7 @@ FunctionPass *llvm::createHexagonHardwareLoops() { bool HexagonHardwareLoops::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << "********* Hexagon Hardware Loops *********\n"); - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; bool Changed = false; diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index 1101b232850..a6ac4e3df74 100644 
--- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -1746,10 +1746,10 @@ unsigned HexagonDAGToDAGISel::getUsesInFunction(const Value *V) { return GAUsesInFunction[V]; unsigned Result = 0; - const Function *CurF = CurDAG->getMachineFunction().getFunction(); + const Function &CurF = CurDAG->getMachineFunction().getFunction(); for (const User *U : V->users()) { if (isa<Instruction>(U) && - cast<Instruction>(U)->getParent()->getParent() == CurF) + cast<Instruction>(U)->getParent()->getParent() == &CurF) ++Result; } diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp index 718e09ac57c..6ae52701f7f 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -717,12 +717,12 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, else CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon); - auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls"); + auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls"); if (Attr.getValueAsString() == "true") IsTailCall = false; if (IsTailCall) { - bool StructAttrFlag = MF.getFunction()->hasStructRetAttr(); + bool StructAttrFlag = MF.getFunction().hasStructRetAttr(); IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, IsVarArg, IsStructRet, StructAttrFlag, @@ -3006,8 +3006,8 @@ bool HexagonTargetLowering::IsEligibleForTailCallOptimization( const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG& DAG) const { - const Function *CallerF = DAG.getMachineFunction().getFunction(); - CallingConv::ID CallerCC = CallerF->getCallingConv(); + const Function &CallerF = DAG.getMachineFunction().getFunction(); + CallingConv::ID CallerCC = CallerF.getCallingConv(); bool CCMatch = CallerCC == CalleeCC; // *************************************************************************** diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp index 8765fc98448..b1c549aa13f 100644 --- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp +++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp @@ -188,7 +188,7 @@ bool VLIWResourceModel::reserveResources(SUnit *SU) { void VLIWMachineScheduler::schedule() { DEBUG(dbgs() << "********** MI Converging Scheduling VLIW " << printMBBReference(*BB) << " " << BB->getName() << " in_func " - << BB->getParent()->getFunction()->getName() << " at loop depth " + << BB->getParent()->getName() << " at loop depth " << MLI->getLoopDepth(BB) << " \n"); buildDAGWithRegPressure(); diff --git a/lib/Target/Hexagon/HexagonNewValueJump.cpp b/lib/Target/Hexagon/HexagonNewValueJump.cpp index 99c16f14919..ffa447cc131 100644 --- a/lib/Target/Hexagon/HexagonNewValueJump.cpp +++ b/lib/Target/Hexagon/HexagonNewValueJump.cpp @@ -434,7 +434,7 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << "********** Hexagon New Value Jump **********\n" << "********** Function: " << MF.getName() << "\n"); - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; // If we move NewValueJump before register allocation we'll need live variable diff --git a/lib/Target/Hexagon/HexagonOptAddrMode.cpp b/lib/Target/Hexagon/HexagonOptAddrMode.cpp index d97ed4812f2..4738a4d3240 100644 --- a/lib/Target/Hexagon/HexagonOptAddrMode.cpp +++ b/lib/Target/Hexagon/HexagonOptAddrMode.cpp @@ -595,7 +595,7 @@ bool HexagonOptAddrMode::processBlock(NodeAddr<BlockNode *> 
BA) { } bool HexagonOptAddrMode::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; bool Changed = false; diff --git a/lib/Target/Hexagon/HexagonPeephole.cpp b/lib/Target/Hexagon/HexagonPeephole.cpp index 581761c904a..3c588a89b0d 100644 --- a/lib/Target/Hexagon/HexagonPeephole.cpp +++ b/lib/Target/Hexagon/HexagonPeephole.cpp @@ -108,7 +108,7 @@ INITIALIZE_PASS(HexagonPeephole, "hexagon-peephole", "Hexagon Peephole", false, false) bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; QII = static_cast<const HexagonInstrInfo *>(MF.getSubtarget().getInstrInfo()); diff --git a/lib/Target/Hexagon/HexagonRDFOpt.cpp b/lib/Target/Hexagon/HexagonRDFOpt.cpp index c73a2304e07..413bc8edf2b 100644 --- a/lib/Target/Hexagon/HexagonRDFOpt.cpp +++ b/lib/Target/Hexagon/HexagonRDFOpt.cpp @@ -280,7 +280,7 @@ bool HexagonDCE::rewrite(NodeAddr<InstrNode*> IA, SetVector<NodeId> &Remove) { } bool HexagonRDFOpt::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; if (RDFLimit.getPosition()) { diff --git a/lib/Target/Hexagon/HexagonSplitDouble.cpp b/lib/Target/Hexagon/HexagonSplitDouble.cpp index 68b5ddd4438..c9f5400018e 100644 --- a/lib/Target/Hexagon/HexagonSplitDouble.cpp +++ b/lib/Target/Hexagon/HexagonSplitDouble.cpp @@ -1163,7 +1163,7 @@ bool HexagonSplitDoubleRegs::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << "Splitting double registers in function: " << MF.getName() << '\n'); - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; auto &ST = MF.getSubtarget<HexagonSubtarget>(); diff --git a/lib/Target/Hexagon/HexagonStoreWidening.cpp b/lib/Target/Hexagon/HexagonStoreWidening.cpp index fb3e6a0fb10..300f6de3355 100644 --- a/lib/Target/Hexagon/HexagonStoreWidening.cpp +++ b/lib/Target/Hexagon/HexagonStoreWidening.cpp @@ -585,7 +585,7 @@ bool HexagonStoreWidening::processBasicBlock(MachineBasicBlock &MBB) { } bool HexagonStoreWidening::runOnMachineFunction(MachineFunction &MFn) { - if (skipFunction(*MFn.getFunction())) + if (skipFunction(MFn.getFunction())) return false; MF = &MFn; diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp index e745447c32d..c2404235091 100644 --- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp +++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp @@ -199,7 +199,7 @@ static MachineBasicBlock::iterator moveInstrOut(MachineInstr &MI, } bool HexagonPacketizer::runOnMachineFunction(MachineFunction &MF) { - if (DisablePacketizer || skipFunction(*MF.getFunction())) + if (DisablePacketizer || skipFunction(MF.getFunction())) return false; HII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo(); diff --git a/lib/Target/Hexagon/RDFGraph.cpp b/lib/Target/Hexagon/RDFGraph.cpp index 8513ebd1c76..d1f6e5a4c8e 100644 --- a/lib/Target/Hexagon/RDFGraph.cpp +++ b/lib/Target/Hexagon/RDFGraph.cpp @@ -766,7 +766,7 @@ unsigned DataFlowGraph::DefStack::nextDown(unsigned P) const { RegisterSet DataFlowGraph::getLandingPadLiveIns() const { RegisterSet LR; - const Function &F = *MF.getFunction(); + const Function &F = MF.getFunction(); const Constant *PF = F.hasPersonalityFn() ? 
F.getPersonalityFn() : nullptr; const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering(); diff --git a/lib/Target/Lanai/LanaiISelLowering.cpp b/lib/Target/Lanai/LanaiISelLowering.cpp index 7e4fd24b60e..17567436384 100644 --- a/lib/Target/Lanai/LanaiISelLowering.cpp +++ b/lib/Target/Lanai/LanaiISelLowering.cpp @@ -513,7 +513,7 @@ SDValue LanaiTargetLowering::LowerCCCArguments( // The Lanai ABI for returning structs by value requires that we copy // the sret argument into rv for the return. Save the argument into // a virtual register so that we can access it from the return points. - if (MF.getFunction()->hasStructRetAttr()) { + if (MF.getFunction().hasStructRetAttr()) { unsigned Reg = LanaiMFI->getSRetReturnReg(); if (!Reg) { Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32)); @@ -568,7 +568,7 @@ LanaiTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, // the sret argument into rv for the return. We saved the argument into // a virtual register in the entry block, so now we copy the value out // and into rv. - if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { + if (DAG.getMachineFunction().getFunction().hasStructRetAttr()) { MachineFunction &MF = DAG.getMachineFunction(); LanaiMachineFunctionInfo *LanaiMFI = MF.getInfo<LanaiMachineFunctionInfo>(); unsigned Reg = LanaiMFI->getSRetReturnReg(); diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp index 7cfcb965899..f5b2bda5d1e 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -746,7 +746,7 @@ MSP430TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); } - if (MF.getFunction()->hasStructRetAttr()) { + if (MF.getFunction().hasStructRetAttr()) { MSP430MachineFunctionInfo *FuncInfo = MF.getInfo<MSP430MachineFunctionInfo>(); unsigned Reg = FuncInfo->getSRetReturnReg(); diff --git a/lib/Target/MSP430/MSP430RegisterInfo.cpp b/lib/Target/MSP430/MSP430RegisterInfo.cpp index 7a3b7a8bd5f..54e53e19eb5 100644 --- a/lib/Target/MSP430/MSP430RegisterInfo.cpp +++ b/lib/Target/MSP430/MSP430RegisterInfo.cpp @@ -38,7 +38,7 @@ MSP430RegisterInfo::MSP430RegisterInfo() const MCPhysReg* MSP430RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { const MSP430FrameLowering *TFI = getFrameLowering(*MF); - const Function* F = MF->getFunction(); + const Function* F = &MF->getFunction(); static const MCPhysReg CalleeSavedRegs[] = { MSP430::FP, MSP430::R5, MSP430::R6, MSP430::R7, MSP430::R8, MSP430::R9, MSP430::R10, diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp index fbf7b5e28b7..f9de78dc281 100644 --- a/lib/Target/Mips/MipsAsmPrinter.cpp +++ b/lib/Target/Mips/MipsAsmPrinter.cpp @@ -381,7 +381,7 @@ void MipsAsmPrinter::EmitFunctionBodyStart() { MCInstLowering.Initialize(&MF->getContext()); - bool IsNakedFunction = MF->getFunction()->hasFnAttribute(Attribute::Naked); + bool IsNakedFunction = MF->getFunction().hasFnAttribute(Attribute::Naked); if (!IsNakedFunction) emitFrameDirective(); diff --git a/lib/Target/Mips/MipsCCState.cpp b/lib/Target/Mips/MipsCCState.cpp index 6a03ee9927d..81a1cced93b 100644 --- a/lib/Target/Mips/MipsCCState.cpp +++ b/lib/Target/Mips/MipsCCState.cpp @@ -101,9 +101,9 @@ void MipsCCState::PreAnalyzeReturnForF128( const MachineFunction &MF = getMachineFunction(); for (unsigned i = 0; i < Outs.size(); ++i) { OriginalArgWasF128.push_back( - 
originalTypeIsF128(MF.getFunction()->getReturnType(), nullptr)); + originalTypeIsF128(MF.getFunction().getReturnType(), nullptr)); OriginalArgWasFloat.push_back( - MF.getFunction()->getReturnType()->isFloatingPointTy()); + MF.getFunction().getReturnType()->isFloatingPointTy()); } } @@ -149,7 +149,7 @@ void MipsCCState::PreAnalyzeFormalArgumentsForF128( const SmallVectorImpl<ISD::InputArg> &Ins) { const MachineFunction &MF = getMachineFunction(); for (unsigned i = 0; i < Ins.size(); ++i) { - Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); + Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); // SRet arguments cannot originate from f128 or {f128} returns so we just // push false. We have to handle this specially since SRet arguments @@ -161,7 +161,7 @@ void MipsCCState::PreAnalyzeFormalArgumentsForF128( continue; } - assert(Ins[i].getOrigArgIndex() < MF.getFunction()->arg_size()); + assert(Ins[i].getOrigArgIndex() < MF.getFunction().arg_size()); std::advance(FuncArg, Ins[i].getOrigArgIndex()); OriginalArgWasF128.push_back( diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp index 4dad98b80ed..a9abc171b42 100644 --- a/lib/Target/Mips/MipsConstantIslandPass.cpp +++ b/lib/Target/Mips/MipsConstantIslandPass.cpp @@ -1661,7 +1661,7 @@ void MipsConstantIslands::prescanForConstants() { int64_t V = Literal.getImm(); DEBUG(dbgs() << "literal " << V << "\n"); Type *Int32Ty = - Type::getInt32Ty(MF->getFunction()->getContext()); + Type::getInt32Ty(MF->getFunction().getContext()); const Constant *C = ConstantInt::get(Int32Ty, V); unsigned index = MCP->getConstantPoolIndex(C, 4); I->getOperand(2).ChangeToImmediate(index); diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index 4acae99e989..6448fd91756 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -3359,10 +3359,10 @@ SDValue MipsTargetLowering::LowerFormalArguments( MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, *DAG.getContext()); CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1); - const Function *Func = DAG.getMachineFunction().getFunction(); - Function::const_arg_iterator FuncArg = Func->arg_begin(); + const Function &Func = DAG.getMachineFunction().getFunction(); + Function::const_arg_iterator FuncArg = Func.arg_begin(); - if (Func->hasFnAttribute("interrupt") && !Func->arg_empty()) + if (Func.hasFnAttribute("interrupt") && !Func.arg_empty()) report_fatal_error( "Functions with the interrupt attribute cannot have arguments!"); @@ -3600,7 +3600,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, // the sret argument into $v0 for the return. We saved the argument into // a virtual register in the entry block, so now we copy the value out // and into $v0. - if (MF.getFunction()->hasStructRetAttr()) { + if (MF.getFunction().hasStructRetAttr()) { MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>(); unsigned Reg = MipsFI->getSRetReturnReg(); @@ -3622,7 +3622,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, RetOps.push_back(Flag); // ISRs must use "eret". 
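The Mips return lowering just below keys the return sequence off a string function attribute. A small illustrative sketch of that decision under the reference-returning accessor; the attribute map is a toy stand-in, not LLVM's AttributeList:

#include <cstring>
#include <map>
#include <string>

struct Function {
  std::map<std::string, std::string> Attrs; // toy attribute storage
  bool hasFnAttribute(const std::string &Kind) const {
    return Attrs.count(Kind) != 0;
  }
};

struct MachineFunction {
  Function F;
  const Function &getFunction() const { return F; }
};

// Toy version of the choice in the hunk below: interrupt service routines
// must return with "eret", ordinary functions with "jr $ra".
static const char *pickReturn(const MachineFunction &MF) {
  return MF.getFunction().hasFnAttribute("interrupt") ? "eret" : "jr $ra";
}

int main() {
  MachineFunction ISR, Plain;
  ISR.F.Attrs["interrupt"] = "eic";
  return (std::strcmp(pickReturn(ISR), "eret") == 0 &&
          std::strcmp(pickReturn(Plain), "jr $ra") == 0) ? 0 : 1;
}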
- if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt")) + if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt")) return LowerInterruptReturn(RetOps, DL, DAG); // Standard return on Mips is a "jr $ra" diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp index d0dc043d243..0e0d82270c8 100644 --- a/lib/Target/Mips/MipsRegisterInfo.cpp +++ b/lib/Target/Mips/MipsRegisterInfo.cpp @@ -93,8 +93,8 @@ MipsRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC, const MCPhysReg * MipsRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { const MipsSubtarget &Subtarget = MF->getSubtarget<MipsSubtarget>(); - const Function *F = MF->getFunction(); - if (F->hasFnAttribute("interrupt")) { + const Function &F = MF->getFunction(); + if (F.hasFnAttribute("interrupt")) { if (Subtarget.hasMips64()) return Subtarget.hasMips64r6() ? CSR_Interrupt_64R6_SaveList : CSR_Interrupt_64_SaveList; @@ -238,7 +238,7 @@ getReservedRegs(const MachineFunction &MF) const { Reserved.set(Mips::RA_64); Reserved.set(Mips::T0); Reserved.set(Mips::T1); - if (MF.getFunction()->hasFnAttribute("saveS2") || MipsFI->hasSaveS2()) + if (MF.getFunction().hasFnAttribute("saveS2") || MipsFI->hasSaveS2()) Reserved.set(Mips::S2); } diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp index 5d4fbffa20a..eb1eea7925c 100644 --- a/lib/Target/Mips/MipsSEFrameLowering.cpp +++ b/lib/Target/Mips/MipsSEFrameLowering.cpp @@ -434,7 +434,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF, BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) .addCFIIndex(CFIIndex); - if (MF.getFunction()->hasFnAttribute("interrupt")) + if (MF.getFunction().hasFnAttribute("interrupt")) emitInterruptPrologueStub(MF, MBB); const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo(); @@ -582,7 +582,7 @@ void MipsSEFrameLowering::emitInterruptPrologueStub( // Perform ISR handling like GCC StringRef IntKind = - MF.getFunction()->getFnAttribute("interrupt").getValueAsString(); + MF.getFunction().getFnAttribute("interrupt").getValueAsString(); const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass; // EIC interrupt handling needs to read the Cause register to disable @@ -726,7 +726,7 @@ void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF, } } - if (MF.getFunction()->hasFnAttribute("interrupt")) + if (MF.getFunction().hasFnAttribute("interrupt")) emitInterruptEpilogueStub(MF, MBB); // Get the number of bytes from FrameInfo @@ -809,8 +809,8 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB, // spilled to the stack frame. 
bool IsLOHI = (Reg == Mips::LO0 || Reg == Mips::LO0_64 || Reg == Mips::HI0 || Reg == Mips::HI0_64); - const Function *Func = MBB.getParent()->getFunction(); - if (IsLOHI && Func->hasFnAttribute("interrupt")) { + const Function &Func = MBB.getParent()->getFunction(); + if (IsLOHI && Func.hasFnAttribute("interrupt")) { DebugLoc DL = MI->getDebugLoc(); unsigned Op = 0; diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp index 3c6a7d7a665..893cae93e58 100644 --- a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp @@ -161,7 +161,7 @@ void MipsSEDAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) { // lui $v0, %hi(%neg(%gp_rel(fname))) // daddu $v1, $v0, $t9 // daddiu $globalbasereg, $v1, %lo(%neg(%gp_rel(fname))) - const GlobalValue *FName = MF.getFunction(); + const GlobalValue *FName = &MF.getFunction(); BuildMI(MBB, I, DL, TII.get(Mips::LUi64), V0) .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI); BuildMI(MBB, I, DL, TII.get(Mips::DADDu), V1).addReg(V0) @@ -190,7 +190,7 @@ void MipsSEDAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) { // lui $v0, %hi(%neg(%gp_rel(fname))) // addu $v1, $v0, $t9 // addiu $globalbasereg, $v1, %lo(%neg(%gp_rel(fname))) - const GlobalValue *FName = MF.getFunction(); + const GlobalValue *FName = &MF.getFunction(); BuildMI(MBB, I, DL, TII.get(Mips::LUi), V0) .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI); BuildMI(MBB, I, DL, TII.get(Mips::ADDu), V1).addReg(V0).addReg(Mips::T9); @@ -1247,7 +1247,7 @@ bool MipsSEDAGToDAGISel::trySelect(SDNode *Node) { // handled by the ldi case. if (ResNonZero) { IntegerType *Int32Ty = - IntegerType::get(MF->getFunction()->getContext(), 32); + IntegerType::get(MF->getFunction().getContext(), 32); const ConstantInt *Const32 = ConstantInt::get(Int32Ty, 32); SDValue Ops[4] = {HiResNonZero ? SDValue(HiRes, 0) : Zero64Val, CurDAG->getConstant(*Const32, DL, MVT::i32), diff --git a/lib/Target/Mips/MipsSEInstrInfo.cpp b/lib/Target/Mips/MipsSEInstrInfo.cpp index 798d86622e5..59b7679971c 100644 --- a/lib/Target/Mips/MipsSEInstrInfo.cpp +++ b/lib/Target/Mips/MipsSEInstrInfo.cpp @@ -231,8 +231,8 @@ storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, // Hi, Lo are normally caller save but they are callee save // for interrupt handling. 
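Where an interface still wants a pointer, the updated code takes the address of the returned reference instead of restructuring the caller; the initGlobalBaseReg hunks above do exactly that, since MachineInstrBuilder::addGlobalAddress() takes a const GlobalValue *:

    // & recovers a pointer from the reference-returning accessor.
    const GlobalValue *FName = &MF.getFunction();
    BuildMI(MBB, I, DL, TII.get(Mips::LUi), V0)
        .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI);
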
- const Function *Func = MBB.getParent()->getFunction(); - if (Func->hasFnAttribute("interrupt")) { + const Function &Func = MBB.getParent()->getFunction(); + if (Func.hasFnAttribute("interrupt")) { if (Mips::HI32RegClass.hasSubClassEq(RC)) { BuildMI(MBB, I, DL, get(Mips::MFHI), Mips::K0); SrcReg = Mips::K0; @@ -262,8 +262,8 @@ loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad); unsigned Opc = 0; - const Function *Func = MBB.getParent()->getFunction(); - bool ReqIndirectLoad = Func->hasFnAttribute("interrupt") && + const Function &Func = MBB.getParent()->getFunction(); + bool ReqIndirectLoad = Func.hasFnAttribute("interrupt") && (DestReg == Mips::LO0 || DestReg == Mips::LO0_64 || DestReg == Mips::HI0 || DestReg == Mips::HI0_64); diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp index 9a12b98984e..85193bffef5 100644 --- a/lib/Target/Mips/MipsTargetMachine.cpp +++ b/lib/Target/Mips/MipsTargetMachine.cpp @@ -200,7 +200,7 @@ MipsTargetMachine::getSubtargetImpl(const Function &F) const { void MipsTargetMachine::resetSubtarget(MachineFunction *MF) { DEBUG(dbgs() << "resetSubtarget\n"); - Subtarget = const_cast<MipsSubtarget *>(getSubtargetImpl(*MF->getFunction())); + Subtarget = const_cast<MipsSubtarget *>(getSubtargetImpl(MF->getFunction())); MF->setSubtarget(Subtarget); } diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp index d0b47f61e11..2aa395642c4 100644 --- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp +++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp @@ -457,8 +457,8 @@ void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) { void NVPTXAsmPrinter::printReturnValStr(const MachineFunction &MF, raw_ostream &O) { - const Function *F = MF.getFunction(); - printReturnValStr(F, O); + const Function &F = MF.getFunction(); + printReturnValStr(&F, O); } // Return true if MBB is the header of a loop marked with @@ -502,13 +502,13 @@ void NVPTXAsmPrinter::EmitFunctionEntryLabel() { raw_svector_ostream O(Str); if (!GlobalsEmitted) { - emitGlobals(*MF->getFunction()->getParent()); + emitGlobals(*MF->getFunction().getParent()); GlobalsEmitted = true; } // Set up MRI = &MF->getRegInfo(); - F = MF->getFunction(); + F = &MF->getFunction(); emitLinkageDirective(F, O); if (isKernelFunction(*F)) O << ".entry "; @@ -536,7 +536,7 @@ void NVPTXAsmPrinter::EmitFunctionBodyStart() { SmallString<128> Str; raw_svector_ostream O(Str); - emitDemotedVars(MF->getFunction(), O); + emitDemotedVars(&MF->getFunction(), O); OutStreamer->EmitRawText(O.str()); } @@ -1708,8 +1708,8 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) { void NVPTXAsmPrinter::emitFunctionParamList(const MachineFunction &MF, raw_ostream &O) { - const Function *F = MF.getFunction(); - emitFunctionParamList(F, O); + const Function &F = MF.getFunction(); + emitFunctionParamList(&F, O); } void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters( @@ -2156,7 +2156,7 @@ NVPTXAsmPrinter::lowerConstantForGV(const Constant *CV, bool ProcessingGeneric) raw_string_ostream OS(S); OS << "Unsupported expression in static initializer: "; CE->printAsOperand(OS, /*PrintType=*/false, - !MF ? nullptr : MF->getFunction()->getParent()); + !MF ? 
nullptr : MF->getFunction().getParent()); report_fatal_error(OS.str()); } @@ -2170,7 +2170,7 @@ NVPTXAsmPrinter::lowerConstantForGV(const Constant *CV, bool ProcessingGeneric) raw_string_ostream OS(S); OS << "Unsupported expression in static initializer: "; CE->printAsOperand(OS, /*PrintType=*/ false, - !MF ? nullptr : MF->getFunction()->getParent()); + !MF ? nullptr : MF->getFunction().getParent()); report_fatal_error(OS.str()); } diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index 714260d372b..57e2acc0d7e 100644 --- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -1003,7 +1003,7 @@ static bool canLowerToLDG(MemSDNode *N, const NVPTXSubtarget &Subtarget, return true; // Load wasn't explicitly invariant. Attempt to infer invariance. - if (!isKernelFunction(*F->getFunction())) + if (!isKernelFunction(F->getFunction())) return false; // We use GetUnderlyingObjects() here instead of diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp index 21381896312..f1e4251a44b 100644 --- a/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -123,10 +123,10 @@ bool NVPTXTargetLowering::useF32FTZ(const MachineFunction &MF) const { // If nvptx-f32ftz is used on the command-line, always honor it return FtzEnabled; } else { - const Function *F = MF.getFunction(); + const Function &F = MF.getFunction(); // Otherwise, check for an nvptx-f32ftz attribute on the function - if (F->hasFnAttribute("nvptx-f32ftz")) - return F->getFnAttribute("nvptx-f32ftz").getValueAsString() == "true"; + if (F.hasFnAttribute("nvptx-f32ftz")) + return F.getFnAttribute("nvptx-f32ftz").getValueAsString() == "true"; else return false; } @@ -2329,7 +2329,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments( const DataLayout &DL = DAG.getDataLayout(); auto PtrVT = getPointerTy(DAG.getDataLayout()); - const Function *F = MF.getFunction(); + const Function *F = &MF.getFunction(); const AttributeList &PAL = F->getAttributes(); const TargetLowering *TLI = STI.getTargetLowering(); @@ -2525,7 +2525,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); - Type *RetTy = MF.getFunction()->getReturnType(); + Type *RetTy = MF.getFunction().getReturnType(); bool isABI = (STI.getSmVersion() >= 20); assert(isABI && "Non-ABI compilation is not supported"); @@ -4022,9 +4022,9 @@ bool NVPTXTargetLowering::allowUnsafeFPMath(MachineFunction &MF) const { return true; // Allow unsafe math if unsafe-fp-math attribute explicitly says so. 
- const Function *F = MF.getFunction(); - if (F->hasFnAttribute("unsafe-fp-math")) { - Attribute Attr = F->getFnAttribute("unsafe-fp-math"); + const Function &F = MF.getFunction(); + if (F.hasFnAttribute("unsafe-fp-math")) { + Attribute Attr = F.getFnAttribute("unsafe-fp-math"); StringRef Val = Attr.getValueAsString(); if (Val == "true") return true; diff --git a/lib/Target/NVPTX/NVPTXPeephole.cpp b/lib/Target/NVPTX/NVPTXPeephole.cpp index 415889dc70c..02c32c68ee2 100644 --- a/lib/Target/NVPTX/NVPTXPeephole.cpp +++ b/lib/Target/NVPTX/NVPTXPeephole.cpp @@ -125,7 +125,7 @@ static void CombineCVTAToLocal(MachineInstr &Root) { } bool NVPTXPeephole::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; bool Changed = false; diff --git a/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp b/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp index 2022caca76e..82befe4b101 100644 --- a/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp +++ b/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp @@ -158,7 +158,7 @@ findIndexForHandle(MachineOperand &Op, MachineFunction &MF, unsigned &Idx) { unsigned Param = atoi(Sym.data()+ParamBaseName.size()); std::string NewSym; raw_string_ostream NewSymStr(NewSym); - NewSymStr << MF.getFunction()->getName() << "_param_" << Param; + NewSymStr << MF.getName() << "_param_" << Param; InstrsToRemove.insert(&TexHandleDef); Idx = MFI->getImageHandleSymbolIndex(NewSymStr.str().c_str()); diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp index 545f0aa0c2f..17451900840 100644 --- a/lib/Target/PowerPC/PPCAsmPrinter.cpp +++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp @@ -507,7 +507,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { MCInst TmpInst; bool isPPC64 = Subtarget->isPPC64(); bool isDarwin = TM.getTargetTriple().isOSDarwin(); - const Module *M = MF->getFunction()->getParent(); + const Module *M = MF->getFunction().getParent(); PICLevel::Level PL = M->getPICLevel(); // Lower multi-instruction pseudo operations. @@ -1228,7 +1228,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() { // linux/ppc32 - Normal entry label. 
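String-valued function attributes are queried the same way through the reference; a sketch condensed from the allowUnsafeFPMath hunk above:

    const Function &F = MF.getFunction();
    bool UnsafeFP = F.hasFnAttribute("unsafe-fp-math") &&
                    F.getFnAttribute("unsafe-fp-math")
                        .getValueAsString() == "true";
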
if (!Subtarget->isPPC64() && (!isPositionIndependent() || - MF->getFunction()->getParent()->getPICLevel() == PICLevel::SmallPIC)) + MF->getFunction().getParent()->getPICLevel() == PICLevel::SmallPIC)) return AsmPrinter::EmitFunctionEntryLabel(); if (!Subtarget->isPPC64()) { diff --git a/lib/Target/PowerPC/PPCBranchCoalescing.cpp b/lib/Target/PowerPC/PPCBranchCoalescing.cpp index 48b94a53823..32d801b13de 100644 --- a/lib/Target/PowerPC/PPCBranchCoalescing.cpp +++ b/lib/Target/PowerPC/PPCBranchCoalescing.cpp @@ -714,7 +714,7 @@ bool PPCBranchCoalescing::mergeCandidates(CoalescingCandidateInfo &SourceRegion, bool PPCBranchCoalescing::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction()) || MF.empty()) + if (skipFunction(MF.getFunction()) || MF.empty()) return false; bool didSomething = false; diff --git a/lib/Target/PowerPC/PPCEarlyReturn.cpp b/lib/Target/PowerPC/PPCEarlyReturn.cpp index 811e4dd9dfe..1699463c0a4 100644 --- a/lib/Target/PowerPC/PPCEarlyReturn.cpp +++ b/lib/Target/PowerPC/PPCEarlyReturn.cpp @@ -173,7 +173,7 @@ protected: public: bool runOnMachineFunction(MachineFunction &MF) override { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; TII = MF.getSubtarget().getInstrInfo(); diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp index 0a01fdf9e67..c870a225669 100644 --- a/lib/Target/PowerPC/PPCFrameLowering.cpp +++ b/lib/Target/PowerPC/PPCFrameLowering.cpp @@ -434,7 +434,7 @@ unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF, const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo(); unsigned LR = RegInfo->getRARegister(); - bool DisableRedZone = MF.getFunction()->hasFnAttribute(Attribute::NoRedZone); + bool DisableRedZone = MF.getFunction().hasFnAttribute(Attribute::NoRedZone); bool CanUseRedZone = !MFI.hasVarSizedObjects() && // No dynamic alloca. !MFI.adjustsStack() && // No calls. !MustSaveLR(MF, LR) && // No need to save LR. @@ -499,7 +499,7 @@ bool PPCFrameLowering::needsFP(const MachineFunction &MF) const { // Naked functions have no stack frame pushed, so we don't have a frame // pointer. - if (MF.getFunction()->hasFnAttribute(Attribute::Naked)) + if (MF.getFunction().hasFnAttribute(Attribute::Naked)) return false; return MF.getTarget().Options.DisableFramePointerElim(MF) || @@ -692,7 +692,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF, const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo(); DebugLoc dl; bool needsCFI = MMI.hasDebugInfo() || - MF.getFunction()->needsUnwindTableEntry(); + MF.getFunction().needsUnwindTableEntry(); // Get processor type. 
bool isPPC64 = Subtarget.isPPC64(); @@ -1505,7 +1505,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, unsigned RetOpcode = MBBI->getOpcode(); if (MF.getTarget().Options.GuaranteedTailCallOpt && (RetOpcode == PPC::BLR || RetOpcode == PPC::BLR8) && - MF.getFunction()->getCallingConv() == CallingConv::Fast) { + MF.getFunction().getCallingConv() == CallingConv::Fast) { PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); unsigned CallerAllocatedAmt = FI->getMinReservedArea(); diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index 5e7a9335968..d3a223fe03e 100644 --- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -391,7 +391,7 @@ SDNode *PPCDAGToDAGISel::getGlobalBaseReg() { // Insert the set of GlobalBaseReg into the first MBB of the function MachineBasicBlock &FirstMBB = MF->front(); MachineBasicBlock::iterator MBBI = FirstMBB.begin(); - const Module *M = MF->getFunction()->getParent(); + const Module *M = MF->getFunction().getParent(); DebugLoc dl; if (PPCLowering->getPointerTy(CurDAG->getDataLayout()) == MVT::i32) { diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index ac864baffd6..18e567fa589 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -2573,7 +2573,7 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, const GlobalValue *GV = GA->getGlobal(); EVT PtrVT = getPointerTy(DAG.getDataLayout()); bool is64bit = Subtarget.isPPC64(); - const Module *M = DAG.getMachineFunction().getFunction()->getParent(); + const Module *M = DAG.getMachineFunction().getFunction().getParent(); PICLevel::Level picLevel = M->getPICLevel(); TLSModel::Model Model = getTargetMachine().getTLSModel(GV); @@ -3542,7 +3542,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; unsigned &QFPR_idx = FPR_idx; SmallVector<SDValue, 8> MemOps; - Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); + Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); unsigned CurArgIdx = 0; for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { SDValue ArgVal; @@ -3986,7 +3986,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_Darwin( SmallVector<SDValue, 8> MemOps; unsigned nAltivecParamsAtEnd = 0; - Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); + Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); unsigned CurArgIdx = 0; for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { SDValue ArgVal; @@ -4422,9 +4422,9 @@ PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4( // Variadic argument functions are not supported. if (isVarArg) return false; - auto *Caller = DAG.getMachineFunction().getFunction(); + auto &Caller = DAG.getMachineFunction().getFunction(); // Check that the calling conventions are compatible for tco. - if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(), CalleeCC)) + if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC)) return false; // Caller contains any byval parameter is not supported. @@ -4446,7 +4446,7 @@ PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4( // If the caller and callee potentially have different TOC bases then we // cannot tail call since we need to restore the TOC pointer after the call. 
// ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977 - if (!callsShareTOCBase(Caller, Callee, getTargetMachine())) + if (!callsShareTOCBase(&Caller, Callee, getTargetMachine())) return false; // TCO allows altering callee ABI, so we don't have to check further. @@ -4458,7 +4458,7 @@ PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4( // If callee use the same argument list that caller is using, then we can // apply SCO on this case. If it is not, then we need to check if callee needs // stack for passing arguments. - if (!hasSameArgumentList(Caller, CS) && + if (!hasSameArgumentList(&Caller, CS) && needStackSlotPassParameters(Subtarget, Outs)) { return false; } @@ -4483,7 +4483,7 @@ PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, return false; MachineFunction &MF = DAG.getMachineFunction(); - CallingConv::ID CallerCC = MF.getFunction()->getCallingConv(); + CallingConv::ID CallerCC = MF.getFunction().getCallingConv(); if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { // Functions containing by val parameters are not supported. for (unsigned i = 0; i != Ins.size(); i++) { @@ -4735,7 +4735,7 @@ PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, // we're building with the leopard linker or later, which automatically // synthesizes these stubs. const TargetMachine &TM = DAG.getTarget(); - const Module *Mod = DAG.getMachineFunction().getFunction()->getParent(); + const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); const GlobalValue *GV = nullptr; if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) GV = G->getGlobal(); @@ -5028,7 +5028,7 @@ SDValue PPCTargetLowering::FinishCall( // any other variadic arguments). Ops.insert(std::next(Ops.begin()), AddTOC); } else if (CallOpc == PPCISD::CALL && - !callsShareTOCBase(MF.getFunction(), Callee, DAG.getTarget())) { + !callsShareTOCBase(&MF.getFunction(), Callee, DAG.getTarget())) { // Otherwise insert NOP for non-local calls. CallOpc = PPCISD::CALL_NOP; } @@ -9797,7 +9797,7 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, // Naked functions never have a base pointer, and so we use r1. For all // other functions, this decision must be delayed until during PEI. unsigned BaseReg; - if (MF->getFunction()->hasFnAttribute(Attribute::Naked)) + if (MF->getFunction().hasFnAttribute(Attribute::Naked)) BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1; else BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP; @@ -13251,7 +13251,7 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, // Naked functions never have a frame pointer, and so we use r1. For all // other functions, this decision must be delayed until during PEI. unsigned FrameReg; - if (MF.getFunction()->hasFnAttribute(Attribute::Naked)) + if (MF.getFunction().hasFnAttribute(Attribute::Naked)) FrameReg = isPPC64 ? PPC::X1 : PPC::R1; else FrameReg = isPPC64 ? PPC::FP8 : PPC::FP; @@ -13495,12 +13495,12 @@ EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size, bool MemcpyStrSrc, MachineFunction &MF) const { if (getTargetMachine().getOptLevel() != CodeGenOpt::None) { - const Function *F = MF.getFunction(); + const Function &F = MF.getFunction(); // When expanding a memset, require at least two QPX instructions to cover // the cost of loading the value to be stored from the constant pool. 
if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) && (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) && - !F->hasFnAttribute(Attribute::NoImplicitFloat)) { + !F.hasFnAttribute(Attribute::NoImplicitFloat)) { return MVT::v4f64; } @@ -13719,7 +13719,7 @@ void PPCTargetLowering::insertCopiesSplitCSR( // fine for CXX_FAST_TLS since the C++-style TLS access functions should be // nounwind. If we want to generalize this later, we may need to emit // CFI pseudo-instructions. - assert(Entry->getParent()->getFunction()->hasFnAttribute( + assert(Entry->getParent()->getFunction().hasFnAttribute( Attribute::NoUnwind) && "Function should be nounwind in insertCopiesSplitCSR!"); Entry->addLiveIn(*I); diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h index cd843e3b132..b119e5b4a56 100644 --- a/lib/Target/PowerPC/PPCISelLowering.h +++ b/lib/Target/PowerPC/PPCISelLowering.h @@ -586,8 +586,8 @@ namespace llvm { bool supportSplitCSR(MachineFunction *MF) const override { return - MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS && - MF->getFunction()->hasFnAttribute(Attribute::NoUnwind); + MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS && + MF->getFunction().hasFnAttribute(Attribute::NoUnwind); } void initializeSplitCSR(MachineBasicBlock *Entry) const override; diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp index 031e6689f18..ffb5cc8757f 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -3133,7 +3133,7 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt, const PPCFunctionInfo *FuncInfo = MF->getInfo<PPCFunctionInfo>(); // We check the ZExt/SExt flags for a method parameter. if (MI.getParent()->getBasicBlock() == - &MF->getFunction()->getEntryBlock()) { + &MF->getFunction().getEntryBlock()) { unsigned VReg = MI.getOperand(0).getReg(); if (MF->getRegInfo().isLiveIn(VReg)) return SignExt ? FuncInfo->isLiveInSExt(VReg) : diff --git a/lib/Target/PowerPC/PPCMIPeephole.cpp b/lib/Target/PowerPC/PPCMIPeephole.cpp index 64c5e4ebb87..27ded63bf64 100644 --- a/lib/Target/PowerPC/PPCMIPeephole.cpp +++ b/lib/Target/PowerPC/PPCMIPeephole.cpp @@ -106,7 +106,7 @@ public: // Main entry point for this pass. 
bool runOnMachineFunction(MachineFunction &MF) override { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; initialize(MF); return simplifyCode(); diff --git a/lib/Target/PowerPC/PPCPreEmitPeephole.cpp b/lib/Target/PowerPC/PPCPreEmitPeephole.cpp index df0e9f3515a..d524c354ed3 100644 --- a/lib/Target/PowerPC/PPCPreEmitPeephole.cpp +++ b/lib/Target/PowerPC/PPCPreEmitPeephole.cpp @@ -56,7 +56,7 @@ namespace { } bool runOnMachineFunction(MachineFunction &MF) override { - if (skipFunction(*MF.getFunction()) || !RunPreEmitPeephole) + if (skipFunction(MF.getFunction()) || !RunPreEmitPeephole) return false; bool Changed = false; const PPCInstrInfo *TII = MF.getSubtarget<PPCSubtarget>().getInstrInfo(); diff --git a/lib/Target/PowerPC/PPCQPXLoadSplat.cpp b/lib/Target/PowerPC/PPCQPXLoadSplat.cpp index 544c7f2aeef..25b2b54cbe9 100644 --- a/lib/Target/PowerPC/PPCQPXLoadSplat.cpp +++ b/lib/Target/PowerPC/PPCQPXLoadSplat.cpp @@ -60,7 +60,7 @@ FunctionPass *llvm::createPPCQPXLoadSplatPass() { } bool PPCQPXLoadSplat::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; bool MadeChange = false; diff --git a/lib/Target/PowerPC/PPCReduceCRLogicals.cpp b/lib/Target/PowerPC/PPCReduceCRLogicals.cpp index 7ad50a6fea0..5b2d7191683 100644 --- a/lib/Target/PowerPC/PPCReduceCRLogicals.cpp +++ b/lib/Target/PowerPC/PPCReduceCRLogicals.cpp @@ -211,7 +211,7 @@ public: MachineInstr *lookThroughCRCopy(unsigned Reg, unsigned &Subreg, MachineInstr *&CpDef); bool runOnMachineFunction(MachineFunction &MF) override { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; // If the subtarget doesn't use CR bits, there's nothing to do. diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp index 78467e81795..6b62a82ef7b 100644 --- a/lib/Target/PowerPC/PPCRegisterInfo.cpp +++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp @@ -123,7 +123,7 @@ PPCRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind) const MCPhysReg* PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { const PPCSubtarget &Subtarget = MF->getSubtarget<PPCSubtarget>(); - if (MF->getFunction()->getCallingConv() == CallingConv::AnyReg) { + if (MF->getFunction().getCallingConv() == CallingConv::AnyReg) { if (Subtarget.hasVSX()) return CSR_64_AllRegs_VSX_SaveList; if (Subtarget.hasAltivec()) @@ -161,7 +161,7 @@ PPCRegisterInfo::getCalleeSavedRegsViaCopy(const MachineFunction *MF) const { return nullptr; if (!TM.isPPC64()) return nullptr; - if (MF->getFunction()->getCallingConv() != CallingConv::CXX_FAST_TLS) + if (MF->getFunction().getCallingConv() != CallingConv::CXX_FAST_TLS) return nullptr; if (!MF->getInfo<PPCFunctionInfo>()->isSplitCSR()) return nullptr; @@ -901,7 +901,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // Naked functions have stack size 0, although getStackSize may not reflect // that because we didn't call all the pieces that compute it for naked // functions. 
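The runOnMachineFunction changes across these passes are all the same one-token fix in standard pass boilerplate; a generic sketch (the pass name is hypothetical):

    bool MyMachinePass::runOnMachineFunction(MachineFunction &MF) {
      // skipFunction() takes a const Function &, so the explicit
      // dereference in skipFunction(*MF.getFunction()) is gone.
      if (skipFunction(MF.getFunction()))
        return false;
      bool Changed = false;
      // ... pass body ...
      return Changed;
    }
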
- if (!MF.getFunction()->hasFnAttribute(Attribute::Naked)) { + if (!MF.getFunction().hasFnAttribute(Attribute::Naked)) { if (!(hasBasePointer(MF) && FrameIndex < 0)) Offset += MFI.getStackSize(); } diff --git a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp index 04fa35815b6..f15af790de8 100644 --- a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp +++ b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp @@ -343,7 +343,7 @@ protected: public: bool runOnMachineFunction(MachineFunction &MF) override { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; // If we don't have VSX then go ahead and return without doing diff --git a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp index 0320ecaf853..8a5fb9fdaef 100644 --- a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp +++ b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp @@ -191,7 +191,7 @@ private: public: // Main entry point for this pass. bool runOnMachineFunction(MachineFunction &MF) override { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; // If we don't have VSX on the subtarget, don't do anything. diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp index d011ec87bac..d9548ff90d7 100644 --- a/lib/Target/Sparc/SparcISelLowering.cpp +++ b/lib/Target/Sparc/SparcISelLowering.cpp @@ -264,7 +264,7 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, unsigned RetAddrOffset = 8; // Call Inst + Delay Slot // If the function returns a struct, copy the SRetReturnReg to I0 - if (MF.getFunction()->hasStructRetAttr()) { + if (MF.getFunction().hasStructRetAttr()) { SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>(); unsigned Reg = SFI->getSRetReturnReg(); if (!Reg) @@ -519,7 +519,7 @@ SDValue SparcTargetLowering::LowerFormalArguments_32( InVals.push_back(Load); } - if (MF.getFunction()->hasStructRetAttr()) { + if (MF.getFunction().hasStructRetAttr()) { // Copy the SRet Argument to SRetReturnReg. 
SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>(); unsigned Reg = SFI->getSRetReturnReg(); @@ -701,8 +701,8 @@ static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee, CalleeFn = dyn_cast<Function>(G->getGlobal()); } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) { - const Function *Fn = DAG.getMachineFunction().getFunction(); - const Module *M = Fn->getParent(); + const Function &Fn = DAG.getMachineFunction().getFunction(); + const Module *M = Fn.getParent(); const char *CalleeName = E->getSymbol(); CalleeFn = M->getFunction(CalleeName); } @@ -1057,8 +1057,8 @@ SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const CalleeFn = dyn_cast<Function>(G->getGlobal()); } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) { - const Function *Fn = DAG.getMachineFunction().getFunction(); - const Module *M = Fn->getParent(); + const Function &F = DAG.getMachineFunction().getFunction(); + const Module *M = F.getParent(); const char *CalleeName = E->getSymbol(); CalleeFn = M->getFunction(CalleeName); if (!CalleeFn && isFP128ABICall(CalleeName)) diff --git a/lib/Target/SystemZ/SystemZElimCompare.cpp b/lib/Target/SystemZ/SystemZElimCompare.cpp index ca82740d3e8..55f7a7b8d0d 100644 --- a/lib/Target/SystemZ/SystemZElimCompare.cpp +++ b/lib/Target/SystemZ/SystemZElimCompare.cpp @@ -593,7 +593,7 @@ bool SystemZElimCompare::processBlock(MachineBasicBlock &MBB) { } bool SystemZElimCompare::runOnMachineFunction(MachineFunction &F) { - if (skipFunction(*F.getFunction())) + if (skipFunction(F.getFunction())) return false; TII = static_cast<const SystemZInstrInfo *>(F.getSubtarget().getInstrInfo()); diff --git a/lib/Target/SystemZ/SystemZFrameLowering.cpp b/lib/Target/SystemZ/SystemZFrameLowering.cpp index 3183c3acc69..b600aa61cd0 100644 --- a/lib/Target/SystemZ/SystemZFrameLowering.cpp +++ b/lib/Target/SystemZ/SystemZFrameLowering.cpp @@ -71,7 +71,7 @@ void SystemZFrameLowering::determineCalleeSaves(MachineFunction &MF, const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); bool HasFP = hasFP(MF); SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>(); - bool IsVarArg = MF.getFunction()->isVarArg(); + bool IsVarArg = MF.getFunction().isVarArg(); // va_start stores incoming FPR varargs in the normal way, but delegates // the saving of incoming GPR varargs to spillCalleeSavedRegisters(). @@ -139,7 +139,7 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineFunction &MF = *MBB.getParent(); const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>(); - bool IsVarArg = MF.getFunction()->isVarArg(); + bool IsVarArg = MF.getFunction().isVarArg(); DebugLoc DL; // Scan the call-saved GPRs and find the bounds of the register spill area. @@ -374,7 +374,7 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF, uint64_t StackSize = getAllocatedStackSize(MF); if (StackSize) { // Determine if we want to store a backchain. - bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain"); + bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain"); // If we need backchain, save current stack pointer. R1 is free at this // point. 
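Walking from the MachineFunction to its enclosing Module also reads naturally through the reference, as in the Sparc hunks above; Function::getParent() still returns a pointer, so M stays one:

    const Function &Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn.getParent();
    CalleeFn = M->getFunction(CalleeName);
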
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp index c239cd5ad46..adf368319dc 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -3039,8 +3039,8 @@ SDValue SystemZTargetLowering:: lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { const TargetFrameLowering *TFI = Subtarget.getFrameLowering(); MachineFunction &MF = DAG.getMachineFunction(); - bool RealignOpt = !MF.getFunction()-> hasFnAttribute("no-realign-stack"); - bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain"); + bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack"); + bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain"); SDValue Chain = Op.getOperand(0); SDValue Size = Op.getOperand(1); @@ -3572,7 +3572,7 @@ SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); - bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain"); + bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain"); SDValue Chain = Op.getOperand(0); SDValue NewSP = Op.getOperand(1); diff --git a/lib/Target/SystemZ/SystemZLDCleanup.cpp b/lib/Target/SystemZ/SystemZLDCleanup.cpp index 0f759433876..f532e9e23b1 100644 --- a/lib/Target/SystemZ/SystemZLDCleanup.cpp +++ b/lib/Target/SystemZ/SystemZLDCleanup.cpp @@ -64,7 +64,7 @@ void SystemZLDCleanup::getAnalysisUsage(AnalysisUsage &AU) const { } bool SystemZLDCleanup::runOnMachineFunction(MachineFunction &F) { - if (skipFunction(*F.getFunction())) + if (skipFunction(F.getFunction())) return false; TII = static_cast<const SystemZInstrInfo *>(F.getSubtarget().getInstrInfo()); diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/lib/Target/SystemZ/SystemZRegisterInfo.cpp index 173f5b4e87a..856505e00a1 100644 --- a/lib/Target/SystemZ/SystemZRegisterInfo.cpp +++ b/lib/Target/SystemZ/SystemZRegisterInfo.cpp @@ -109,7 +109,7 @@ SystemZRegisterInfo::getRegAllocationHints(unsigned VirtReg, const MCPhysReg * SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { if (MF->getSubtarget().getTargetLowering()->supportSwiftError() && - MF->getFunction()->getAttributes().hasAttrSomewhere( + MF->getFunction().getAttributes().hasAttrSomewhere( Attribute::SwiftError)) return CSR_SystemZ_SwiftError_SaveList; return CSR_SystemZ_SaveList; @@ -119,7 +119,7 @@ const uint32_t * SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF, CallingConv::ID CC) const { if (MF.getSubtarget().getTargetLowering()->supportSwiftError() && - MF.getFunction()->getAttributes().hasAttrSomewhere( + MF.getFunction().getAttributes().hasAttrSomewhere( Attribute::SwiftError)) return CSR_SystemZ_SwiftError_RegMask; return CSR_SystemZ_RegMask; diff --git a/lib/Target/SystemZ/SystemZShortenInst.cpp b/lib/Target/SystemZ/SystemZShortenInst.cpp index d9c8fab5634..195fa20a2c9 100644 --- a/lib/Target/SystemZ/SystemZShortenInst.cpp +++ b/lib/Target/SystemZ/SystemZShortenInst.cpp @@ -309,7 +309,7 @@ bool SystemZShortenInst::processBlock(MachineBasicBlock &MBB) { } bool SystemZShortenInst::runOnMachineFunction(MachineFunction &F) { - if (skipFunction(*F.getFunction())) + if (skipFunction(F.getFunction())) return false; const SystemZSubtarget &ST = F.getSubtarget<SystemZSubtarget>(); diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp index 4881928f863..71526dd77f1 100644 --- 
a/lib/Target/X86/X86AsmPrinter.cpp +++ b/lib/Target/X86/X86AsmPrinter.cpp @@ -63,7 +63,7 @@ bool X86AsmPrinter::runOnMachineFunction(MachineFunction &MF) { SetupMachineFunction(MF); if (Subtarget->isTargetCOFF()) { - bool Local = MF.getFunction()->hasLocalLinkage(); + bool Local = MF.getFunction().hasLocalLinkage(); OutStreamer->BeginCOFFSymbolDef(CurrentFnSym); OutStreamer->EmitCOFFSymbolStorageClass( Local ? COFF::IMAGE_SYM_CLASS_STATIC : COFF::IMAGE_SYM_CLASS_EXTERNAL); diff --git a/lib/Target/X86/X86CallFrameOptimization.cpp b/lib/Target/X86/X86CallFrameOptimization.cpp index b4202799ae7..522dc7926b9 100644 --- a/lib/Target/X86/X86CallFrameOptimization.cpp +++ b/lib/Target/X86/X86CallFrameOptimization.cpp @@ -148,7 +148,7 @@ bool X86CallFrameOptimization::isLegal(MachineFunction &MF) { // is a danger of that being generated. if (STI->isTargetDarwin() && (!MF.getLandingPads().empty() || - (MF.getFunction()->needsUnwindTableEntry() && !TFL->hasFP(MF)))) + (MF.getFunction().needsUnwindTableEntry() && !TFL->hasFP(MF)))) return false; // It is not valid to change the stack pointer outside the prolog/epilog @@ -243,7 +243,7 @@ bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) { assert(isPowerOf2_32(SlotSize) && "Expect power of 2 stack slot size"); Log2SlotSize = Log2_32(SlotSize); - if (skipFunction(*MF.getFunction()) || !isLegal(MF)) + if (skipFunction(MF.getFunction()) || !isLegal(MF)) return false; unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode(); diff --git a/lib/Target/X86/X86CallLowering.cpp b/lib/Target/X86/X86CallLowering.cpp index 3e1f3400b46..ccb982f9ac1 100644 --- a/lib/Target/X86/X86CallLowering.cpp +++ b/lib/Target/X86/X86CallLowering.cpp @@ -177,7 +177,7 @@ bool X86CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, MachineFunction &MF = MIRBuilder.getMF(); MachineRegisterInfo &MRI = MF.getRegInfo(); auto &DL = MF.getDataLayout(); - const Function &F = *MF.getFunction(); + const Function &F = MF.getFunction(); ArgInfo OrigArg{VReg, Val->getType()}; setArgFlags(OrigArg, AttributeList::ReturnIndex, DL, F); @@ -334,7 +334,7 @@ bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const ArgInfo &OrigRet, ArrayRef<ArgInfo> OrigArgs) const { MachineFunction &MF = MIRBuilder.getMF(); - const Function &F = *MF.getFunction(); + const Function &F = MF.getFunction(); MachineRegisterInfo &MRI = MF.getRegInfo(); auto &DL = F.getParent()->getDataLayout(); const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); diff --git a/lib/Target/X86/X86CmovConversion.cpp b/lib/Target/X86/X86CmovConversion.cpp index a4bb98956ea..489d9d86e25 100644 --- a/lib/Target/X86/X86CmovConversion.cpp +++ b/lib/Target/X86/X86CmovConversion.cpp @@ -164,7 +164,7 @@ void X86CmovConverterPass::getAnalysisUsage(AnalysisUsage &AU) const { } bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; if (!EnableCmovConverter) return false; diff --git a/lib/Target/X86/X86DomainReassignment.cpp b/lib/Target/X86/X86DomainReassignment.cpp index f32fb9c3151..f9e1ac39425 100644 --- a/lib/Target/X86/X86DomainReassignment.cpp +++ b/lib/Target/X86/X86DomainReassignment.cpp @@ -678,7 +678,7 @@ void X86DomainReassignment::initConverters() { } bool X86DomainReassignment::runOnMachineFunction(MachineFunction &MF) { - if (skipFunction(*MF.getFunction())) + if (skipFunction(MF.getFunction())) return false; if (DisableX86DomainReassignment) return false; diff --git 
a/lib/Target/X86/X86ExpandPseudo.cpp b/lib/Target/X86/X86ExpandPseudo.cpp index 5dfd95f7130..ab2ef26d1cc 100644 --- a/lib/Target/X86/X86ExpandPseudo.cpp +++ b/lib/Target/X86/X86ExpandPseudo.cpp @@ -222,7 +222,7 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB, case X86::EH_RESTORE: { // Restore ESP and EBP, and optionally ESI if required. bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality( - MBB.getParent()->getFunction()->getPersonalityFn())); + MBB.getParent()->getFunction().getPersonalityFn())); X86FL->restoreWin32EHStackPointers(MBB, MBBI, DL, /*RestoreSP=*/IsSEH); MBBI->eraseFromParent(); return true; diff --git a/lib/Target/X86/X86FixupBWInsts.cpp b/lib/Target/X86/X86FixupBWInsts.cpp index 2f7dd5804fe..01d10fe4cae 100644 --- a/lib/Target/X86/X86FixupBWInsts.cpp +++ b/lib/Target/X86/X86FixupBWInsts.cpp @@ -146,12 +146,12 @@ INITIALIZE_PASS(FixupBWInstPass, FIXUPBW_NAME, FIXUPBW_DESC, false, false) FunctionPass *llvm::createX86FixupBWInsts() { return new FixupBWInstPass(); } bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) { - if (!FixupBWInsts || skipFunction(*MF.getFunction())) + if (!FixupBWInsts || skipFunction(MF.getFunction())) return false; this->MF = &MF; TII = MF.getSubtarget<X86Subtarget>().getInstrInfo(); - OptForSize = MF.getFunction()->optForSize(); + OptForSize = MF.getFunction().optForSize(); MLI = &getAnalysis<MachineLoopInfo>(); LiveRegs.init(TII->getRegisterInfo()); diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp index d27974ffe39..b41bf99f19b 100644 --- a/lib/Target/X86/X86FixupLEAs.cpp +++ b/lib/Target/X86/X86FixupLEAs.cpp @@ -191,12 +191,12 @@ FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI, FunctionPass *llvm::createX86FixupLEAs() { return new FixupLEAPass(); } bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) { - if (skipFunction(*Func.getFunction())) + if (skipFunction(Func.getFunction())) return false; MF = &Func; const X86Subtarget &ST = Func.getSubtarget<X86Subtarget>(); - OptIncDec = !ST.slowIncDec() || Func.getFunction()->optForMinSize(); + OptIncDec = !ST.slowIncDec() || Func.getFunction().optForMinSize(); OptLEA = ST.LEAusesAG() || ST.slowLEA() || ST.slow3OpsLEA(); if (!OptLEA && !OptIncDec) diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp index b73a08846e9..9a72e7114be 100644 --- a/lib/Target/X86/X86FloatingPoint.cpp +++ b/lib/Target/X86/X86FloatingPoint.cpp @@ -349,7 +349,7 @@ bool FPS::runOnMachineFunction(MachineFunction &MF) { // In regcall convention, some FP registers may not be passed through // the stack, so they will need to be assigned to the stack first - if ((Entry->getParent()->getFunction()->getCallingConv() == + if ((Entry->getParent()->getFunction().getCallingConv() == CallingConv::X86_RegCall) && (Bundle.Mask && !Bundle.FixCount)) { // In the register calling convention, up to one FP argument could be // saved in the first FP register. 
@@ -973,7 +973,7 @@ void FPS::handleCall(MachineBasicBlock::iterator &I) { unsigned R = MO.getReg() - X86::FP0; if (R < 8) { - if (MF->getFunction()->getCallingConv() != CallingConv::X86_RegCall) { + if (MF->getFunction().getCallingConv() != CallingConv::X86_RegCall) { assert(MO.isDef() && MO.isImplicit()); } diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index ead877a399f..80b1cc192a8 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -148,8 +148,7 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB, const X86RegisterInfo *TRI, bool Is64Bit) { const MachineFunction *MF = MBB.getParent(); - const Function *F = MF->getFunction(); - if (!F || MF->callsEHReturn()) + if (MF->callsEHReturn()) return 0; const TargetRegisterClass &AvailableRegs = *TRI->getGPRsForTailCall(*MF); @@ -820,7 +819,7 @@ uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) con const MachineFrameInfo &MFI = MF.getFrameInfo(); uint64_t MaxAlign = MFI.getMaxAlignment(); // Desired stack alignment. unsigned StackAlign = getStackAlignment(); - if (MF.getFunction()->hasFnAttribute("stackrealign")) { + if (MF.getFunction().hasFnAttribute("stackrealign")) { if (MFI.hasCalls()) MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign; else if (MaxAlign < SlotSize) @@ -935,28 +934,28 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF, "MF used frame lowering for wrong subtarget"); MachineBasicBlock::iterator MBBI = MBB.begin(); MachineFrameInfo &MFI = MF.getFrameInfo(); - const Function *Fn = MF.getFunction(); + const Function &Fn = MF.getFunction(); MachineModuleInfo &MMI = MF.getMMI(); X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>(); uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment. uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate. bool IsFunclet = MBB.isEHFuncletEntry(); EHPersonality Personality = EHPersonality::Unknown; - if (Fn->hasPersonalityFn()) - Personality = classifyEHPersonality(Fn->getPersonalityFn()); + if (Fn.hasPersonalityFn()) + Personality = classifyEHPersonality(Fn.getPersonalityFn()); bool FnHasClrFunclet = MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR; bool IsClrFunclet = IsFunclet && FnHasClrFunclet; bool HasFP = hasFP(MF); - bool IsWin64CC = STI.isCallingConvWin64(Fn->getCallingConv()); + bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv()); bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); - bool NeedsWin64CFI = IsWin64Prologue && Fn->needsUnwindTableEntry(); + bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry(); // FIXME: Emit FPO data for EH funclets. bool NeedsWinFPO = !IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag(); bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO; bool NeedsDwarfCFI = - !IsWin64Prologue && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry()); + !IsWin64Prologue && (MMI.hasDebugInfo() || Fn.needsUnwindTableEntry()); unsigned FramePtr = TRI->getFrameRegister(MF); const unsigned MachineFramePtr = STI.isTarget64BitILP32() @@ -982,16 +981,16 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF, // The default stack probe size is 4096 if the function has no stackprobesize // attribute. 
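The findDeadCallerSavedReg hunk above shows a second kind of cleanup: with a reference return the deleted !F test could never fire, so the defensive null check is removed outright rather than rewritten:

    // Before:
    //   const Function *F = MF->getFunction();
    //   if (!F || MF->callsEHReturn())
    //     return 0;
    // After: only the meaningful condition survives.
    if (MF->callsEHReturn())
      return 0;
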
unsigned StackProbeSize = 4096; - if (Fn->hasFnAttribute("stack-probe-size")) - Fn->getFnAttribute("stack-probe-size") + if (Fn.hasFnAttribute("stack-probe-size")) + Fn.getFnAttribute("stack-probe-size") .getValueAsString() .getAsInteger(0, StackProbeSize); // Re-align the stack on 64-bit if the x86-interrupt calling convention is // used and an error code was pushed, since the x86-64 ABI requires a 16-byte // stack alignment. - if (Fn->getCallingConv() == CallingConv::X86_INTR && Is64Bit && - Fn->arg_size() == 2) { + if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit && + Fn.arg_size() == 2) { StackSize += 8; MFI.setStackSize(StackSize); emitSPUpdate(MBB, MBBI, -8, /*InEpilogue=*/false); @@ -1002,7 +1001,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF, // pointer, calls, or dynamic alloca then we do not need to adjust the // stack pointer (we fit in the Red Zone). We also check that we don't // push and pop from the stack. - if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) && + if (Is64Bit && !Fn.hasFnAttribute(Attribute::NoRedZone) && !TRI->needsStackRealignment(MF) && !MFI.hasVarSizedObjects() && // No dynamic alloca. !MFI.adjustsStack() && // No calls. @@ -1447,7 +1446,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF, // 1. The interrupt handling function uses any of the "rep" instructions. // 2. Interrupt handling function calls another function. // - if (Fn->getCallingConv() == CallingConv::X86_INTR) + if (Fn.getCallingConv() == CallingConv::X86_INTR) BuildMI(MBB, MBBI, DL, TII.get(X86::CLD)) .setMIFlag(MachineInstr::FrameSetup); @@ -1508,7 +1507,7 @@ X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const { // This is the amount of stack a funclet needs to allocate. unsigned UsedSize; EHPersonality Personality = - classifyEHPersonality(MF.getFunction()->getPersonalityFn()); + classifyEHPersonality(MF.getFunction().getPersonalityFn()); if (Personality == EHPersonality::CoreCLR) { // CLR funclets need to hold enough space to include the PSPSym, at the // same offset from the stack pointer (immediately after the prolog) as it @@ -1551,7 +1550,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); bool NeedsWin64CFI = - IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry(); + IsWin64Prologue && MF.getFunction().needsUnwindTableEntry(); bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI); // Get the number of bytes to allocate from the FrameInfo. @@ -1981,7 +1980,7 @@ void X86FrameLowering::emitCatchRetReturnValue(MachineBasicBlock &MBB, MachineInstr *CatchRet) const { // SEH shouldn't use catchret. assert(!isAsynchronousEHPersonality(classifyEHPersonality( - MBB.getParent()->getFunction()->getPersonalityFn())) && + MBB.getParent()->getFunction().getPersonalityFn())) && "SEH should not use CATCHRET"); DebugLoc DL = CatchRet->getDebugLoc(); MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB(); @@ -2021,9 +2020,9 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB, // Don't restore CSRs before an SEH catchret. SEH except blocks do not form // funclets. emitEpilogue transforms these to normal jumps. 
if (MI->getOpcode() == X86::CATCHRET) { - const Function *Func = MBB.getParent()->getFunction(); + const Function &F = MBB.getParent()->getFunction(); bool IsSEH = isAsynchronousEHPersonality( - classifyEHPersonality(Func->getPersonalityFn())); + classifyEHPersonality(F.getPersonalityFn())); if (IsSEH) return true; } @@ -2095,8 +2094,8 @@ void X86FrameLowering::determineCalleeSaves(MachineFunction &MF, static bool HasNestArgument(const MachineFunction *MF) { - const Function *F = MF->getFunction(); - for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); + const Function &F = MF->getFunction(); + for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; I++) { if (I->hasNestAttr()) return true; @@ -2110,7 +2109,7 @@ HasNestArgument(const MachineFunction *MF) { /// needed. Set primary to true for the first register, false for the second. static unsigned GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) { - CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv(); + CallingConv::ID CallingConvention = MF.getFunction().getCallingConv(); // Erlang stuff. if (CallingConvention == CallingConv::HiPE) { @@ -2160,7 +2159,7 @@ void X86FrameLowering::adjustForSegmentedStacks( assert(!MF.getRegInfo().isLiveIn(ScratchReg) && "Scratch register is live-in"); - if (MF.getFunction()->isVarArg()) + if (MF.getFunction().isVarArg()) report_fatal_error("Segmented stacks do not support vararg functions."); if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() && !STI.isTargetWin64() && !STI.isTargetFreeBSD() && @@ -2434,8 +2433,8 @@ void X86FrameLowering::adjustForHiPEPrologue( Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS"); const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5; const unsigned Guaranteed = HipeLeafWords * SlotSize; - unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ? - MF.getFunction()->arg_size() - CCRegisteredArgs : 0; + unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs ? + MF.getFunction().arg_size() - CCRegisteredArgs : 0; unsigned MaxStack = MFI.getStackSize() + CallerStkArity*SlotSize + SlotSize; assert(STI.isTargetLinux() && @@ -2649,10 +2648,10 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, Amount = alignTo(Amount, StackAlign); MachineModuleInfo &MMI = MF.getMMI(); - const Function *Fn = MF.getFunction(); + const Function &F = MF.getFunction(); bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI(); - bool DwarfCFI = !WindowsCFI && - (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry()); + bool DwarfCFI = !WindowsCFI && + (MMI.hasDebugInfo() || F.needsUnwindTableEntry()); // If we have any exception handlers in this function, and we adjust // the SP before calls, we may need to indicate this to the unwinder @@ -2694,7 +2693,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, StackAdjustment += mergeSPUpdates(MBB, InsertPos, false); if (StackAdjustment) { - if (!(Fn->optForMinSize() && + if (!(F.optForMinSize() && adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment))) BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment, /*InEpilogue=*/false); @@ -2767,13 +2766,13 @@ bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const { bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const { // If we may need to emit frameless compact unwind information, give // up as this is currently broken: PR25614. 
- return (MF.getFunction()->hasFnAttribute(Attribute::NoUnwind) || hasFP(MF)) && + return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF)) && // The lowering of segmented stack and HiPE only support entry blocks // as prologue blocks: PR26107. // This limitation may be lifted if we fix: // - adjustForSegmentedStacks // - adjustForHiPEPrologue - MF.getFunction()->getCallingConv() != CallingConv::HiPE && + MF.getFunction().getCallingConv() != CallingConv::HiPE && !MF.shouldSplitStack(); } @@ -3003,9 +3002,9 @@ void X86FrameLowering::processFunctionBeforeFrameFinalized( // If this function isn't doing Win64-style C++ EH, we don't need to do // anything. - const Function *Fn = MF.getFunction(); + const Function &F = MF.getFunction(); if (!STI.is64Bit() || !MF.hasEHFunclets() || - classifyEHPersonality(Fn->getPersonalityFn()) != EHPersonality::MSVC_CXX) + classifyEHPersonality(F.getPersonalityFn()) != EHPersonality::MSVC_CXX) return; // Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index 8df8098b4b9..a6c7c5f22a3 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -619,8 +619,8 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) { void X86DAGToDAGISel::PreprocessISelDAG() { // OptFor[Min]Size are used in pattern predicates that isel is matching. - OptForSize = MF->getFunction()->optForSize(); - OptForMinSize = MF->getFunction()->optForMinSize(); + OptForSize = MF->getFunction().optForSize(); + OptForMinSize = MF->getFunction().optForMinSize(); assert((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize"); for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(), @@ -753,9 +753,9 @@ void X86DAGToDAGISel::emitSpecialCodeForMain() { void X86DAGToDAGISel::EmitFunctionEntryCode() { // If this is main, emit special code for main. 
- if (const Function *Fn = MF->getFunction()) - if (Fn->hasExternalLinkage() && Fn->getName() == "main") - emitSpecialCodeForMain(); + const Function &F = MF->getFunction(); + if (F.hasExternalLinkage() && F.getName() == "main") + emitSpecialCodeForMain(); } static bool isDispSafeForFrameIndex(int64_t Val) { diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 94714bf6920..43971c3aaa1 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -94,7 +94,7 @@ static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl, const char *Msg) { MachineFunction &MF = DAG.getMachineFunction(); DAG.getContext()->diagnose( - DiagnosticInfoUnsupported(*MF.getFunction(), Msg, dl.getDebugLoc())); + DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc())); } X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM, @@ -1843,8 +1843,8 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const { - const Function *F = MF.getFunction(); - if (!F->hasFnAttribute(Attribute::NoImplicitFloat)) { + const Function &F = MF.getFunction(); + if (!F.hasFnAttribute(Attribute::NoImplicitFloat)) { if (Size >= 16 && (!Subtarget.isUnalignedMem16Slow() || ((DstAlign == 0 || DstAlign >= 16) && @@ -1940,7 +1940,7 @@ void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC, if (CC != CallingConv::C && CC != CallingConv::X86_StdCall) return; unsigned ParamRegs = 0; - if (auto *M = MF->getFunction()->getParent()) + if (auto *M = MF->getFunction().getParent()) ParamRegs = M->getNumberRegisterParameters(); // Mark the first N int arguments as having reg @@ -2207,7 +2207,7 @@ X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, // For example, when they are used for argument passing. bool ShouldDisableCalleeSavedRegister = CallConv == CallingConv::X86_RegCall || - MF.getFunction()->hasFnAttribute("no_caller_saved_registers"); + MF.getFunction().hasFnAttribute("no_caller_saved_registers"); if (CallConv == CallingConv::X86_INTR && !Outs.empty()) report_fatal_error("X86 interrupts may not return any value"); @@ -2889,8 +2889,8 @@ static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF, return None; } - const Function *Fn = MF.getFunction(); - bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat); + const Function &F = MF.getFunction(); + bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat); bool isSoftFloat = Subtarget.useSoftFloat(); assert(!(isSoftFloat && NoImplicitFloatOps) && "SSE register cannot be used when SSE is disabled!"); @@ -2923,10 +2923,9 @@ SDValue X86TargetLowering::LowerFormalArguments( X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); const TargetFrameLowering &TFI = *Subtarget.getFrameLowering(); - const Function *Fn = MF.getFunction(); - if (Fn->hasExternalLinkage() && - Subtarget.isTargetCygMing() && - Fn->getName() == "main") + const Function &F = MF.getFunction(); + if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() && + F.getName() == "main") FuncInfo->setForceFramePointer(true); MachineFrameInfo &MFI = MF.getFrameInfo(); @@ -3101,7 +3100,7 @@ SDValue X86TargetLowering::LowerFormalArguments( // Figure out if XMM registers are in use. 
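EmitFunctionEntryCode above is the same null-test removal in its declare-inside-if form; the pointer-valued condition was always true, so the guard folds away:

    // Before:
    //   if (const Function *Fn = MF->getFunction())
    //     if (Fn->hasExternalLinkage() && Fn->getName() == "main")
    //       emitSpecialCodeForMain();
    // After:
    const Function &F = MF->getFunction();
    if (F.hasExternalLinkage() && F.getName() == "main")
      emitSpecialCodeForMain();
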
     assert(!(Subtarget.useSoftFloat() &&
-             Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
+             F.hasFnAttribute(Attribute::NoImplicitFloat)) &&
            "SSE register cannot be used when SSE is disabled!");
 
     // 64-bit calling conventions support varargs and register parameters, so we
@@ -3258,7 +3257,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
   FuncInfo->setArgumentStackSize(StackSize);
 
   if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
-    EHPersonality Personality = classifyEHPersonality(Fn->getPersonalityFn());
+    EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
     if (Personality == EHPersonality::CoreCLR) {
       assert(Is64Bit);
       // TODO: Add a mechanism to frame lowering that will allow us to indicate
@@ -3275,7 +3274,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
   }
 
   if (CallConv == CallingConv::X86_RegCall ||
-      Fn->hasFnAttribute("no_caller_saved_registers")) {
+      F.hasFnAttribute("no_caller_saved_registers")) {
     MachineRegisterInfo &MRI = MF.getRegInfo();
     for (std::pair<unsigned, unsigned> Pair : MRI.liveins())
       MRI.disableCalleeSavedRegister(Pair.first);
@@ -3366,7 +3365,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
   bool IsSibcall = false;
   X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
-  auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
+  auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
   const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
   const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
   bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
@@ -3401,7 +3400,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
     // Check if it's really possible to do a tail call.
     isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                     isVarArg, SR != NotStructReturn,
-                    MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
+                    MF.getFunction().hasStructRetAttr(), CLI.RetTy,
                     Outs, OutVals, Ins, DAG);
 
     // Sibcalls are automatically detected tailcalls which do not require
@@ -3747,7 +3746,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       }
     }
   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
-    const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+    const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
     unsigned char OpFlags =
         Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
@@ -3796,10 +3795,10 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
     // FIXME: Model this more precisely so that we can register allocate across
     // the normal edge and spill and fill across the exceptional edge.
     if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
-      const Function *CallerFn = MF.getFunction();
+      const Function &CallerFn = MF.getFunction();
       EHPersonality Pers =
-          CallerFn->hasPersonalityFn()
-              ? classifyEHPersonality(CallerFn->getPersonalityFn())
+          CallerFn.hasPersonalityFn()
+              ? classifyEHPersonality(CallerFn.getPersonalityFn())
               : EHPersonality::Unknown;
       if (isFuncletEHPersonality(Pers))
         Mask = RegInfo->getNoPreservedMask();
@@ -4047,15 +4046,15 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
   // If -tailcallopt is specified, make fastcc functions tail-callable.
   MachineFunction &MF = DAG.getMachineFunction();
-  const Function *CallerF = MF.getFunction();
+  const Function &CallerF = MF.getFunction();
 
   // If the function return type is x86_fp80 and the callee return type is not,
   // then the FP_EXTEND of the call result is not a nop. It's not safe to
   // perform a tailcall optimization here.
-  if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
+  if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
     return false;
 
-  CallingConv::ID CallerCC = CallerF->getCallingConv();
+  CallingConv::ID CallerCC = CallerF.getCallingConv();
   bool CCMatch = CallerCC == CalleeCC;
   bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
   bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
@@ -4639,7 +4638,7 @@ bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                                          const SelectionDAG &DAG) const {
   // Do not merge to float value size (128 bytes) if no implicit
   // float attribute is set.
-  bool NoFloat = DAG.getMachineFunction().getFunction()->hasFnAttribute(
+  bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
       Attribute::NoImplicitFloat);
 
   if (NoFloat) {
@@ -6927,7 +6926,7 @@ static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
   // TODO: If multiple splats are generated to load the same constant,
   // it may be detrimental to overall size. There needs to be a way to detect
   // that condition to know if this is truly a size win.
-  bool OptForSize = DAG.getMachineFunction().getFunction()->optForSize();
+  bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
 
   // Handle broadcasting a single constant scalar from the constant pool
   // into a vector.
@@ -14903,7 +14902,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
     // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
     // combine either bitwise AND or insert of float 0.0 to set these bits.
 
-    bool MinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
+    bool MinSize = DAG.getMachineFunction().getFunction().optForMinSize();
     if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
       // If this is an insertion of 32-bits into the low 32-bits of
       // a vector, we prefer to generate a blend with immediate rather
@@ -15044,7 +15043,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
 
   // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
   // global base reg.
-  const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+  const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
   unsigned char OpFlag = Subtarget.classifyGlobalReference(nullptr, *Mod);
   auto PtrVT = getPointerTy(DAG.getDataLayout());
 
@@ -16968,7 +16967,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
       // An add of one will be selected as an INC.
       if (C->isOne() && (!Subtarget.slowIncDec() ||
-                         DAG.getMachineFunction().getFunction()->optForSize())) {
+                         DAG.getMachineFunction().getFunction().optForSize())) {
         Opcode = X86ISD::INC;
         NumOperands = 1;
         break;
@@ -16977,7 +16976,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
       // An add of negative one (subtract of one) will be selected as a DEC.
       if (C->isAllOnesValue() && (!Subtarget.slowIncDec() ||
-                                  DAG.getMachineFunction().getFunction()->optForSize())) {
+                                  DAG.getMachineFunction().getFunction().optForSize())) {
         Opcode = X86ISD::DEC;
         NumOperands = 1;
         break;
@@ -17172,7 +17171,7 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
   // with an immediate. 16 bit immediates are to be avoided.
   if ((Op0.getValueType() == MVT::i16 &&
        (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
-      !DAG.getMachineFunction().getFunction()->optForMinSize() &&
+      !DAG.getMachineFunction().getFunction().optForMinSize() &&
       !Subtarget.isAtom()) {
     unsigned ExtendOp =
         isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
@@ -19242,8 +19241,8 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
   if (Is64Bit) {
     // The 64 bit implementation of segmented stacks needs to clobber both r10
     // r11. This makes it impossible to use it along with nested parameters.
-    const Function *F = MF.getFunction();
-    for (const auto &A : F->args()) {
+    const Function &F = MF.getFunction();
+    for (const auto &A : F.args()) {
       if (A.hasNestAttr())
         report_fatal_error("Cannot use segmented stacks with functions that "
                            "have nested arguments.");
@@ -19290,7 +19289,7 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
   SDLoc DL(Op);
 
   if (!Subtarget.is64Bit() ||
-      Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv())) {
+      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
     // vastart just stores the address of the VarArgsFrameIndex slot into the
     // memory location argument.
     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
@@ -19344,7 +19343,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
   assert(Op.getNumOperands() == 4);
 
   MachineFunction &MF = DAG.getMachineFunction();
-  if (Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()))
+  if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
     // The Win64 ABI uses char* instead of a structure.
     return DAG.expandVAArg(Op.getNode());
 
@@ -19375,7 +19374,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
   if (ArgMode == 2) {
     // Sanity Check: Make sure using fp_offset makes sense.
     assert(!Subtarget.useSoftFloat() &&
-           !(MF.getFunction()->hasFnAttribute(Attribute::NoImplicitFloat)) &&
+           !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
            Subtarget.hasSSE1());
   }
 
@@ -19403,7 +19402,7 @@ static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
   // where a va_list is still an i8*.
   assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
   if (Subtarget.isCallingConvWin64(
-          DAG.getMachineFunction().getFunction()->getCallingConv()))
+          DAG.getMachineFunction().getFunction().getCallingConv()))
     // Probably a Win64 va_copy.
     return DAG.expandVACopy(Op.getNode());
 
@@ -23939,7 +23938,7 @@ static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
   if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
     // Convert to inc/dec if they aren't slow or we are optimizing for size.
     if (AllowIncDec && (!Subtarget.slowIncDec() ||
-                        DAG.getMachineFunction().getFunction()->optForSize())) {
+                        DAG.getMachineFunction().getFunction().optForSize())) {
       if ((NewOpc == X86ISD::LADD && C->isOne()) ||
           (NewOpc == X86ISD::LSUB && C->isAllOnesValue()))
         return DAG.getMemIntrinsicNode(X86ISD::LINC, SDLoc(N),
@@ -26085,7 +26084,7 @@ MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
   int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
   int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
 
-  if (!Subtarget.isCallingConvWin64(F->getFunction()->getCallingConv())) {
+  if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
     // If %al is 0, branch around the XMM save block.
     BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
     BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
@@ -26728,7 +26727,7 @@ X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
   DebugLoc DL = MI.getDebugLoc();
 
   assert(!isAsynchronousEHPersonality(
-             classifyEHPersonality(MF->getFunction()->getPersonalityFn())) &&
+             classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
          "SEH does not use catchret!");
 
   // Only 32-bit EH needs to worry about manually restoring stack pointers.
@@ -26755,7 +26754,7 @@ MachineBasicBlock *
 X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
                                        MachineBasicBlock *BB) const {
   MachineFunction *MF = BB->getParent();
-  const Constant *PerFn = MF->getFunction()->getPersonalityFn();
+  const Constant *PerFn = MF->getFunction().getPersonalityFn();
   bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
   // Only 32-bit SEH requires special handling for catchpad.
   if (IsSEH && Subtarget.is32Bit()) {
@@ -32161,7 +32160,7 @@ static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
   // pmulld is supported since SSE41. It is better to use pmulld
   // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
   // the expansion.
-  bool OptForMinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
+  bool OptForMinSize = DAG.getMachineFunction().getFunction().optForMinSize();
   if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
     return SDValue();
 
@@ -32354,7 +32353,7 @@ static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
   if (!MulConstantOptimization)
     return SDValue();
 
   // An imul is usually smaller than the alternative sequence.
-  if (DAG.getMachineFunction().getFunction()->optForMinSize())
+  if (DAG.getMachineFunction().getFunction().optForMinSize())
     return SDValue();
 
   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
@@ -33572,7 +33571,7 @@ static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
     return SDValue();
 
   // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
-  bool OptForSize = DAG.getMachineFunction().getFunction()->optForSize();
+  bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
 
   // SHLD/SHRD instructions have lower register pressure, but on some
   // platforms they have higher latency than the equivalent
@@ -34512,8 +34511,8 @@ static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
   if (VT.getSizeInBits() != 64)
     return SDValue();
 
-  const Function *F = DAG.getMachineFunction().getFunction();
-  bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
+  const Function &F = DAG.getMachineFunction().getFunction();
+  bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
   bool F64IsLegal =
       !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
   if ((VT.isVector() ||
@@ -35388,7 +35387,7 @@ static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
 
   // This takes at least 3 instructions, so favor a library call when operating
   // on a scalar and minimizing code size.
-  if (!VT.isVector() && DAG.getMachineFunction().getFunction()->optForMinSize())
+  if (!VT.isVector() && DAG.getMachineFunction().getFunction().optForMinSize())
     return SDValue();
 
   SDValue Op0 = N->getOperand(0);
@@ -38403,7 +38402,7 @@ void X86TargetLowering::insertCopiesSplitCSR(
     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
     // CFI pseudo-instructions.
-    assert(Entry->getParent()->getFunction()->hasFnAttribute(
+    assert(Entry->getParent()->getFunction().hasFnAttribute(
                Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
     Entry->addLiveIn(*I);
@@ -38426,8 +38425,8 @@ bool X86TargetLowering::supportSwiftError() const {
 /// string if not applicable.
 StringRef
 X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
   // If the function specifically requests stack probes, emit them.
-  if (MF.getFunction()->hasFnAttribute("probe-stack"))
-    return MF.getFunction()->getFnAttribute("probe-stack").getValueAsString();
+  if (MF.getFunction().hasFnAttribute("probe-stack"))
+    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
 
   // Generally, if we aren't on Windows, the platform ABI does not include
   // support for stack probes, so don't emit them.
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 24a6cf4d74a..4db969bbca4 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -1228,8 +1228,8 @@ namespace llvm {
                            const SDLoc &dl, SelectionDAG &DAG) const override;
 
     bool supportSplitCSR(MachineFunction *MF) const override {
-      return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
-          MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
+      return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
+          MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
     }
     void initializeSplitCSR(MachineBasicBlock *Entry) const override;
     void insertCopiesSplitCSR(
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index a4ddb31e67c..a246359fe33 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -7726,7 +7726,7 @@ static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB,
   bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
   bool NeedsDwarfCFI =
       !IsWin64Prologue &&
-      (MF.getMMI().hasDebugInfo() || MF.getFunction()->needsUnwindTableEntry());
+      (MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry());
   bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
   if (EmitCFI) {
     TFL->BuildCFI(MBB, I, DL,
@@ -8409,7 +8409,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
   // For CPUs that favor the register form of a call or push,
   // do not fold loads into calls or pushes, unless optimizing for size
   // aggressively.
-  if (isSlowTwoMemOps && !MF.getFunction()->optForMinSize() &&
+  if (isSlowTwoMemOps && !MF.getFunction().optForMinSize() &&
      (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
       MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
       MI.getOpcode() == X86::PUSH64r))
@@ -8417,7 +8417,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
   // Avoid partial register update stalls unless optimizing for size.
   // TODO: we should block undef reg update as well.
-  if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
+  if (!MF.getFunction().optForSize() && hasPartialRegUpdate(MI.getOpcode()))
     return nullptr;
 
   unsigned NumOps = MI.getDesc().getNumOperands();
@@ -8586,7 +8586,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
   // Unless optimizing for size, don't fold to avoid partial
   // register update stalls
   // TODO: we should block undef reg update as well.
-  if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
+  if (!MF.getFunction().optForSize() && hasPartialRegUpdate(MI.getOpcode()))
     return nullptr;
 
   // Don't fold subreg spills, or reloads that use a high subreg.
@@ -8785,7 +8785,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
   // Avoid partial register update stalls unless optimizing for size.
   // TODO: we should block undef reg update as well.
-  if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
+  if (!MF.getFunction().optForSize() && hasPartialRegUpdate(MI.getOpcode()))
     return nullptr;
 
   // Determine the alignment of the load.
@@ -8881,16 +8881,16 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
     Type *Ty;
     unsigned Opc = LoadMI.getOpcode();
     if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS)
-      Ty = Type::getFloatTy(MF.getFunction()->getContext());
+      Ty = Type::getFloatTy(MF.getFunction().getContext());
     else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD)
-      Ty = Type::getDoubleTy(MF.getFunction()->getContext());
+      Ty = Type::getDoubleTy(MF.getFunction().getContext());
     else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES)
-      Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()),16);
+      Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),16);
     else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 ||
              Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES)
-      Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 8);
+      Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 8);
     else
-      Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
+      Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 4);
 
     bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES ||
                       Opc == X86::AVX512_512_SETALLONES ||
@@ -10691,7 +10691,7 @@ namespace {
     LDTLSCleanup() : MachineFunctionPass(ID) {}
 
     bool runOnMachineFunction(MachineFunction &MF) override {
-      if (skipFunction(*MF.getFunction()))
+      if (skipFunction(MF.getFunction()))
        return false;
 
       X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
@@ -10852,16 +10852,16 @@ X86InstrInfo::getOutlininingCandidateInfo(
 
 bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const {
-  const Function *F = MF.getFunction();
+  const Function &F = MF.getFunction();
 
   // Does the function use a red zone? If it does, then we can't risk messing
   // with the stack.
-  if (!F->hasFnAttribute(Attribute::NoRedZone))
+  if (!F.hasFnAttribute(Attribute::NoRedZone))
     return false;
 
   // If we *don't* want to outline from things that could potentially be deduped
   // then return false.
-  if (!OutlineFromLinkOnceODRs && F->hasLinkOnceODRLinkage())
+  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
     return false;
 
   // This function is viable for outlining, so return true.
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 7bc67c7e675..42e89cb4831 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -918,11 +918,11 @@ def IsNotPIC : Predicate<"!TM.isPositionIndependent()">;
 // the Function object through the <Target>Subtarget and objections were raised
 // to that (see post-commit review comments for r301750).
 let RecomputePerFunction = 1 in {
-  def OptForSize : Predicate<"MF->getFunction()->optForSize()">;
-  def OptForMinSize : Predicate<"MF->getFunction()->optForMinSize()">;
-  def OptForSpeed : Predicate<"!MF->getFunction()->optForSize()">;
+  def OptForSize : Predicate<"MF->getFunction().optForSize()">;
+  def OptForMinSize : Predicate<"MF->getFunction().optForMinSize()">;
+  def OptForSpeed : Predicate<"!MF->getFunction().optForSize()">;
   def UseIncDec : Predicate<"!Subtarget->slowIncDec() || "
-                            "MF->getFunction()->optForSize()">;
+                            "MF->getFunction().optForSize()">;
 }
 
 def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
diff --git a/lib/Target/X86/X86OptimizeLEAs.cpp b/lib/Target/X86/X86OptimizeLEAs.cpp
index 0b77014f2b6..1fc6f07b79f 100644
--- a/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -672,7 +672,7 @@ bool OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
 bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
   bool Changed = false;
 
-  if (DisableX86LEAOpt || skipFunction(*MF.getFunction()))
+  if (DisableX86LEAOpt || skipFunction(MF.getFunction()))
     return false;
 
   MRI = &MF.getRegInfo();
@@ -696,7 +696,7 @@ bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
 
     // Remove redundant address calculations. Do it only for -Os/-Oz since only
     // a code size gain is expected from this part of the pass.
-    if (MF.getFunction()->optForSize())
+    if (MF.getFunction().optForSize())
       Changed |= removeRedundantAddrCalc(LEAs);
   }
 
diff --git a/lib/Target/X86/X86PadShortFunction.cpp b/lib/Target/X86/X86PadShortFunction.cpp
index f2ee437116e..1da0fad8b6c 100644
--- a/lib/Target/X86/X86PadShortFunction.cpp
+++ b/lib/Target/X86/X86PadShortFunction.cpp
@@ -96,10 +96,10 @@ FunctionPass *llvm::createX86PadShortFunctions() {
 /// runOnMachineFunction - Loop over all of the basic blocks, inserting
 /// NOOP instructions before early exits.
 bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
-  if (skipFunction(*MF.getFunction()))
+  if (skipFunction(MF.getFunction()))
     return false;
 
-  if (MF.getFunction()->optForSize()) {
+  if (MF.getFunction().optForSize()) {
     return false;
   }
 
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index d690035e0a6..bc31e95aa6b 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -218,13 +218,13 @@ X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
 const TargetRegisterClass *
 X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
-  const Function *F = MF.getFunction();
-  if (IsWin64 || (F && F->getCallingConv() == CallingConv::Win64))
+  const Function &F = MF.getFunction();
+  if (IsWin64 || (F.getCallingConv() == CallingConv::Win64))
     return &X86::GR64_TCW64RegClass;
   else if (Is64Bit)
     return &X86::GR64_TCRegClass;
 
-  bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
+  bool hasHipeCC = (F.getCallingConv() == CallingConv::HiPE);
   if (hasHipeCC)
     return &X86::GR32RegClass;
 
   return &X86::GR32_TCRegClass;
@@ -266,17 +266,17 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
   assert(MF && "MachineFunction required");
 
   const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
-  const Function *F = MF->getFunction();
+  const Function &F = MF->getFunction();
   bool HasSSE = Subtarget.hasSSE1();
   bool HasAVX = Subtarget.hasAVX();
   bool HasAVX512 = Subtarget.hasAVX512();
   bool CallsEHReturn = MF->callsEHReturn();
 
-  CallingConv::ID CC = F->getCallingConv();
+  CallingConv::ID CC = F.getCallingConv();
 
   // If attribute NoCallerSavedRegisters exists then we set X86_INTR calling
   // convention because it has the CSR list.
-  if (MF->getFunction()->hasFnAttribute("no_caller_saved_registers"))
+  if (MF->getFunction().hasFnAttribute("no_caller_saved_registers"))
     CC = CallingConv::X86_INTR;
 
   switch (CC) {
@@ -362,7 +362,7 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
   if (Is64Bit) {
     bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
-                     F->getAttributes().hasAttrSomewhere(Attribute::SwiftError);
+                     F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
     if (IsSwiftCC)
       return IsWin64 ? CSR_Win64_SwiftError_SaveList
                      : CSR_64_SwiftError_SaveList;
@@ -380,7 +380,7 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
 const MCPhysReg *X86RegisterInfo::getCalleeSavedRegsViaCopy(
     const MachineFunction *MF) const {
   assert(MF && "Invalid MachineFunction pointer.");
-  if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
+  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
       MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR())
     return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
   return nullptr;
@@ -473,9 +473,9 @@ X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
   // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
   // callsEHReturn().
   if (Is64Bit) {
-    const Function *F = MF.getFunction();
+    const Function &F = MF.getFunction();
     bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
-                     F->getAttributes().hasAttrSomewhere(Attribute::SwiftError);
+                     F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
     if (IsSwiftCC)
       return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;
 
     return IsWin64 ? CSR_Win64_RegMask : CSR_64_RegMask;
@@ -519,7 +519,7 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   // Set the base-pointer register and its aliases as reserved if needed.
   if (hasBasePointer(MF)) {
-    CallingConv::ID CC = MF.getFunction()->getCallingConv();
+    CallingConv::ID CC = MF.getFunction().getCallingConv();
     const uint32_t *RegMask = getCallPreservedMask(MF, CC);
     if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
       report_fatal_error(
diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp
index d0065563596..1e04997ad29 100644
--- a/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -247,7 +247,7 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
     Repeats.AVT = Subtarget.is64Bit() ? MVT::i64 : MVT::i32;
 
     if (Repeats.BytesLeft() > 0 &&
-        DAG.getMachineFunction().getFunction()->optForMinSize()) {
+        DAG.getMachineFunction().getFunction().optForMinSize()) {
       // When agressively optimizing for size, avoid generating the code to
       // handle BytesLeft.
       Repeats.AVT = MVT::i8;
diff --git a/lib/Target/X86/X86VZeroUpper.cpp b/lib/Target/X86/X86VZeroUpper.cpp
index 0b67e819a64..224262830b1 100644
--- a/lib/Target/X86/X86VZeroUpper.cpp
+++ b/lib/Target/X86/X86VZeroUpper.cpp
@@ -285,7 +285,7 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
   TII = ST.getInstrInfo();
   MachineRegisterInfo &MRI = MF.getRegInfo();
   EverMadeChange = false;
-  IsX86INTR = MF.getFunction()->getCallingConv() == CallingConv::X86_INTR;
+  IsX86INTR = MF.getFunction().getCallingConv() == CallingConv::X86_INTR;
 
   bool FnHasLiveInYmmOrZmm = checkFnHasLiveInYmmOrZmm(MRI);
 
diff --git a/lib/Target/X86/X86WinAllocaExpander.cpp b/lib/Target/X86/X86WinAllocaExpander.cpp
index 8a186e94d9c..1046696587d 100644
--- a/lib/Target/X86/X86WinAllocaExpander.cpp
+++ b/lib/Target/X86/X86WinAllocaExpander.cpp
@@ -279,9 +279,9 @@ bool X86WinAllocaExpander::runOnMachineFunction(MachineFunction &MF) {
   SlotSize = TRI->getSlotSize();
 
   StackProbeSize = 4096;
-  if (MF.getFunction()->hasFnAttribute("stack-probe-size")) {
+  if (MF.getFunction().hasFnAttribute("stack-probe-size")) {
     MF.getFunction()
-        ->getFnAttribute("stack-probe-size")
+        .getFnAttribute("stack-probe-size")
         .getValueAsString()
         .getAsInteger(0, StackProbeSize);
   }
diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp
index 3d8712dd03e..62b2c8eee15 100644
--- a/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -238,7 +238,7 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF,
     report_fatal_error("emitPrologue unsupported alignment: "
                        + Twine(MFI.getMaxAlignment()));
 
-  const AttributeList &PAL = MF.getFunction()->getAttributes();
+  const AttributeList &PAL = MF.getFunction().getAttributes();
   if (PAL.hasAttrSomewhere(Attribute::Nest))
     BuildMI(MBB, MBBI, dl, TII.get(XCore::LDWSP_ru6), XCore::R11).addImm(0);
     // FIX: Needs addMemOperand() but can't use getFixedStack() or getStack().
@@ -324,7 +324,7 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF,
   if (XFI->hasEHSpillSlot()) {
     // The unwinder requires stack slot & CFI offsets for the exception info.
     // We do not save/spill these registers.
-    const Function *Fn = MF.getFunction();
+    const Function *Fn = &MF.getFunction();
     const Constant *PersonalityFn =
         Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
     SmallVector<StackSlotInfo, 2> SpillList;
@@ -359,7 +359,7 @@ void XCoreFrameLowering::emitEpilogue(MachineFunction &MF,
   if (RetOpcode == XCore::EH_RETURN) {
     // 'Restore' the exception info the unwinder has placed into the stack
     // slots.
-    const Function *Fn = MF.getFunction();
+    const Function *Fn = &MF.getFunction();
     const Constant *PersonalityFn =
         Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
     SmallVector<StackSlotInfo, 2> SpillList;
@@ -542,7 +542,7 @@ void XCoreFrameLowering::determineCalleeSaves(MachineFunction &MF,
   const MachineRegisterInfo &MRI = MF.getRegInfo();
   bool LRUsed = MRI.isPhysRegModified(XCore::LR);
 
-  if (!LRUsed && !MF.getFunction()->isVarArg() &&
+  if (!LRUsed && !MF.getFunction().isVarArg() &&
       MF.getFrameInfo().estimateStackSize(MF))
     // If we need to extend the stack it is more efficient to use entsp / retsp.
     // We force the LR to be saved so these instructions are used.
diff --git a/lib/Target/XCore/XCoreInstrInfo.cpp b/lib/Target/XCore/XCoreInstrInfo.cpp
index 7a9c6fc93f8..c885332b07a 100644
--- a/lib/Target/XCore/XCoreInstrInfo.cpp
+++ b/lib/Target/XCore/XCoreInstrInfo.cpp
@@ -443,7 +443,7 @@ MachineBasicBlock::iterator XCoreInstrInfo::loadImmediate(
   }
   MachineConstantPool *ConstantPool = MBB.getParent()->getConstantPool();
   const Constant *C = ConstantInt::get(
-      Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Value);
+      Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Value);
   unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
   return BuildMI(MBB, MI, dl, get(XCore::LDWCP_lru6), Reg)
       .addConstantPoolIndex(Idx)
diff --git a/lib/Target/XCore/XCoreMachineFunctionInfo.cpp b/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
index 35089fabd5a..b7b0daab980 100644
--- a/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
+++ b/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
@@ -39,7 +39,7 @@ int XCoreFunctionInfo::createLRSpillSlot(MachineFunction &MF) {
   const TargetRegisterClass &RC = XCore::GRRegsRegClass;
   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
   MachineFrameInfo &MFI = MF.getFrameInfo();
-  if (! MF.getFunction()->isVarArg()) {
+  if (! MF.getFunction().isVarArg()) {
     // A fixed offset of 0 allows us to save / restore LR using entsp / retsp.
     LRSpillSlot = MFI.CreateFixedObject(TRI.getSpillSize(RC), 0, true);
   } else {
diff --git a/lib/Target/XCore/XCoreRegisterInfo.cpp b/lib/Target/XCore/XCoreRegisterInfo.cpp
index a6cf6837009..70376d40a37 100644
--- a/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -204,8 +204,7 @@ static void InsertSPConstInst(MachineBasicBlock::iterator II,
 }
 
 bool XCoreRegisterInfo::needsFrameMoves(const MachineFunction &MF) {
-  return MF.getMMI().hasDebugInfo() ||
-         MF.getFunction()->needsUnwindTableEntry();
+  return MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry();
 }
 
 const MCPhysReg *