#define DEBUG_TYPE "frame-info"

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");
return AFI->hasStreamingModeChanges() &&
       /* ... */;
unsigned Opc = MBBI->getOpcode();
if (Opc == AArch64::CNTD_XPiI)
  // ...
if (Opc == AArch64::BL)
  // ...
return Opc == TargetOpcode::COPY;
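
// The next two classifier switches, isPartOfZPRCalleeSaves() and
// isPartOfPPRCalleeSaves(), recognize the instructions (and their Windows SEH
// pseudos) that make up the SVE Z-register and predicate-register
// callee-save sequences.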
switch (I->getOpcode()) {
// ...
case AArch64::LD1B_2Z_IMM:
case AArch64::ST1B_2Z_IMM:
case AArch64::STR_ZXI:
case AArch64::LDR_ZXI:
case AArch64::PTRUE_C_B:
// ...
case AArch64::SEH_SaveZReg:
// ...
switch (I->getOpcode()) {
// ...
case AArch64::STR_PXI:
case AArch64::LDR_PXI:
// ...
case AArch64::SEH_SavePReg:
// ...
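
// SVE stack layout selection: Windows with SVE callee saves uses
// CalleeSavesAboveFrameRecord (and rejects SME hazard padding), while
// functions with split SVE objects use the Split layout.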
if (Subtarget.isTargetWindows() && AFI->getSVECalleeSavedStackSize()) {
  if (AFI->hasStackHazardSlotIndex())
    reportFatalUsageError("SME hazard padding is not supported on Windows");
  SVELayout = SVEStackLayout::CalleeSavesAboveFrameRecord;
} else if (AFI->hasSplitSVEObjects()) {
  SVELayout = SVEStackLayout::Split;
  // ...
if (AFL.requiresSaveVG(MF)) {
  auto &TLI = *Subtarget.getTargetLowering();
  // ...
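
// convertCalleeSaveRestoreToSPPrePostIncDec(): fold an SP adjustment into the
// first/last callee-save instruction by rewriting it to the matching
// pre-increment store (prologue) or post-increment load (epilogue).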
switch (MBBI->getOpcode()) {
// ...
case AArch64::STPXi:
  NewOpc = AArch64::STPXpre;
  break;
case AArch64::STPDi:
  NewOpc = AArch64::STPDpre;
  break;
case AArch64::STPQi:
  NewOpc = AArch64::STPQpre;
  break;
case AArch64::STRXui:
  NewOpc = AArch64::STRXpre;
  break;
case AArch64::STRDui:
  NewOpc = AArch64::STRDpre;
  break;
case AArch64::STRQui:
  NewOpc = AArch64::STRQpre;
  break;
case AArch64::LDPXi:
  NewOpc = AArch64::LDPXpost;
  break;
case AArch64::LDPDi:
  NewOpc = AArch64::LDPDpost;
  break;
case AArch64::LDPQi:
  NewOpc = AArch64::LDPQpost;
  break;
case AArch64::LDRXui:
  NewOpc = AArch64::LDRXpost;
  break;
case AArch64::LDRDui:
  NewOpc = AArch64::LDRDpost;
  break;
case AArch64::LDRQui:
  NewOpc = AArch64::LDRQpost;
  break;
}
int64_t MinOffset, MaxOffset;
bool Success = static_cast<const AArch64InstrInfo *>(TII)->getMemOpInfo(
    NewOpc, Scale, Width, MinOffset, MaxOffset);
// ...
if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 ||
    CSStackSizeInc < MinOffset * (int64_t)Scale.getFixedValue() ||
    CSStackSizeInc > MaxOffset * (int64_t)Scale.getFixedValue()) {
  // ...
  return std::prev(MBBI);
}
// ...
auto SEH = std::next(MBBI);
if (AArch64InstrInfo::isSEHInstruction(*SEH))
  SEH->eraseFromParent();
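
// The replacement instruction is rebuilt with every operand except the
// trailing immediate, which is re-added as the scaled SP increment; the
// paired SEH pseudo is regenerated via insertSEH().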
unsigned OpndIdx = 0;
for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
     ++OpndIdx)
  MIB.add(MBBI->getOperand(OpndIdx));

assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
       "Unexpected immediate offset in first/last callee-save save/restore "
       "instruction!");
assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
       "Unexpected base register in callee-save save/restore instruction!");
assert(CSStackSizeInc % Scale == 0);
MIB.addImm(CSStackSizeInc / (int)Scale);
// ...
AFL.insertSEH(*MIB, *TII, FrameFlag);
// ...
return std::prev(MBB.erase(MBBI));
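
// fixupSEHOpcode(): when the containing instruction's offset changes, the
// offset operand of the matching SEH unwind pseudo must be bumped by the
// local stack size too.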
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
                           unsigned LocalStackSize) {
  // ...
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  switch (MBBI->getOpcode()) {
  // ...
  case AArch64::SEH_SaveFPLR:
  case AArch64::SEH_SaveRegP:
  case AArch64::SEH_SaveReg:
  case AArch64::SEH_SaveFRegP:
  case AArch64::SEH_SaveFReg:
  case AArch64::SEH_SaveAnyRegQP:
  case AArch64::SEH_SaveAnyRegQPX:
    ImmOpnd = &MBBI->getOperand(ImmIdx);
    // ...
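
// fixupCalleeSaveRestoreStackOffset(): after locals are allocated below the
// callee saves, each save/restore's scaled immediate offset is increased by
// LocalStackSize / Scale.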
if (AArch64InstrInfo::isSEHInstruction(MI))
  // ...
unsigned Opc = MI.getOpcode();
// ...
switch (Opc) {
case AArch64::STRXui:
// ...
case AArch64::STRDui:
// ...
case AArch64::LDRXui:
// ...
case AArch64::LDRDui:
// ...
case AArch64::STRQui:
// ...
case AArch64::LDRQui:
// ...
}
// ...
unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
       "Unexpected base register in callee-save save/restore instruction!");
// ...
assert(LocalStackSize % Scale == 0);
OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale);
assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction");
assert(AArch64InstrInfo::isSEHInstruction(*MBBI) &&
       "Expecting a SEH instruction");
if (AFL.homogeneousPrologEpilog(MF))
  // ...
if (AFI->getLocalStackSize() == 0)
  // ...
if (AFL.needsWinCFI(MF) && AFI->getCalleeSavedStackSize() > 0 &&
    MF.getFunction().hasOptSize())
  // ...
if (StackBumpBytes >= 512 || AFL.windowsRequiresStackProbe(MF, StackBumpBytes))
  // ...
if (MFI.hasVarSizedObjects())
  // ...
if (AFL.canUseRedZone(MF))
  // ...
if (AFI->hasSVEStackSize())
  // ...
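
// getSVEStackFrameSizes(): package the PPR/ZPR callee-save and local sizes;
// one layout keeps PPR and ZPR locals separate, the other folds the PPR
// locals into the ZPR region. getSVEStackAllocations() then derives the
// three SP bumps: before the PPR saves, after them, and after the ZPR saves.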
return {{PPRCalleeSavesSize, PPRLocalsSize},
        {ZPRCalleeSavesSize, ZPRLocalsSize}};
// ...
return {/* ... */,
        {ZPRCalleeSavesSize, PPRLocalsSize + ZPRLocalsSize}};

// ...
BeforePPRs = SVE.PPR.CalleeSavesSize;
// ...
if (SVE.ZPR.CalleeSavesSize)
  AfterPPRs += SVE.PPR.LocalsSize + SVE.ZPR.CalleeSavesSize;
else
  AfterZPRs += SVE.PPR.LocalsSize;
// ...
return {BeforePPRs, AfterPPRs, AfterZPRs};
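
// partitionSVECS(): split the SVE callee-save sequence into PPR and ZPR
// ranges by scanning the save/restore instructions, backwards for epilogues
// and forwards for prologues.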
// ...
    IsEpilogue ? MBB.begin() : MBB.getFirstTerminator();
auto AdjustI = [&](auto MBBI) { return IsEpilogue ? std::prev(MBBI) : MBBI; };
// ...
if (PPRCalleeSavesSize) {
  PPRsI = AdjustI(PPRsI);
  // ...
    IsEpilogue ? (--PPRsI) : (++PPRsI);
}
// ...
if (ZPRCalleeSavesSize) {
  ZPRsI = AdjustI(ZPRsI);
  // ...
    IsEpilogue ? (--ZPRsI) : (++ZPRsI);
}
// ...
if (IsEpilogue)
  return {{PPRsI, MBBI}, {ZPRsI, PPRsI}};
return {{MBBI, PPRsI}, {PPRsI, ZPRsI}};
EmitAsyncCFI = AFI->needsAsyncDwarfUnwindInfo(MF);
// ...
collectBlockLiveins();
// ...

void AArch64PrologueEmitter::collectBlockLiveins() {
  // ...
  PrologueEndI = MBB.begin();
  while (PrologueEndI != MBB.end() &&
         /* ... */)
    // ...
  if (PrologueEndI != MBB.end()) {
    // ...
void AArch64PrologueEmitter::verifyPrologueClobbers() const {
  if (PrologueEndI == MBB.end())
    return;
  // ...
  for (MachineInstr &MI :
       make_range(MBB.instr_begin(), PrologueEndI->getIterator())) {
    for (auto &Op : MI.operands())
      if (Op.isReg() && Op.isDef())
        assert(!LiveRegs.contains(Op.getReg()) &&
               "live register clobbered by inserted prologue instructions");
  }
}
void AArch64PrologueEmitter::determineLocalsStackSize(
    uint64_t StackSize, uint64_t PrologueSaveSize) {
  AFI->setLocalStackSize(StackSize - PrologueSaveSize);
  // ...
}

static int64_t upperBound(StackOffset Size) {
  static const int64_t MAX_BYTES_PER_SCALABLE_BYTE = 16;
  return Size.getScalable() * MAX_BYTES_PER_SCALABLE_BYTE + Size.getFixed();
}
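
// upperBound() conservatively converts a StackOffset to bytes by assuming
// the architectural maximum of 16 bytes per scalable byte. As a worked
// example with hypothetical values: 16 scalable + 32 fixed bytes gives
// 16 * 16 + 32 = 288 bytes.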
void AArch64PrologueEmitter::allocateStackSpace(
    MachineBasicBlock::iterator MBBI, int64_t RealignmentPadding,
    StackOffset AllocSize, bool EmitCFI, StackOffset InitialOffset,
    bool FollowupAllocs) {
  // ...
  const uint64_t AndMask = ~(MaxAlign - 1);
  // ...
  Register TargetReg = RealignmentPadding
                           ? AFL.findScratchNonCalleeSaveRegister(&MBB)
                           : AArch64::SP;
  // ...
  if (RealignmentPadding) {
    // ...
  }

  if (AllocSize.getScalable() == 0 && RealignmentPadding == 0) {
    // ...
    assert(ScratchReg != AArch64::NoRegister);
    // ...
        .addImm(AllocSize.getFixed())
        .addImm(InitialOffset.getFixed())
        .addImm(InitialOffset.getScalable());
    // ...
    if (FollowupAllocs) {
      // ...
    }
    // ...
  }
  // ...
  int64_t ProbeSize = AFI->getStackProbeSize();
  if (upperBound(AllocSize) + RealignmentPadding <= ProbeSize) {
    Register ScratchReg = RealignmentPadding
                              ? AFL.findScratchNonCalleeSaveRegister(&MBB)
                              : AArch64::SP;
    assert(ScratchReg != AArch64::NoRegister);
    // ...
    if (RealignmentPadding) {
      // ...
      AFI->setStackRealigned(true);
    }
    if (FollowupAllocs || upperBound(AllocSize) + RealignmentPadding >
                              AArch64::StackProbeMaxUnprobedStack) {
      // ...
    }
    // ...
  }
  // ...
  assert(TargetReg != AArch64::NoRegister);
  // ...
  if (RealignmentPadding) {
    // ...
  }
  // ...
      .buildDefCFARegister(AArch64::SP);
  // ...
  if (RealignmentPadding)
    AFI->setStackRealigned(true);
}
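
// emitPrologue() main body (fragments): return-address signing and shadow
// call stack setup, the Swift async frame pointer, MTE tagged-base-pointer
// bookkeeping, then the callee-save spills (folding the SP decrement into the
// first save where possible), frame-pointer setup, Windows stack probing,
// SVE allocations, and finally the local-stack allocation with optional
// realignment.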
void AArch64PrologueEmitter::emitPrologue() {
  // ...
  AFI->setHasRedZone(false);
  // ...
  if (AFI->getArgumentStackToRestore())
    // ...
  if (AFI->shouldSignReturnAddress(MF)) {
    // ...
    if (!AFL.shouldSignReturnAddressEverywhere(MF)) {
      // ...
    }
    // ...
  }
  if (AFI->needsShadowCallStackPrologueEpilogue(MF)) {
    emitShadowCallStackPrologue(PrologueBeginI, DL);
    // ...
  }
  // ...
  if (HasFP && AFI->hasSwiftAsyncContext())
    emitSwiftAsyncContextFramePointer(PrologueBeginI, DL);
  // ...
  if (std::optional<int> TBPI = AFI->getTaggedBasePointerIndex())
    AFI->setTaggedBasePointerOffset(-MFI.getObjectOffset(*TBPI));
  else
    AFI->setTaggedBasePointerOffset(MFI.getStackSize());
  // ...
  if (!AFI->hasStackFrame() && !AFL.windowsRequiresStackProbe(MF, NumBytes))
    return emitEmptyStackFramePrologue(NumBytes, PrologueBeginI, DL);

  bool IsWin64 =
      Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg());
  // ...
  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // ...
  determineLocalsStackSize(NumBytes, PrologueSaveSize);
  // ...
           "unexpected SVE allocs after PPRs with CalleeSavesAboveFrameRecord");
    // ...
    allocateStackSpace(PrologueBeginI, 0, SaveSize, false, StackOffset{},
                       /* ... */);
    NumBytes -= FixedObject;
    // ...
        MBBI, DL, -AFI->getCalleeSavedStackSize(), EmitAsyncCFI);
    NumBytes -= AFI->getCalleeSavedStackSize();
  } else if (CombineSPBump) {
    assert(!AFL.getSVEStackSize(MF) && "Cannot combine SP bump with SVE");
    // ...
    NumBytes -= PrologueSaveSize;
  } else if (PrologueSaveSize != 0) {
    // ...
        PrologueBeginI, DL, -PrologueSaveSize, EmitAsyncCFI);
    NumBytes -= PrologueSaveSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");
  // ...
  auto &TLI = *Subtarget.getTargetLowering();
  // ...
  while (AfterGPRSavesI != EndI &&
         /* ... */) {
    // ...
      fixupCalleeSaveRestoreStackOffset(*AfterGPRSavesI,
                                        AFI->getLocalStackSize());
    // ...
  }
  // ...
  emitFramePointerSetup(AfterGPRSavesI, DL, FixedObject);
  // ...
  emitCalleeSavedGPRLocations(AfterGPRSavesI);
  // ...
  const bool NeedsRealignment = /* ... */;
  const int64_t RealignmentPadding =
      (NeedsRealignment && MFI.getMaxAlign() > Align(16))
          ? MFI.getMaxAlign().value() - 16
          : 0;

  if (AFL.windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding))
    emitWindowsStackProbe(AfterGPRSavesI, DL, NumBytes, RealignmentPadding);
  // ...
  auto [PPRRange, ZPRRange] = partitionSVECS(
      /* ... */
      ZPR.CalleeSavesSize, false);
  AfterSVESavesI = ZPRRange.End;
  // ...
  emitCalleeSavedSVELocations(AfterSVESavesI);

  allocateStackSpace(PPRRange.Begin, 0, SVEAllocs.BeforePPRs,
                     EmitAsyncCFI && !HasFP, CFAOffset,
                     /* ... */);
  // ...
  assert(PPRRange.End == ZPRRange.Begin &&
         "Expected ZPR callee saves after PPR locals");
  allocateStackSpace(PPRRange.End, 0, SVEAllocs.AfterPPRs,
                     EmitAsyncCFI && !HasFP, CFAOffset,
                     /* ... */);
  // ...
  assert(!(AFL.canUseRedZone(MF) && NeedsRealignment) &&
         "Cannot use redzone with stack realignment");
  if (!AFL.canUseRedZone(MF)) {
    // ...
    allocateStackSpace(AfterSVESavesI, RealignmentPadding, SVEAllocs.AfterZPRs,
                       EmitAsyncCFI && !HasFP, CFAOffset,
                       MFI.hasVarSizedObjects());
  }
  // ...
  MBB.addLiveIn(AArch64::X1);
  // ...
  if (EmitCFI && !EmitAsyncCFI) {
    // ...
    emitDefineCFAWithFP(AfterSVESavesI, FixedObject);
    // ...
        AFL.getSVEStackSize(MF) +
        /* ... */
    // ...
    emitCalleeSavedGPRLocations(AfterSVESavesI);
    emitCalleeSavedSVELocations(AfterSVESavesI);
  }
}
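
// emitShadowCallStackPrologue(): push LR via x18 and describe the saved
// return address to the unwinder with a DW_CFA_val_expression escape based
// on DW_OP_breg18.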
void AArch64PrologueEmitter::emitShadowCallStackPrologue(
    // ...
  MBB.addLiveIn(AArch64::X18);
  // ...
  static const char CFIInst[] = {
      dwarf::DW_CFA_val_expression,
      // ...
      static_cast<char>(unsigned(dwarf::DW_OP_breg18)),
      static_cast<char>(-8) & 0x7f,
      // ...
  };
  // ...
      .buildEscape(StringRef(CFIInst, sizeof(CFIInst)));
void AArch64PrologueEmitter::emitSwiftAsyncContextFramePointer(
    // ...
  switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
  // ...
    if (Subtarget.swiftAsyncContextIsDynamicallySet()) {
      // ...
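
// emitEmptyStackFramePrologue(): for frameless functions (that also need no
// Windows stack probe) the locals may live in the red zone; only a
// def_cfa_offset record against a temporary frame label is emitted.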
void AArch64PrologueEmitter::emitEmptyStackFramePrologue(
    // ...
  assert(!HasFP && "unexpected function without stack frame but with FP");
  // ...
         "unexpected function without stack frame but with SVE objects");
  // ...
  AFI->setLocalStackSize(NumBytes);
  // ...
  if (AFL.canUseRedZone(MF)) {
    AFI->setHasRedZone(true);
    ++NumRedZoneFunctions;
  }
  // ...
  MCSymbol *FrameLabel = MF.getContext().createTempSymbol();
  // ...
      .buildDefCFAOffset(NumBytes, FrameLabel);
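
// emitFramePointerSetup(): compute FP's offset within the frame record
// (including locals when applicable), keep the Swift async context register
// x22 live where present, and finally define the CFA in terms of FP.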
void AArch64PrologueEmitter::emitFramePointerSetup(
    // ...
    unsigned FixedObject) {
  int64_t FPOffset = AFI->getCalleeSaveBaseToFrameRecordOffset();
  // ...
  FPOffset += AFI->getLocalStackSize();
  // ...
  if (AFI->hasSwiftAsyncContext()) {
    // ...
    const auto &Attrs = MF.getFunction().getAttributes();
    bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
    if (HaveInitialContext)
      MBB.addLiveIn(AArch64::X22);
    Register Reg = HaveInitialContext ? AArch64::X22 : AArch64::XZR;
    // ...
  }
  // ...
  emitDefineCFAWithFP(MBBI, FixedObject);
void AArch64PrologueEmitter::emitDefineCFAWithFP(
    // ...
  const int OffsetToFirstCalleeSaveFromFP =
      AFI->getCalleeSaveBaseToFrameRecordOffset() -
      AFI->getCalleeSavedStackSize();
  // ...
      .buildDefCFA(FramePtr, FixedObject - OffsetToFirstCalleeSaveFromFP);
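
// emitWindowsStackProbe(): emit the Windows stack-probe (__chkstk-style)
// sequence; x15 carries the allocation size in 16-byte units, and is first
// moved to a scratch register if it is live-in.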
void AArch64PrologueEmitter::emitWindowsStackProbe(
    // ...
    int64_t RealignmentPadding) const {
  if (AFI->getSVECalleeSavedStackSize())
    // ...
  unsigned X15Scratch = AArch64::NoRegister;
  // ...
      [this](const MachineBasicBlock::RegisterMaskPair &LiveIn) {
        return RegInfo.isSuperOrSubRegisterEq(AArch64::X15, LiveIn.PhysReg);
      })) {
    X15Scratch = AFL.findScratchNonCalleeSaveRegister(&MBB, true);
    assert(X15Scratch != AArch64::NoRegister &&
           (X15Scratch < AArch64::X15 || X15Scratch > AArch64::X17));
    // ...
    LiveRegs.removeReg(AArch64::X15);
    // ...
  }
  // ...
  uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
  // ...
  if (NumBytes >= (1 << 28))
    report_fatal_error(/* ... */
                       "unwinding purposes");
  // ...
  uint32_t LowNumWords = NumWords & 0xFFFF;
  // ...
  if ((NumWords & 0xFFFF0000) != 0) {
    // ...
        .addImm((NumWords & 0xFFFF0000) >> 16)
    // ...
  }
  // ...
  const char *ChkStk = Subtarget.getChkStkName();
  switch (MF.getTarget().getCodeModel()) {
  // ...
  }
  // ...
  if (RealignmentPadding > 0) {
    if (RealignmentPadding >= 4096) {
      // ...
          .addImm(RealignmentPadding)
      // ...
    } else {
      // ...
          .addImm(RealignmentPadding)
      // ...
    }
    // ...
    uint64_t AndMask = ~(MFI.getMaxAlign().value() - 1);
    // ...
    AFI->setStackRealigned(true);
  }
  // ...
  if (X15Scratch != AArch64::NoRegister) {
    // ...
  }
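
// emitCalleeSavedGPRLocations(): emit a .cfi_offset for every non-scalable
// callee-save slot, relative to the local area base.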
void AArch64PrologueEmitter::emitCalleeSavedGPRLocations(
    // ...
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  // ...
  for (const auto &Info : CSI) {
    unsigned FrameIdx = Info.getFrameIdx();
    if (MFI.hasScalableStackID(FrameIdx))
      continue;

    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    int64_t Offset = MFI.getObjectOffset(FrameIdx) - AFL.getOffsetOfLocalArea();
    CFIBuilder.buildOffset(Info.getReg(), Offset);
  }
}
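
// emitCalleeSavedSVELocations(): emit CFI for scalable (SVE) callee saves;
// when VG is saved, offsets are expressed relative to the incoming VG's
// save slot.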
void AArch64PrologueEmitter::emitCalleeSavedSVELocations(
    // ...
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  // ...
  std::optional<int64_t> IncomingVGOffsetFromDefCFA;
  if (AFL.requiresSaveVG(MF)) {
    auto IncomingVG = *find_if(
        reverse(CSI), [](auto &Info) { return Info.getReg() == AArch64::VG; });
    IncomingVGOffsetFromDefCFA = MFI.getObjectOffset(IncomingVG.getFrameIdx()) -
                                 AFL.getOffsetOfLocalArea();
  }

  StackOffset PPRStackSize = AFL.getPPRStackSize(MF);
  for (const auto &Info : CSI) {
    int FI = Info.getFrameIdx();
    if (!MFI.hasScalableStackID(FI))
      continue;
    // ...
    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    MCRegister Reg = Info.getReg();
    // ...
    CFIBuilder.insertCFIInst(
        /* ... */);
  }
}
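
// Epilogue side. isFuncletReturnInstr() (next fragment) matches funclet
// returns such as CATCHRET and CLEANUPRET; the fragment after it appears to
// be moveSPBelowFP(), which expects a negative FP-relative offset.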
switch (MI.getOpcode()) {
// ...
case AArch64::CATCHRET:
case AArch64::CLEANUPRET:
// ...
}

// ...
SEHEpilogueStartI = MBB.end();
// ...
       "expected negative offset (with optional fixed portion)");
if (int64_t FixedOffset = Offset.getFixed()) {
  // ...
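
// emitEpilogue() main body (fragments): mirrors the prologue in reverse,
// folding the final SP bump into the last callee-save reload when it can,
// deallocating the SVE regions from SP or FP depending on realignment and
// variable-sized objects, and reclaiming the argument stack last.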
void AArch64EpilogueEmitter::emitEpilogue() {
  // ...
  if (MBB.end() != EpilogueEndI) {
    DL = EpilogueEndI->getDebugLoc();
    // ...
  }
  // ...
  int64_t ArgumentStackToRestore = AFL.getArgumentStackToRestore(MF, MBB);
  bool IsWin64 = Subtarget.isCallingConvWin64(
      MF.getFunction().getCallingConv(), MF.getFunction().isVarArg());
  // ...
  int64_t AfterCSRPopSize = ArgumentStackToRestore;
  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // ...
  if (MF.hasEHFunclets())
    AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
  // ...
  auto FirstHomogenousEpilogI = MBB.getFirstTerminator();
  if (FirstHomogenousEpilogI != MBB.begin()) {
    auto HomogeneousEpilog = std::prev(FirstHomogenousEpilogI);
    if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
      FirstHomogenousEpilogI = HomogeneousEpilog;
    // ...
  }
  // ...
  assert(AfterCSRPopSize == 0);
  // ...
  bool CombineSPBump = shouldCombineCSRLocalStackBump(NumBytes);
  // ...
  unsigned ProloguePopSize = PrologueSaveSize;
  // ...
  ProloguePopSize -= FixedObject;
  AfterCSRPopSize += FixedObject;
  // ...
  if (!CombineSPBump && ProloguePopSize != 0) {
    // ...
    while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
           AArch64InstrInfo::isSEHInstruction(*Pop) ||
           /* ... */)
      Pop = std::prev(Pop);
    // ...
    if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0) {
      // ...
    }
    // ...
    if (AArch64InstrInfo::isSEHInstruction(*AfterLastPop))
      // ...
    // ...
    AfterCSRPopSize += ProloguePopSize;
    // ...
  }
  // ...
  while (FirstGPRRestoreI != Begin) {
    // ...
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*FirstGPRRestoreI,
                                        AFI->getLocalStackSize());
  }
  // ...
  BuildMI(MBB, FirstGPRRestoreI, DL, TII->get(AArch64::SEH_EpilogStart))
      /* ... */;
  SEHEpilogueStartI = FirstGPRRestoreI;
  --SEHEpilogueStartI;
  // ...
          ? MBB.getFirstTerminator()
          /* ... */;
  // ...
                 PPR.CalleeSavesSize, ZPR.CalleeSavesSize, true);
  // ...
  if (HasFP && AFI->hasSwiftAsyncContext())
    emitSwiftAsyncContextFramePointer(EpilogueEndI, DL);
  // ...
  if (CombineSPBump) {
    assert(!AFI->hasSVEStackSize() && "Cannot combine SP bump with SVE");
    // ...
  }
  // ...
  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  StackOffset SVECalleeSavesSize = ZPR.CalleeSavesSize + PPR.CalleeSavesSize;
  // ...
         "unexpected SVE allocs after PPRs with CalleeSavesAboveFrameRecord");
  // ...
  if (!AFI->isStackRealigned() && !MFI.hasVarSizedObjects()) {
    // ...
  } else if (AFI->hasSVEStackSize()) {
    // ...
        (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) ? AArch64::FP
                                                              : AArch64::SP;
    if (SVECalleeSavesSize && BaseForSVEDealloc == AArch64::FP) {
      // ...
          -SVECalleeSavesSize - PPR.LocalsSize -
          /* ... */;
      // ...
      moveSPBelowFP(ZPRRange.Begin, FPOffsetZPR);
      // ...
      assert(!FPOffsetPPR.getFixed() && "expected only scalable offset");
      // ...
    } else if (BaseForSVEDealloc == AArch64::SP) {
      // ...
      NumBytes -= NonSVELocals.getFixed();
      // ...
    }
    // ...
    assert(PPRRange.Begin == ZPRRange.End && "Expected PPR restores after ZPR");
    // ...
    emitCalleeSavedSVERestores(
        /* ... */);
  }
  // ...
  bool RedZone = AFL.canUseRedZone(MF);
  // ...
  if (RedZone && AfterCSRPopSize == 0)
    // ...
  bool NoCalleeSaveRestore = PrologueSaveSize == 0;
  int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
  if (NoCalleeSaveRestore)
    StackRestoreBytes += AfterCSRPopSize;
  // ...
  emitFrameOffset(
      MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::SP,
      /* ... */);
  // ...
  if (NoCalleeSaveRestore || AfterCSRPopSize == 0)
    // ...
  if (!IsFunclet && (MFI.hasVarSizedObjects() || AFI->isStackRealigned())) {
    emitFrameOffset(
        MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::FP,
        /* ... */);
  } else if (NumBytes)
    // ...
  // ...
  if (AfterCSRPopSize) {
    assert(AfterCSRPopSize > 0 && "attempting to reallocate arg stack that an "
                                  "interrupt may have clobbered");
    // ...
    emitFrameOffset(
        MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
        /* ... */);
  }
}
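
// The epilogue-side shouldCombineCSRLocalStackBump() also inspects the last
// non-transient instructions for MTE tag-store opcodes (STGloop, STZGi,
// etc.), which get special treatment here.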
bool AArch64EpilogueEmitter::shouldCombineCSRLocalStackBump(
    uint64_t StackBumpBytes) const {
  // ...
  while (LastI != Begin) {
    // ...
    if (LastI->isTransient())
      // ...
  }
  // ...
  switch (LastI->getOpcode()) {
  case AArch64::STGloop:
  case AArch64::STZGloop:
  // ...
  case AArch64::STZGi:
  case AArch64::ST2Gi:
  case AArch64::STZ2Gi:
    // ...
void AArch64EpilogueEmitter::emitSwiftAsyncContextFramePointer(
    // ...
  switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
  // ...

void AArch64EpilogueEmitter::emitShadowCallStackEpilogue(
    // ...
  if (AFI->needsAsyncDwarfUnwindInfo(MF))
    // ...
        .buildRestore(AArch64::X18);
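
// emitCalleeSavedRestores(): emit .cfi_restore for each callee save in the
// requested class; the SVE flag selects entries with scalable stack IDs.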
void AArch64EpilogueEmitter::emitCalleeSavedRestores(
    // ...
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  // ...
  for (const auto &Info : CSI) {
    if (SVE != MFI.hasScalableStackID(Info.getFrameIdx()))
      continue;
    // ...
    MCRegister Reg = Info.getReg();
    // ...
    CFIBuilder.buildRestore(Info.getReg());
  }
}
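
// finalizeEpilogue(): pop the shadow call stack, authenticate the return
// address via PAUTH_EPILOGUE when signing is enabled, and close the WinCFI
// region with SEH_EpilogEnd.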
void AArch64EpilogueEmitter::finalizeEpilogue() const {
  if (AFI->needsShadowCallStackPrologueEpilogue(MF)) {
    emitShadowCallStackEpilogue(MBB.getFirstTerminator(), DL);
    // ...
  }
  // ...
  emitCalleeSavedGPRRestores(MBB.getFirstTerminator());
  if (AFI->shouldSignReturnAddress(MF)) {
    // ...
    if (!AFL.shouldSignReturnAddressEverywhere(MF)) {
      // ...
          TII->get(AArch64::PAUTH_EPILOGUE))
      // ...
    }
  }
  // ...
  BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::SEH_EpilogEnd))
      // ...
  if (!MF.hasWinCFI())
    MF.setHasWinCFI(true);
  // ...
  MBB.erase(SEHEpilogueStartI);