#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-isel"

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
  static constexpr unsigned MaxRecursionDepth = 6;

  bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
                       const unsigned Depth = 0) const;

  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
                  bool IsExternWeak = false) const;

  void addVectorLoadStoreOperands(MachineInstr &I,
                                  SmallVectorImpl<SrcOp> &SrcOps,
                                  unsigned &CurOp, bool IsMasked,
                                  bool IsStridedOrIndexed,
                                  LLT *IndexVT = nullptr) const;

  ComplexRendererFns selectShiftMask(MachineOperand &Root,
                                     unsigned ShiftWidth) const;
  ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
    return selectShiftMask(Root, STI.getXLen());
  }
  ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
    return selectShiftMask(Root, 32);
  }

  ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
    return selectSExtBits(Root, Bits);
  }

  ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
    return selectZExtBits(Root, Bits);
  }

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }
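
  // Note on the helpers above: InstructionSelector::ComplexRendererFns is an
  // optional list of callbacks, each of which appends one operand to the
  // instruction being assembled. A minimal consumption sketch (hypothetical
  // caller, not part of this file):
  //
  //   if (auto Fns = selectShiftMaskXLen(MI.getOperand(2)))
  //     for (auto &Fn : *Fns)
  //       Fn(MIB);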
#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      // ...

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
                                               unsigned Bits,
                                               const unsigned Depth) const {
  assert((MI.getOpcode() == TargetOpcode::G_ADD ||
          MI.getOpcode() == TargetOpcode::G_SUB ||
          MI.getOpcode() == TargetOpcode::G_MUL ||
          MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_AND ||
          MI.getOpcode() == TargetOpcode::G_OR ||
          MI.getOpcode() == TargetOpcode::G_XOR ||
          MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
         "Unexpected opcode");

  if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
    return false;

  auto DestReg = MI.getOperand(0).getReg();
  for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
    assert(UserOp.getParent() && "UserOp must have a parent");
    const MachineInstr &UserMI = *UserOp.getParent();
    switch (UserMI.getOpcode()) {
    default:
      return false;
    // ...
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_S_W:
      // ...
    }
  }
  return true;
}
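
// Sketch of the idea behind the FCVT cases above (an illustration, not
// original code): FCVT_D_W / FCVT_S_W convert *from* a 32-bit integer, so
// they read only the low 32 bits of their GPR source. If every transitive
// user of a value consumes at most N bits, the producer can be narrowed.
// A non-exhaustive helper under that assumption would look like:
//
//   static bool userReadsOnlyLow32Bits(unsigned Opcode) {
//     switch (Opcode) {
//     case RISCV::FCVT_D_W:
//     case RISCV::FCVT_S_W:
//       return true; // source interpreted as i32
//     default:
//       return false;
//     }
//   }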
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
                                          unsigned ShiftWidth) const {
  using namespace llvm::MIPatternMatch;
  // ...

  // Peek through a zero-extended shift amount.
  // ...
    ShAmtReg = ZExtSrcReg;
  // ...
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // The mask may have been optimized; try restoring bits that are known
      // zero before testing again.
      KnownBits Known = VT->getKnownBits(AndSrcReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  // ...

    // Shifting by (x + c) with c % ShiftWidth == 0 is the same as shifting
    // by x.
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // ...
  // ...
    // Shifting by (c - x) with c % ShiftWidth == 0 can use a NEG instead of
    // a SUB of a constant.
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    // Shifting by (c - x) with c % ShiftWidth == ShiftWidth - 1 can use a
    // NOT instead of a SUB.
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  // ...

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}
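
// Standalone sketch of the redundancy test used above (an illustration under
// stated assumptions, not part of the original file): a shift of width W only
// consumes the shift-amount bits selected by W-1, so an AND whose mask covers
// those bits can be looked through when selecting the shift amount.
static bool andMaskCoversShiftAmount(const llvm::APInt &AndMask,
                                     unsigned ShiftWidth) {
  llvm::APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
  // e.g. (x & 63) used as a 64-bit shift amount: ShMask = 63 is a subset of
  // the AND mask, so the AND is redundant for the shift.
  return ShMask.isSubsetOf(AndMask);
}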
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  // ...
  Register RootReg = Root.getReg();
  MachineInstr *RootDef = MRI->getVRegDef(RootReg);
  if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
      RootDef->getOperand(2).getImm() == Bits)
    return {
        {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  // ...
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
      MRI->getType(RegX).getScalarSizeInBits() == Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if (VT->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  // ...
  const unsigned XLen = STI.getXLen();
  // ...
    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();

      // (shl (and y, mask), c2): the run of ones reaches the top bit, so an
      // SRLI can clear the low bits and SHXADD supplies the final shift.
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // (lshr (and y, mask), c2) with matching leading zero count.
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  // ...
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();
    // ...
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            .addImm(Trailing);
        MIB.addReg(DstReg);
      }}};
  // ...

  return std::nullopt;
}
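
// Standalone sketch (illustration only) of the shifted-mask decomposition the
// SHXADD patterns rely on: for a mask such as 0x00000FF0, isShiftedMask()
// holds and the run of ones is located by its leading- and trailing-zero
// counts.
static void decomposeShiftedMask(const llvm::APInt &Mask, unsigned XLen,
                                 unsigned &Leading, unsigned &Trailing) {
  assert(Mask.isShiftedMask() && "expected one contiguous run of ones");
  Leading = XLen - Mask.getActiveBits(); // zeros above the run
  Trailing = Mask.countr_zero();         // zeros below the run
}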
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  // ...
    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  // ...

  return std::nullopt;
}
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
  assert(Root.isReg() && "Expected operand to be a Register");
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());

  if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
    auto *C = RootDef->getOperand(1).getCImm();
    if (C->getValue().isAllOnes())
      // An all-ones VL is at least VLMAX; render it as the VLMAX sentinel
      // immediate instead of a register.
      return {{[=](MachineInstrBuilder &MIB) {
        MIB.addImm(RISCV::VLMaxSentinel);
      }}};

    uint64_t ZExtC = C->getZExtValue();
    return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
}
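
// Usage sketch for the convention above (caller shape mirrors the intrinsic
// selection below): an all-ones G_CONSTANT VL selects to the VLMAX sentinel
// immediate, any other constant becomes a plain immediate, and a
// non-constant VL stays a register operand:
//
//   auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
//   for (auto &RenderFn : *VLOpFn)
//     RenderFn(PseudoMI);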
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  // ...
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, *MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }
  // ...

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}
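
// Worked example (sketch): for %p = G_PTR_ADD %base, 40 the renderers above
// produce the pair (%base, 40), which load/store selection folds into the
// instruction's 12-bit signed offset field; an offset outside [-2048, 2047]
// falls through to the (Root, 0) default.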
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate Pred) {
  switch (Pred) {
  case CmpInst::Predicate::ICMP_EQ:  return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:  return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT: return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT: return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE: return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE: return RISCVCC::COND_GE;
  default: llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  }
}

static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
                                 Register &LHS, Register &RHS,
                                 MachineRegisterInfo &MRI) {
  // ...
  CC = getRISCVCCFromICmp(Pred);
  // ...
}

static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  default: llvm_unreachable("Unexpected memory size");
  case 8:  return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
  case 16: return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
  case 32: return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
  case 64: return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
  }
}

static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  default: llvm_unreachable("Unexpected memory size");
  case 8:  return IsStore ? RISCV::SB : RISCV::LBU;
  case 16: return IsStore ? RISCV::SH : RISCV::LH;
  case 32: return IsStore ? RISCV::SW : RISCV::LW;
  case 64: return IsStore ? RISCV::SD : RISCV::LD;
  }
}
void RISCVInstructionSelector::addVectorLoadStoreOperands(
    MachineInstr &I, SmallVectorImpl<SrcOp> &SrcOps, unsigned &CurOp,
    bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
  // Base pointer.
  auto PtrReg = I.getOperand(CurOp++).getReg();
  SrcOps.push_back(PtrReg);

  // Stride or index.
  if (IsStridedOrIndexed) {
    auto StrideReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(StrideReg);
    if (IndexVT)
      *IndexVT = MRI->getType(StrideReg);
  }

  // Mask.
  if (IsMasked) {
    auto MaskReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(MaskReg);
  }
}
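
// Usage sketch (mirrors the intrinsic cases below): for a masked strided
// load the operand layout consumed here is [pointer, stride, mask], with
// CurOp advancing past each operand; the caller has already consumed the
// result, intrinsic ID, and passthru:
//
//   unsigned CurOp = 2;
//   SmallVector<SrcOp, 4> SrcOps;
//   addVectorLoadStoreOperands(I, SrcOps, CurOp, /*IsMasked=*/true,
//                              /*IsStridedOrIndexed=*/true);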
bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
    MachineInstr &I, MachineIRBuilder &MIB) const {
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();

  // Select the instruction.
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::riscv_vlm:
  case Intrinsic::riscv_vle:
  case Intrinsic::riscv_vle_mask:
  case Intrinsic::riscv_vlse:
  case Intrinsic::riscv_vlse_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
                    IntrinID == Intrinsic::riscv_vlse_mask;
    bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
                     IntrinID == Intrinsic::riscv_vlse_mask;
    LLT VT = MRI->getType(I.getOperand(0).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Result vector.
    const Register DstReg = I.getOperand(0).getReg();

    // Sources. A vlm intrinsic has no passthru operand.
    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
    unsigned CurOp = 2;
    SmallVector<SrcOp, 4> SrcOps;

    // Passthru.
    if (HasPassthruOperand) {
      auto PassthruReg = I.getOperand(CurOp++).getReg();
      SrcOps.push_back(PassthruReg);
    }
    // ...

    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    const RISCV::VLEPseudo *P =
        RISCV::getVLEPseudo(IsMasked, IsStrided, false, Log2SEW,
                            static_cast<unsigned>(LMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Policy.
    uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
    if (IsMasked)
      Policy = I.getOperand(CurOp++).getImm();
    PseudoMI.addImm(Policy);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vloxei:
  case Intrinsic::riscv_vloxei_mask:
  case Intrinsic::riscv_vluxei:
  case Intrinsic::riscv_vluxei_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
                    IntrinID == Intrinsic::riscv_vluxei_mask;
    bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
                     IntrinID == Intrinsic::riscv_vloxei_mask;
    LLT VT = MRI->getType(I.getOperand(0).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Result vector.
    const Register DstReg = I.getOperand(0).getReg();

    // Sources.
    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
    unsigned CurOp = 2;
    SmallVector<SrcOp, 4> SrcOps;

    // Passthru.
    if (HasPassthruOperand) {
      auto PassthruReg = I.getOperand(CurOp++).getReg();
      SrcOps.push_back(PassthruReg);
    }
    // ...

    LLT IndexVT;
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    RISCVVType::VLMUL IndexLMUL =
        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      reportFatalUsageError("The V extension does not support EEW=64 for index "
                            "values when XLEN=32");
    }
    const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Policy.
    uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
    if (IsMasked)
      Policy = I.getOperand(CurOp++).getImm();
    PseudoMI.addImm(Policy);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vsm:
  case Intrinsic::riscv_vse:
  case Intrinsic::riscv_vse_mask:
  case Intrinsic::riscv_vsse:
  case Intrinsic::riscv_vsse_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
                    IntrinID == Intrinsic::riscv_vsse_mask;
    bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
                     IntrinID == Intrinsic::riscv_vsse_mask;
    LLT VT = MRI->getType(I.getOperand(1).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Sources.
    unsigned CurOp = 1;
    SmallVector<SrcOp, 4> SrcOps;

    // Store value.
    auto PassthruReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(PassthruReg);

    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
        IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vsoxei:
  case Intrinsic::riscv_vsoxei_mask:
  case Intrinsic::riscv_vsuxei:
  case Intrinsic::riscv_vsuxei_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
                    IntrinID == Intrinsic::riscv_vsuxei_mask;
    bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
                     IntrinID == Intrinsic::riscv_vsoxei_mask;
    LLT VT = MRI->getType(I.getOperand(1).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Sources.
    unsigned CurOp = 1;
    SmallVector<SrcOp, 4> SrcOps;

    // Store value.
    auto PassthruReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(PassthruReg);

    LLT IndexVT;
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    RISCVVType::VLMUL IndexLMUL =
        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      reportFatalUsageError("The V extension does not support EEW=64 for index "
                            "values when XLEN=32");
    }
    const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  }
}
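
// Worked operand-layout example (sketch) for the strided masked load case
// above, written as pseudo-IR:
//
//   %v = riscv_vlse_mask(%passthru, %ptr, %stride, %mask, %vl, %policy)
//
// CurOp starts at the passthru, addVectorLoadStoreOperands consumes
// ptr/stride/mask, and the tail renders VL, the SEW immediate, and the
// policy immediate before cloning the memory operand.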
bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          dyn_cast<const TargetRegisterClass *>(RegClassOrBank);
      if (!DefRC) {
        if (!DefTy.isValid())
          return false;
        const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC)
          return false;
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }
    // ...
    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    const Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    bool IsSigned = Opc != TargetOpcode::G_ZEXT;
    const Register DstReg = MI.getOperand(0).getReg();
    const Register SrcReg = MI.getOperand(1).getReg();
    LLT SrcTy = MRI->getType(SrcReg);
    unsigned SrcSize = SrcTy.getSizeInBits();
    // ...
    assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() ==
               RISCV::GPRBRegBankID &&
           "Unexpected ext regbank");

    // sext.w is ADDIW with a zero immediate.
    if (IsSigned && SrcSize == 32) {
      MI.setDesc(TII.get(RISCV::ADDIW));
      MI.addOperand(MachineOperand::CreateImm(0));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // zext.w is ADD_UW with X0 as the second source when Zba is available.
    if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
      MI.setDesc(TII.get(RISCV::ADD_UW));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // 16-bit extends have dedicated instructions with Zbb.
    if (SrcSize == 16 && STI.hasStdExtZbb()) {
      MI.setDesc(TII.get(IsSigned ? RISCV::SEXT_H
                         : STI.isRV64() ? RISCV::ZEXT_H_RV64
                                        : RISCV::ZEXT_H_RV32));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // PACK/PACKW with X0 zero-extends the low 16 bits when Zbkb is available.
    if (!IsSigned && SrcSize == 16 && STI.hasStdExtZbkb()) {
      MI.setDesc(TII.get(STI.is64Bit() ? RISCV::PACKW : RISCV::PACK));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Otherwise extend with a shift-left/shift-right pair.
    auto ShiftLeft =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {SrcReg})
            .addImm(STI.getXLen() - SrcSize);
    if (!ShiftLeft.constrainAllUses(TII, TRI, RBI))
      return false;
    auto ShiftRight = MIB.buildInstr(IsSigned ? RISCV::SRAI : RISCV::SRLI,
                                     {DstReg}, {ShiftLeft})
                          .addImm(STI.getXLen() - SrcSize);
    if (!ShiftRight.constrainAllUses(TII, TRI, RBI))
      return false;
    MI.eraseFromParent();
    return true;
  }
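  // Worked example (sketch) of the shift-pair fallback above, extending an
  // s8 value on RV64: SLLI moves the byte to the top of the register
  // (imm = 64 - 8 = 56), then SRAI (sign extend) or SRLI (zero extend)
  // shifts it back down, replicating or clearing the upper 56 bits.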
  case TargetOpcode::G_FCONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    // ...
      GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      APInt Imm = FPimm.bitcastToAPInt();
      if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
        return false;
      // ...
      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    // ...
      assert(/* ... */ && "Unexpected size or subtarget");
      // ...
      MachineInstrBuilder FCVT = /* ... */;
      // ...
      MI.eraseFromParent();
      return true;
    // ...

    // f64 on RV32: materialize the high and low halves and pair them.
    Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    // ...
    if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                        MIB))
      return false;
    if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
      return false;
    MachineInstrBuilder PairF64 = MIB.buildInstr(
        RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
    // ...
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB, MRI);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    GLoadStore &LdSt = cast<GLoadStore>(MI);
    const Register ValReg = LdSt.getReg(0);
    const Register PtrReg = LdSt.getPointerReg();
    LLT PtrTy = MRI->getType(PtrReg);

    const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);
    if (RB.getID() != RISCV::GPRBRegBankID)
      return false;

    assert(RBI.getRegBank(PtrReg, *MRI, TRI)->getID() ==
               RISCV::GPRBRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");

    unsigned NewOpc;
    // ... (pick a Zalasr atomic or plain reg+imm opcode for the access size)
    if (NewOpc == MI.getOpcode())
      return false;

    // Check if we can fold anything into the addressing mode.
    auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
    if (!AddrModeFns)
      return false;

    // Folded something: build the new instruction and render its operands.
    auto NewInst = MIB.buildInstr(NewOpc, {}, {}, MI.getFlags());
    // ...
    for (auto &Fn : *AddrModeFns)
      Fn(NewInst);
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
  }
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWithSideEffects(MI, MIB);
  default:
    return false;
  }
}
bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  if (!Subtarget->hasStdExtZfa())
    return false;

  // Split an FPR64 into two GPR halves using Zfa's FMV instructions.
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;

  MachineInstr *ExtractLo = MIB.buildInstr(RISCV::FMV_X_W_FPR64, {Lo}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
    return false;

  MachineInstr *ExtractHi = MIB.buildInstr(RISCV::FMVH_X_D, {Hi}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
    return false;

  MI.eraseFromParent();
  return true;
}
bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB) {
  Register PtrReg = Op.getReg();
  assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}
void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI->setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI->setType(DstReg, sXLen);
    break;
  }
  }
}
void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
         "Expected G_FRAME_INDEX");
  MIB.add(MI.getOperand(1));
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(countr_zero(C));
}

void RISCVInstructionSelector::renderXLenSubTrailingOnes(
    MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - countr_one(C));
}

void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
  int64_t Adj = Imm < 0 ? -2048 : 2047;
  MIB.addImm(Imm - Adj);
}

void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
  MIB.addImm(Imm);
}
const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;
    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;
    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;
    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}
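
// Hedged sketch of the vector-bank mapping above (assumption: thresholds
// follow RVV register grouping, where each doubling of the known-minimum
// size selects the next LMUL group):
static const char *vrClassNameForMinSize(unsigned KnownMinBits) {
  if (KnownMinBits <= 64)
    return "VR"; // fits a single vector register
  if (KnownMinBits <= 128)
    return "VRM2"; // group of two registers
  if (KnownMinBits <= 256)
    return "VRM4"; // group of four
  return "VRM8";   // group of eight
}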
bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  // ...
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");
  // ...
  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");
  // ...
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  // ...
  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, STI);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    // ...
    switch (I.getOpndKind()) {
    // ...
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    // ...
    }
    // ...
    SrcReg = TmpReg;
  }

  return true;
}
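
// Standalone sketch of the materialization query that drives the loop above:
// RISCVMatInt::generateInstSeq returns the LUI/ADDI(W)/SLLI/... recipe for an
// arbitrary immediate, and the selector emits one instruction per element,
// chaining through temporary GPRs. (Illustrative helper, not original code.)
static unsigned immediateMaterializationCost(int64_t Imm,
                                             const llvm::MCSubtargetInfo &STI) {
  // The recipe length is the number of scalar instructions needed.
  return llvm::RISCVMatInt::generateInstSeq(Imm, STI).size();
}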
bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  const Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol.
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use PC-relative addressing to access the GOT for this symbol.
    MachineFunction &MF = *MI.getParent()->getParent();
    // ...
    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default:
    reportGISelFailure(/* ... */ "Unsupported code model for lowering", MI);
    return false;
  case CodeModel::Small: {
    // medlow: the address must lie in the low 2 GiB; LUI %hi + ADDI %lo.
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);
    // ...
    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // medany: an extern-weak symbol may be null and thus out of AUIPC+ADDI
    // range, so load its address from the GOT instead.
    if (IsExternWeak) {
      MachineFunction &MF = *MI.getParent()->getParent();
      // ...
      MI.eraseFromParent();
      return true;
    }

    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }
}
bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB) const {
  // ...
  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             // ...
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  switch (Pred) {
  case CmpInst::FCMP_OLT:
    return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  // ...
  }
}

static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");
  // Try swapping the operands.
  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    std::swap(LHS, RHS);
    Pred = InvPred;
    return true;
  }
  // Try inverting the result, optionally with another operand swap.
  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    std::swap(LHS, RHS);
    Pred = InvPred;
    return true;
  }
  return false;
}
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI->getType(LHS).getSizeInBits();
  // ...

  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try a legal predicate, possibly after swapping operands or
  // inverting the result.
  if (isLegalFCmpPredicate(Pred) ||
      legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  } else
    return false;

  // Invert the result with XORI 1 if needed.
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}
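
// Worked example (sketch) of the expansions above for FCMP_ONE on two f64
// values %a and %b, in pseudo-MIR:
//
//   %c1:gprb = FLT_D %a, %b
//   %c2:gprb = FLT_D %b, %a
//   %one     = OR %c1, %c2     ; ordered and not equal
//   %ueq     = XORI %one, 1    ; FCMP_UEQ is the XORI-by-1 inversion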
void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // Under Ztso only a sequentially-consistent cross-thread fence needs a
    // real instruction.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      // ...
      return;
    }
    // MEMBARRIER is a compiler barrier; it codegens to no instructions.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Single-thread fences only need to preserve instruction order.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  unsigned Pred, Succ;
  switch (FenceOrdering) {
  case AtomicOrdering::AcquireRelease:
    // ...
  case AtomicOrdering::Acquire:
    // ...
  case AtomicOrdering::Release:
    // ...
  case AtomicOrdering::SequentiallyConsistent:
    // ...
  }
  // ...
}
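
// Sketch of the mapping completed by the elided cases above, under the
// common RISC-V lowering (fence fields: r = reads, w = writes; the exact
// field constants are elided in this excerpt):
//   acquire  -> fence r, rw
//   release  -> fence rw, w
//   seq_cst  -> fence rw, rw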
InstructionSelector *
llvm::createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                                     const RISCVSubtarget &Subtarget,
                                     const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}