//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

class AArch64TargetMachine;

namespace AArch64 {
/// Possible values of current rounding mode, which is specified in bits
/// 23:22 of FPCR.
enum Rounding {
  RN = 0,    // Round to Nearest
  RP = 1,    // Round towards Plus infinity
  RM = 2,    // Round towards Minus infinity
  RZ = 3,    // Round towards Zero
  rmMask = 3 // Bit mask selecting rounding mode
};

// Bit position of rounding mode bits in FPCR.
const unsigned RoundingBitsPos = 22;

// Reserved bits should be preserved when modifying FPCR.
const uint64_t ReservedFPControlBits = 0xfffffffff80040f8;
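
// Illustrative sketch, not part of the upstream header: the constants above
// describe how the rounding mode is packed into FPCR. Assuming a raw FPCR
// value is already in hand, a hypothetical helper could decode it as:
// \code
//   AArch64::Rounding decodeRoundingMode(uint64_t FPCR) {
//     return static_cast<AArch64::Rounding>(
//         (FPCR >> AArch64::RoundingBitsPos) & AArch64::rmMask);
//   }
// \endcode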

// Registers used to pass function arguments.
ArrayRef<MCPhysReg> getGPRArgRegs();
ArrayRef<MCPhysReg> getFPRArgRegs();

/// Maximum allowed number of unprobed bytes above SP at an ABI
/// boundary.
const unsigned StackProbeMaxUnprobedStack = 1024;

/// Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxLoopUnroll = 4;

} // namespace AArch64

namespace ARM64AS {
enum : unsigned { PTR32_SPTR = 270, PTR32_UPTR = 271, PTR64 = 272 };
}

class AArch64Subtarget;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  const AArch64TargetMachine &getTM() const;

  /// Control the following reassociation of operands: (op (op x, c1), y) -> (op
  /// (op x, y), c1) where N0 is (op x, c1) and N1 is y.
  bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                           SDValue N1) const override;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
    if ((AS == ARM64AS::PTR32_SPTR) || (AS == ARM64AS::PTR32_UPTR)) {
      // These are 32-bit pointers created using the `__ptr32` extension or
      // similar. They are handled by marking them as being in a different
      // address space, and will be extended to 64-bits when used as the target
      // of a load or store operation, or cast to a 64-bit pointer type.
      return MVT::i32;
    } else {
      // Returning i64 unconditionally here (i.e. even for ILP32) means that the
      // *DAG* representation of pointers will always be 64-bits. They will be
      // truncated and extended when transferred to memory, but the 64-bit DAG
      // allows us to use AArch64's addressing modes much more easily.
      return MVT::i64;
    }
  }
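
  // Illustrative note, not from the upstream sources: the ARM64AS address
  // spaces above model mixed pointer sizes (e.g. the MSVC-style __ptr32
  // qualifiers), so a hypothetical query against this hook would behave as:
  // \code
  //   MVT P64 = TLI.getPointerTy(DL);                      // MVT::i64
  //   MVT P32 = TLI.getPointerTy(DL, ARM64AS::PTR32_UPTR); // MVT::i32
  // \endcode
  // where TLI and DL are an assumed AArch64TargetLowering instance and its
  // DataLayout.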

  unsigned getVectorIdxWidth(const DataLayout &DL) const override {
    // The VectorIdx type is i64 for both normal and ILP32.
    return 64;
  }

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const override;
  /// LLT variant.
  bool allowsMisalignedMemoryAccesses(LLT Ty, unsigned AddrSpace,
                                      Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      unsigned *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Similar to isShuffleMaskLegal. Return true if the given 'select with zero'
  /// shuffle mask can be codegen'd directly.
  bool isVectorClearMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitDynamicProbedAlloc(MachineInstr &MI,
                                            MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitCheckMatchingVL(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitTileLoad(unsigned Opc, unsigned BaseReg,
                                  MachineInstr &MI,
                                  MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitZAInstr(unsigned Opc, unsigned BaseReg,
                                 MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitZTInstr(MachineInstr &MI, MachineBasicBlock *BB,
                                 unsigned Opcode, bool Op0IsDef) const;
  MachineBasicBlock *EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const;

  // Note: The following group of functions are only used as part of the old SME
  // ABI lowering. They will be removed once -aarch64-new-sme-abi=true is the
  // default.
  MachineBasicBlock *EmitInitTPIDR2Object(MachineInstr &MI,
                                          MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitAllocateZABuffer(MachineInstr &MI,
                                          MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitAllocateSMESaveBuffer(MachineInstr &MI,
                                               MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitGetSMESaveSize(MachineInstr &MI,
                                        MachineBasicBlock *BB) const;
  MachineBasicBlock *EmitEntryPStateSM(MachineInstr &MI,
                                       MachineBasicBlock *BB) const;

  /// Replace (0, vreg) discriminator components with the operands of blend
  /// or with (immediate, NoRegister) when possible.
  void fixupPtrauthDiscriminator(MachineInstr &MI, MachineBasicBlock *BB,
                                 MachineOperand &IntDiscOp,
                                 MachineOperand &AddrDiscOp,
                                 const TargetRegisterClass *AddrDiscRC) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT,
                             std::optional<unsigned> ByteOffset) const override;

  bool shouldRemoveRedundantExtend(SDValue Op) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool optimizeExtendOrTruncateConversion(
      Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override;

  bool hasPairedLoad(EVT LoadedType, Align &RequiredAlignment) const override;

  bool isProfitableToInterleaveWithGatherScatter() const override {
    return true;
  }

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(Instruction *Load, Value *Mask,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices, unsigned Factor,
                            const APInt &GapMask) const override;
  bool lowerInterleavedStore(Instruction *Store, Value *Mask,
                             ShuffleVectorInst *SVI, unsigned Factor,
                             const APInt &GapMask) const override;

  bool lowerInterleavedStoreWithShuffle(StoreInst *SI, ShuffleVectorInst *SVI,
                                        unsigned Factor) const;

  bool lowerDeinterleaveIntrinsicToLoad(Instruction *Load, Value *Mask,
                                        IntrinsicInst *DI) const override;

  bool lowerInterleaveIntrinsicToStore(
      Instruction *Store, Value *Mask,
      ArrayRef<Value *> InterleaveValues) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalAddScalableImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool isMulAddWithConstProfitable(SDValue AddNode,
                                   SDValue ConstNode) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  LLT getOptimalMemOpLLT(const MemOp &Op,
                         const AttributeList &FuncAttributes) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset,
                                         int64_t MaxOffset) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;

  bool generateFMAsInMachineCombiner(EVT VT,
                                     CodeGenOptLevel OptLevel) const override;

  /// Return true if the target has native support for
  /// the specified value type and it is 'desirable' to use the type for the
  /// given node type.
  bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
  ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override {
    return false;
  }

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override;

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (VT.isVector())
      return false;

    return VT.getScalarSizeInBits() <= 64;
  }

  bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
                                            unsigned SelectOpcode, SDValue X,
                                            SDValue Y) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Using overflow ops for overflow checks only should be beneficial on
    // AArch64.
    return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
  }
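
  // Illustrative note, not from the upstream sources: because MathUsed is
  // forwarded as true above, a plain wrap check such as
  // \code
  //   unsigned Sum = A + B;
  //   if (Sum < A) { /* overflowed */ }
  // \endcode
  // is still allowed to become an overflow op (e.g. ISD::UADDO) even when only
  // the comparison result is used, which is what the comment above anticipates.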

  Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override;

  bool isOpSuitableForLDPSTP(const Instruction *I) const;
  bool isOpSuitableForLSE128(const Instruction *I) const;
  bool isOpSuitableForRCPC3(const Instruction *I) const;
  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
  bool
  shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode(const Module &M) const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilderBase &IRB) const override;

  void insertSSPDeclarations(Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const MachineFunction &MF) const override;

  bool isCheapToSpeculateCttz(Type *) const override {
    return true;
  }

  bool isCheapToSpeculateCtlz(Type *) const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    if (VT.isScalableVector())
      return true;

    return VT.getFixedSizeInBits() >= 64; // vector 'bic'
  }
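
  // Illustrative note, not from the upstream sources: the checks above accept
  // and-not patterns that map onto BIC/BICS, e.g. a scalar i64 (x & ~y) can
  // select to "bic x0, x0, x1" and a 128-bit NEON vector to
  // "bic v0.16b, v0.16b, v1.16b"; a fixed vector narrower than 64 bits is
  // rejected.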

  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const override;

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
    // XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }
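
  // Worked example, not from the upstream sources: for XVT == MVT::i64 and
  // KeptBits == 32, VTIsOk holds for both i64 and the kept-bits type i32, so
  // the hook returns true and the truncation check can use a sign-extend
  // (SXTW-style) form; vector types always return false above.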

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  bool preferSelectsOverBooleanArithmetic(EVT VT) const override;

  bool isComplexDeinterleavingSupported() const override;
  bool isComplexDeinterleavingOperationSupported(
      ComplexDeinterleavingOperation Operation, Type *Ty) const override;

  Value *createComplexDeinterleavingIR(
      IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
      ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
      Value *Accumulator = nullptr) const override;

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  bool supportPtrAuthBundles() const override { return true; }

  bool supportKCFIBundles() const override { return true; }

  MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
                              MachineBasicBlock::instr_iterator &MBBI,
                              const TargetInstrInfo *TII) const override;

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override {
    return true;
  }

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL,
                                    bool &UseScalable) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL,
                                     bool UseScalable) const;

  MachineMemOperand::Flags getTargetMMOFlags(
      const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg,
      const DataLayout &DL) const override;

  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

  bool fallBackToDAGISel(const Instruction &Inst) const override;

  /// SVE code generation for fixed length vectors does not custom lower
  /// BUILD_VECTOR. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR is legal or can be custom lowered for all legal
  /// vector types this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;

  // If the platform/function should have a redzone, return the size in bytes.
  unsigned getRedZoneSize(const Function &F) const {
    if (F.hasFnAttribute(Attribute::NoRedZone))
      return 0;
    return 128;
  }

  bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const;

  EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
                             bool AllowUnknown = false) const override;

  bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override;

  bool shouldExpandCttzElements(EVT VT) const override;

  bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const override;

  /// If a change in streaming mode is required on entry to/return from a
  /// function call it emits and returns the corresponding SMSTART or SMSTOP
  /// node. \p Condition should be one of the enum values from
  /// AArch64SME::ToggleCondition.
  SDValue changeStreamingMode(SelectionDAG &DAG, SDLoc DL, bool Enable,
                              SDValue Chain, SDValue InGlue, unsigned Condition,
                              bool InsertVectorLengthCheck = false) const;
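
  // Illustrative note, not from the upstream sources: for example, a call from
  // a non-streaming function into a callee marked __arm_streaming is expected
  // to be bracketed by the SMSTART node returned here before the call and a
  // matching SMSTOP afterwards, with Condition selecting the
  // AArch64SME::ToggleCondition variant used.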

  bool isVScaleKnownToBeAPowerOfTwo() const override { return true; }

  // Normally SVE is only used for byte size vectors that do not fit within a
  // NEON vector. This changes when OverrideNEON is true, allowing SVE to be
  // used for 64bit and 128bit vectors as well.
  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;

  // Follow NEON ABI rules even when using SVE for fixed length vectors.
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;
  unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context,
                                                CallingConv::ID CC, EVT VT,
                                                EVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const override;

  /// True if stack clash protection is enabled for this function.
  bool hasInlineStackProbe(const MachineFunction &MF) const override;

  /// In AArch64, true if FEAT_CPA is present. Allows pointer arithmetic
  /// semantics to be preserved for instruction selection.
  bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const override;

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT);
  void addTypeForFixedLengthSVE(MVT VT);
  void addDRType(MVT VT);
  void addQRType(MVT VT);

  bool shouldExpandBuildVectorWithShuffles(EVT, unsigned) const override;

  SDValue lowerEHPadEntry(SDValue Chain, SDLoc const &DL,
                          SelectionDAG &DAG) const override;

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<CCValAssign> &RVLocs,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal, bool RequiresSMChange) const;

  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerStore128(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerMGATHER(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMSCATTER(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerVECTOR_COMPRESS(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;

  bool
  isEligibleForTailCallOptimization(const CallLoweringInfo &CLI) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context, const Type *RetTy) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ExternalSymbolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSLocalExec(const GlobalValue *GV, SDValue ThreadBase,
                               const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerPtrAuthGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal,
                         iterator_range<SDNode::user_iterator> Users,
                         SDNodeFlags Flags, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRIND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRESET_FPMODE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDUPQLane(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG,
                              unsigned NewOp) const;
  SDValue LowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_DEINTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_INTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_HISTOGRAM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerPARTIAL_REDUCE_MLA(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_ACTIVE_LANE_MASK(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP_PARITY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBitreverse(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMinMax(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorXRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerLOOP_DEPENDENCE_MASK(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSCALE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE_MUL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerInlineDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerAVG(SDValue Op, SelectionDAG &DAG, unsigned NewOp) const;

  SDValue LowerFixedLengthVectorIntDivideToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorIntExtendToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerPredReductionToSVE(SDValue ScalarOp, SelectionDAG &DAG) const;
  SDValue LowerReductionToSVE(unsigned Opcode, SDValue ScalarOp,
                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSelectToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorSetccToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorStoreToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorMStoreToSVE(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVectorTruncateToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthExtractVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthInsertVectorElt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthBitcastToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthConcatVectorsToSVE(SDValue Op,
                                             SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPExtendToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPRoundToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthIntToFPToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthFPToIntToSVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFixedLengthVECTOR_SHUFFLEToSVE(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue LowerFixedLengthBuildVectorToSVE(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                           const DenormalMode &Mode) const override;
  SDValue getSqrtResultForDenormInput(SDValue Operand,
                                      SelectionDAG &DAG) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  Register getRegisterByName(const char* RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  InlineAsm::ConstraintCode
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::ConstraintCode::Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    //        followed by llvm_unreachable so we'll leave them unimplemented in
    //        the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }
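
  // Illustrative note, not from the upstream sources: "Q" is the GCC-style
  // AArch64 constraint for a memory operand addressed by a single base
  // register with no offset, e.g.
  // \code
  //   long V;
  //   asm volatile("ldar %0, %1" : "=r"(V) : "Q"(*Ptr));
  // \endcode
  // where Ptr is an assumed 'long *'.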

  /// Handle Lowering flag assembly outputs.
  SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
                                      const SDLoc &DL,
                                      const AsmOperandInfo &Constraint,
                                      SelectionDAG &DAG) const override;

  bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const override;
  bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const override;
  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                              SDValue &Offset, SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;
  bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
                       bool IsPre, MachineRegisterInfo &MRI) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const;
  void ReplaceExtractSubVectorResults(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) const;
  void ReplaceGetActiveLaneMaskResults(SDNode *N,
                                       SmallVectorImpl<SDValue> &Results,
                                       SelectionDAG &DAG) const;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;

  bool shouldLocalize(const MachineInstr &MI,
                      const TargetTransformInfo *TTI) const override;

  bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                         const APInt &OriginalDemandedBits,
                                         const APInt &OriginalDemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth) const override;

  bool canCreateUndefOrPoisonForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           bool PoisonOnly, bool ConsiderFlags,
                                           unsigned Depth) const override;

  bool isTargetCanonicalConstantNode(SDValue Op) const override;

  // With the exception of data-predicate transitions, no instructions are
  // required to cast between legal scalable vector types. However:
  //  1. Packed and unpacked types have different bit lengths, meaning BITCAST
  //     is not universally usable.
  //  2. Most unpacked integer types are not legal and thus integer extends
  //     cannot be used to convert between unpacked and packed types.
  // These can make "bitcasting" a multiphase process. REINTERPRET_CAST is used
  // to transition between unpacked and packed types of the same element type,
  // with BITCAST used otherwise.
  // This function does not handle predicate bitcasts.
  SDValue getSVESafeBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) const;
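
  // Illustrative example, not from the upstream sources: following the rules
  // described above, "bitcasting" an unpacked nxv2f32 to nxv2i64 could be a
  // two-step sequence: REINTERPRET_CAST from nxv2f32 to the packed nxv4f32
  // (same f32 element type), then a plain BITCAST from nxv4f32 to nxv2i64.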

  // Returns the runtime value for PSTATE.SM by generating a call to
  // __arm_sme_state.
  SDValue getRuntimePStateSM(SelectionDAG &DAG, SDValue Chain, SDLoc DL,
                             EVT VT) const;

  bool preferScalarizeSplat(SDNode *N) const override;

  unsigned getMinimumJumpTableEntries() const override;

  bool softPromoteHalfType() const override { return true; }

  bool shouldScalarizeBinop(SDValue VecOp) const override {
    return VecOp.getOpcode() == ISD::SETCC;
  }

  bool hasMultipleConditionRegisters(EVT VT) const override {
    return VT.isScalableVector();
  }
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif
unsigned const MachineRegisterInfo * MRI
return SDValue()
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG)
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG)
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG)
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
iv Induction Variable Users
Definition IVUsers.cpp:48
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define RegName(no)
lazy value info
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
PowerPC Reduce CR logical Operation
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad, bool isV9, bool is64Bit)
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
static bool Enabled
Definition Statistic.cpp:46
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This file describes how to lower LLVM code to machine code.
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG)
static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG)
Lower SRA_PARTS and friends, which return two i32 values and take a 2 x i32 value to shift plus a shi...
static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG)
static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
Value * RHS
Value * LHS
static SDValue LowerCallResult(SDValue Chain, SDValue InGlue, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
SDValue changeStreamingMode(SelectionDAG &DAG, SDLoc DL, bool Enable, SDValue Chain, SDValue InGlue, unsigned Condition, bool InsertVectorLengthCheck=false) const
If a change in streaming mode is required on entry to/return from a function call it emits and return...
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override
Return true if it is profitable to fold a pair of shifts into a mask.
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset) const override
Return true if it is profitable to reduce a load to a smaller type.
void initializeSplitCSR(MachineBasicBlock *Entry) const override
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool hasAndNotCompare(SDValue V) const override
Return true if the target should transform: (X & Y) == Y ---> (~X & Y) == 0 (X & Y) !...
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
Return true if the given shuffle mask can be codegen'd directly, or if it should be stack expanded.
unsigned getVaListSizeInBits(const DataLayout &DL) const override
Returns the size of the platform's va_list object.
MachineBasicBlock * EmitZAInstr(unsigned Opc, unsigned BaseReg, MachineInstr &MI, MachineBasicBlock *BB) const
void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const override
Insert explicit copies in entry and exit blocks.
int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) const override
Return the prefered common base offset.
bool shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert a trailing fence without reducing the ordering f...
bool shouldExpandCttzElements(EVT VT) const override
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
MachineBasicBlock * EmitInitTPIDR2Object(MachineInstr &MI, MachineBasicBlock *BB) const
bool lowerInterleavedStore(Instruction *Store, Value *Mask, ShuffleVectorInst *SVI, unsigned Factor, const APInt &GapMask) const override
Lower an interleaved store into a stN intrinsic.
MachineBasicBlock * EmitTileLoad(unsigned Opc, unsigned BaseReg, MachineInstr &MI, MachineBasicBlock *BB) const
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL, bool UseScalable) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override
Return true if it is profitable to fold a pair of shifts into a mask.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
bool preferSelectsOverBooleanArithmetic(EVT VT) const override
Should we prefer selects to doing arithmetic on boolean types.
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, const MachineFunction &MF) const override
Returns if it's reasonable to merge stores to MemVT size.
bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override
bool isIntDivCheap(EVT VT, AttributeList Attr) const override
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool shouldRemoveRedundantExtend(SDValue Op) const override
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const
Selects the correct CCAssignFn for a given CallingConvention value.
bool supportPtrAuthBundles() const override
Return true if the target supports ptrauth operand bundles.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ISD::SETCC ValueType.
bool optimizeExtendOrTruncateConversion(Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override
Try to optimize extending or truncating conversion instructions (like zext, trunc,...
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
This method returns a target specific FastISel object, or null if the target does not support "fast" ...
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
bool lowerDeinterleaveIntrinsicToLoad(Instruction *Load, Value *Mask, IntrinsicInst *DI) const override
Lower a deinterleave intrinsic to a target specific load intrinsic.
MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const override
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
unsigned getVectorIdxWidth(const DataLayout &DL) const override
Returns the type to be used for the index operand vector operations.
bool hasInlineStackProbe(const MachineFunction &MF) const override
True if stack clash protection is enabled for this functions.
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, SDValue Y) const override
Return true if pulling a binary operation into a select with an identity constant is profitable.
bool isLegalICmpImmediate(int64_t) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
ShiftLegalizationStrategy preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const override
bool isOpSuitableForLSE128(const Instruction *I) const
bool isVScaleKnownToBeAPowerOfTwo() const override
Return true only if vscale must be a power of two.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
void fixupPtrauthDiscriminator(MachineInstr &MI, MachineBasicBlock *BB, MachineOperand &IntDiscOp, MachineOperand &AddrDiscOp, const TargetRegisterClass *AddrDiscRC) const
Replace (0, vreg) discriminator components with the operands of blend or with (immediate,...
bool lowerInterleavedLoad(Instruction *Load, Value *Mask, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor, const APInt &GapMask) const override
Lower an interleaved load into a ldN intrinsic.
bool fallBackToDAGISel(const Instruction &Inst) const override
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override
Return true if the target has native support for the specified value type and it is 'desirable' to us...
bool isLegalAddScalableImmediate(int64_t) const override
Return true if adding the specified scalable immediate is legal, that is the target has add instructi...
bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const override
Try to convert math with an overflow comparison into the corresponding DAG node operation.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool isProfitableToInterleaveWithGatherScatter() const override
Return true if the target interleave with shuffles are cheaper.
Value * createComplexDeinterleavingIR(IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, Value *Accumulator=nullptr) const override
Create the IR node for the given complex deinterleaving operation.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Returns true if the target allows unaligned memory accesses of the specified type.
MachineBasicBlock * EmitCheckMatchingVL(MachineInstr &MI, MachineBasicBlock *MBB) const
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const
Returns true if VecTy is a legal interleaved access type.
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const override
For some targets, an LLVM struct type must be broken down into multiple simple types,...
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
MachineBasicBlock * EmitLoweredCatchRet(MachineInstr &MI, MachineBasicBlock *BB) const
bool isComplexDeinterleavingSupported() const override
Does this target support complex deinterleaving.
bool isZExtFree(Type *Ty1, Type *Ty2) const override
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const override
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const
MachineBasicBlock * EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
Value * getSafeStackPointerLocation(IRBuilderBase &IRB) const override
If the target has a standard location for the unsafe stack pointer, returns the address of that locat...
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return if the target supports combining a chain like:
bool isProfitableToHoist(Instruction *I) const override
Check if it is profitable to hoist instruction in then/else to if.
bool isOpSuitableForRCPC3(const Instruction *I) const
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isCheapToSpeculateCttz(Type *) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
unsigned getRedZoneSize(const Function &F) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
MachineBasicBlock * EmitZTInstr(MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, bool Op0IsDef) const
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
MachineBasicBlock * EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const
bool isCheapToSpeculateCtlz(Type *) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
bool lowerInterleavedStoreWithShuffle(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const
If the interleaved vector elements are greater than supported MaxFactor, interleaving the data with a...
bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const override
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
MachineBasicBlock * EmitEntryPStateSM(MachineInstr &MI, MachineBasicBlock *BB) const
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
bool isReassocProfitable(SelectionDAG &DAG, SDValue N0, SDValue N1) const override
Control the following reassociation of operands: (op (op x, c1), y) -> (op (op x, y),...
bool shouldPreservePtrArith(const Function &F, EVT PtrVT) const override
In AArch64, true if FEAT_CPA is present.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
MachineBasicBlock * EmitF128CSEL(MachineInstr &MI, MachineBasicBlock *BB) const
LLT getOptimalMemOpLLT(const MemOp &Op, const AttributeList &FuncAttributes) const override
LLT returning variant.
bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override
GlobalISel - return true if it's profitable to perform the combine: shl ([sza]ext x),...
bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const override
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
MachineBasicBlock * EmitAllocateSMESaveBuffer(MachineInstr &MI, MachineBasicBlock *BB) const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool needsFixedCatchObjects() const override
Used for exception handling on Win64.
MachineBasicBlock * EmitAllocateZABuffer(MachineInstr &MI, MachineBasicBlock *BB) const
const AArch64TargetMachine & getTM() const
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
Value * getIRStackGuard(IRBuilderBase &IRB) const override
If the target has a standard location for the stack protector cookie, returns the address of that loc...
bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const override
EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool generateFMAsInMachineCombiner(EVT VT, CodeGenOptLevel OptLevel) const override
bool isComplexDeinterleavingOperationSupported(ComplexDeinterleavingOperation Operation, Type *Ty) const override
Does this target support complex deinterleaving with the given operation and type.
bool isOpSuitableForLDPSTP(const Instruction *I) const
AArch64TargetLowering(const TargetMachine &TM, const AArch64Subtarget &STI)
MachineBasicBlock * EmitGetSMESaveSize(MachineInstr &MI, MachineBasicBlock *BB) const
bool hasPairedLoad(EVT LoadedType, Align &RequiredAlignment) const override
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
bool isLegalAddImmediate(int64_t) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool shouldConsiderGEPOffsetSplit() const override
bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const override
Should we tranform the IR-optimal check for whether given truncation down into KeptBits would be trun...
bool isVectorClearMaskLegal(ArrayRef< int > M, EVT VT) const override
Similar to isShuffleMaskLegal.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool useLoadStackGuardNode(const Module &M) const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
ArrayRef< MCPhysReg > getRoundingControlRegisters() const override
Returns a 0 terminated array of rounding control registers that can be attached to a strict FP call.
bool lowerInterleaveIntrinsicToStore(Instruction *Store, Value *Mask, ArrayRef< Value * > InterleaveValues) const override
Lower an interleave intrinsic to a target specific store intrinsic.
MachineInstr * EmitKCFICheck(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const override
bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) const
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
This method can be implemented by targets that want to expose additional information about sign bits ...
bool isDesirableToCommuteXorWithShift(const SDNode *N) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
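The pattern both hooks protect is a plain bitfield extract: with a low-bit Mask, (X >> C) & Mask reads the field starting at bit C, which AArch64 can typically match as a single bitfield-extract instruction, so commuting the shift would only obscure it. A small standalone illustration (not LLVM code):

#include <cassert>
#include <cstdint>

// Bits [C, C + Width) of X, i.e. the (X >> C) & Mask shape with a low-bit mask.
uint32_t extractField(uint32_t X, unsigned C, unsigned Width) {
  return (X >> C) & ((1u << Width) - 1);
}

int main() {
  assert(extractField(0xABCD1234u, 8, 8) == 0x12);  // second byte of the value
  return 0;
}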
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
bool enableAggressiveFMAFusion(EVT VT) const override
Enable aggressive FMA fusion on targets that want it.
MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
MachineBasicBlock * EmitDynamicProbedAlloc(MachineInstr &MI, MachineBasicBlock *MBB) const
bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
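As a reminder of what the generic expansion computes, lane i of @llvm.get.active.lane.mask(%base, %n) is the unsigned comparison (%base + i) < %n. A scalar sketch (not LLVM code) for a hypothetical 4-lane mask:

#include <array>
#include <cstdint>

std::array<bool, 4> activeLaneMaskSketch(uint64_t Base, uint64_t N) {
  std::array<bool, 4> Mask{};
  for (uint64_t I = 0; I < 4; ++I)
    Mask[I] = Base + I < N;  // lane stays active while the index is in bounds
  return Mask;
}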
bool supportKCFIBundles() const override
Return true if the target supports kcfi operand bundles.
bool isMulAddWithConstProfitable(SDValue AddNode, SDValue ConstNode) const override
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x,...
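The rewrite this hook gates is just the distributive law, (x + c1) * c2 == x * c2 + c1 * c2; the profitability question is whether materializing the folded constant c1 * c2 is cheaper than keeping the add inside the multiply. A standalone check of the identity (not LLVM code):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t C1 = 7, C2 = 3;
  for (int64_t X = -100; X <= 100; ++X)
    assert((X + C1) * C2 == X * C2 + C1 * C2);
  return 0;
}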
bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON=false) const
bool mergeStoresAfterLegalization(EVT VT) const override
SVE code generation for fixed length vectors does not custom lower BUILD_VECTOR.
Class for arbitrary precision integers.
Definition APInt.h:78
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
CCState - This class holds information needed while lowering arguments and return values.
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this functio...
Definition Function.h:270
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:727
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
Machine Value Type.
static MVT getIntegerVT(unsigned BitWidth)
Instructions::iterator instr_iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Function & getFunction()
Return the LLVM function that this machine code represents.
Representation of each machine instruction.
Flags
Flags values. These may be or'd together.
MachineOperand class - Representation of each machine instruction operand.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const
Try to convert math with an overflow comparison into the corresponding DAG node operation.
ShiftLegalizationStrategy
Return the preferred strategy to legalize this SHIFT instruction, with ExpansionFactor being the recu...
virtual unsigned getMinimumJumpTableEntries() const
Return lower limit for number of blocks in a jump table.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
virtual bool softPromoteHalfType() const
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
TargetLowering(const TargetLowering &)=delete
virtual unsigned combineRepeatedFPDivisors() const
Indicate whether this target prefers to combine FDIVs with the same divisor.
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM Value Representation.
Definition Value.h:75
Base class of all SIMD vector types.
A range adaptor for a pair of iterators.
ArrayRef< MCPhysReg > getFPRArgRegs()
Rounding
Possible values of current rounding mode, which is specified in bits 23:22 of FPCR.
const unsigned StackProbeMaxLoopUnroll
Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxUnprobedStack
Maximum allowed number of unprobed bytes above SP at an ABI boundary.
const unsigned RoundingBitsPos
const uint64_t ReservedFPControlBits
ArrayRef< MCPhysReg > getGPRArgRegs()
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ CXX_FAST_TLS
Used for access functions.
Definition CallingConv.h:72
@ Fast
Attempts to make calls as fast as possible (e.g....
Definition CallingConv.h:41
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:807
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:477
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
AtomicOrdering
Atomic ordering for LLVM's memory model.
TargetTransformInfo TTI
CombineLevel
Definition DAGCombine.h:15
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
@ Enable
Enable colors.
Definition WithColor.h:47
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Represent subnormal handling kind for floating point instruction inputs and outputs.
Extended Value Type.
Definition ValueTypes.h:35
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:385
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition ValueTypes.h:381
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:168
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
Definition ValueTypes.h:174
These are IR-level optimization flags that may be propagated to SDNodes.