HexagonTargetTransformInfo.cpp
//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target-independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/LoopPeel.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
    cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

cl::opt<bool> HexagonAllowScatterGatherHVX(
    "hexagon-allow-scatter-gather-hvx", cl::init(false), cl::Hidden,
    cl::desc("Allow auto-generation of HVX scatter-gather"));

static cl::opt<bool> EnableV68FloatAutoHVX(
    "force-hvx-float", cl::Hidden,
    cl::desc("Enable auto-vectorization of floating point types on v68."));

static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
    cl::init(true), cl::Hidden,
    cl::desc("Control lookup table emission on Hexagon target"));

static cl::opt<bool> HexagonMaskedVMem("hexagon-masked-vmem", cl::init(true),
    cl::Hidden, cl::desc("Enable masked loads/stores for HVX"));
// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;
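
// Example (illustrative): in getArithmeticInstrCost below, a legal FP vector
// op is charged LT.first + FloatFactor * NumElements, so a <64 x half> add
// that legalizes in one step costs 1 + 4 * 64 = 257 rather than 1.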

bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}

bool HexagonTTIImpl::isHVXVectorType(Type *Ty) const {
  auto *VecTy = dyn_cast<VectorType>(Ty);
  if (!VecTy)
    return false;
  if (!ST.isTypeForHVX(VecTy))
    return false;
  if (ST.useHVXV69Ops() || !VecTy->getElementType()->isFloatingPointTy())
    return true;
  return ST.useHVXV68Ops() && EnableV68FloatAutoHVX;
}
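
// Example (illustrative, assuming 128-byte HVX mode): <64 x i16> and
// <32 x i32> qualify as HVX vector types here; an FP type such as
// <64 x half> additionally needs HVX v69, or v68 together with the
// -force-hvx-float flag, before it is accepted.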

unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
    return VTy->getNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(
    Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP,
    OptimizationRemarkEmitter *ORE) const {
  UP.Runtime = UP.Partial = true;
}

void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) const {
  BaseT::getPeelingPreferences(L, SE, PP);
  // Only try to peel innermost loops with small runtime trip counts.
  if (L && L->isInnermost() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    PP.PeelCount = 2;
  }
}
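
// Example (illustrative): an innermost loop whose exact trip count is
// unknown at compile time (getSmallConstantTripCount returns 0) but whose
// maximum trip count is a small constant gets two iterations peeled off
// the front.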

TTI::AddressingModeKind
HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
                                           ScalarEvolution *SE) const {
  // Bias LSR towards creating post-increment opportunities.
  return TTI::AMK_PostIndexed;
}

/// --- Vector TTI begin ---

unsigned HexagonTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = ClassID == 1;
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}
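
// Example (illustrative): ClassID 1 selects the vector register class, so
// with auto-HVX enabled this reports 32 HVX vector registers (v0-v31)
// alongside the 32 general-purpose scalar registers (r0-r31).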

unsigned HexagonTTIImpl::getMaxInterleaveFactor(ElementCount VF) const {
  return useHVX() ? 2 : 1;
}

TypeSize
HexagonTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(getMinVectorRegisterBitWidth());
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }
  llvm_unreachable("Unsupported register kind");
}

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength() * 8 : 32;
}

ElementCount HexagonTTIImpl::getMinimumVF(unsigned ElemWidth,
                                          bool IsScalable) const {
  assert(!IsScalable && "Scalable VFs are not supported for Hexagon");
  return ElementCount::getFixed((8 * ST.getVectorLength()) / ElemWidth);
}
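
// Example (illustrative, assuming 128-byte HVX mode): for 16-bit elements
// the minimum VF is (8 * 128) / 16 = 64 lanes, i.e. exactly one vector
// register; for 32-bit elements it is 32 lanes.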

InstructionCost HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
                                                 ArrayRef<Type *> Tys,
                                                 TTI::TargetCostKind CostKind) const {
  return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
}

InstructionCost
HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) const {
  if (ICA.getID() == Intrinsic::bswap) {
    std::pair<InstructionCost, MVT> LT =
        getTypeLegalizationCost(ICA.getReturnType());
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

InstructionCost
HexagonTTIImpl::getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                          const SCEV *S,
                                          TTI::TargetCostKind CostKind) const {
  return 0;
}

InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                                Align Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                TTI::OperandValueInfo OpInfo,
                                                const Instruction *I) const {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, OpInfo, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedValue();
    if (isHVXVectorType(VecTy)) {
      unsigned RegWidth =
          getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedValue();
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads.
      const Align RegAlign(RegWidth / 8);
      if (Alignment > RegAlign)
        Alignment = RegAlign;
      unsigned AlignWidth = 8 * Alignment.value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        Src->isFPOrFPVectorTy() ? FloatFactor * getTypeNumElements(Src) : 1;

    // At this point unspecified alignment is considered as Align(1).
    const Align BoundAlignment = std::min(Alignment, Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind,
                                OpInfo, I);
}
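
// Example (illustrative, assuming 128-byte HVX registers): loading a
// <128 x i16> (2048 bits) costs 2048 / 1024 = 2 full-register loads; a
// vector that is not a whole multiple of the register width instead pays
// 3 units per aligned piece it must be composed from (3 * NumLoads above).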

InstructionCost
HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                      Align Alignment, unsigned AddressSpace,
                                      TTI::TargetCostKind CostKind) const {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
}

InstructionCost HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                               VectorType *DstTy,
                                               VectorType *SrcTy,
                                               ArrayRef<int> Mask,
                                               TTI::TargetCostKind CostKind,
                                               int Index, VectorType *SubTp,
                                               ArrayRef<const Value *> Args,
                                               const Instruction *CxtI) const {
  return 1;
}

InstructionCost HexagonTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
}

InstructionCost HexagonTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) const {
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, CostKind);
}

InstructionCost HexagonTTIImpl::getCmpSelInstrCost(
    unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
    TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
    TTI::OperandValueInfo Op2Info, const Instruction *I) const {
  if (ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput) {
    if (!isHVXVectorType(ValTy) && ValTy->isFPOrFPVectorTy())
      return InstructionCost::getMax();
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                   Op1Info, Op2Info, I);
}
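
// Example (illustrative): an fcmp on a legal <64 x half> HVX type costs
// LT.first + 4 * 64 under the throughput model, while an FP compare on a
// non-HVX vector type is priced at InstructionCost::getMax() to block
// vectorization outright.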

InstructionCost HexagonTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Args, CxtI);

  if (Ty->isVectorTy()) {
    if (!isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy())
      return InstructionCost::getMax();
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
                                       Args, CxtI);
}

InstructionCost HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
                                                 Type *SrcTy,
                                                 TTI::CastContextHint CCH,
                                                 TTI::TargetCostKind CostKind,
                                                 const Instruction *I) const {
  auto isNonHVXFP = [this](Type *Ty) {
    return Ty->isVectorTy() && !isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy();
  };
  if (isNonHVXFP(SrcTy) || isNonHVXFP(DstTy))
    return InstructionCost::getMax();

  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(SrcTy);
    std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(DstTy);
    InstructionCost Cost =
        std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
    // TODO: Allow non-throughput costs that aren't binary.
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  }
  return 1;
}
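
// Example (illustrative): an fptosi from <32 x float> to <32 x i32> with
// both types legal in one step costs max(1, 1) + 4 * (32 + 0) = 129 for
// throughput; for other cost kinds the value is clamped to 0 or 1.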

InstructionCost HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                   TTI::TargetCostKind CostKind,
                                                   unsigned Index,
                                                   const Value *Op0,
                                                   const Value *Op1) const {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, there will need to be an extract.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, CostKind,
                                     Index, Op0, Op1);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}
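
// Example (illustrative): inserting an i32 element at lane 0 is free, at
// any other lane it costs the two rotations; inserting a narrower element
// additionally pays for the extract, and any extract costs 2.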

bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/,
                                        unsigned /*AddressSpace*/) const {
  // This function is called from scalarize-masked-mem-intrin, which runs
  // in pre-isel. Use ST directly instead of calling isHVXVectorType.
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/,
                                       unsigned /*AddressSpace*/) const {
  // This function is called from scalarize-masked-mem-intrin, which runs
  // in pre-isel. Use ST directly instead of calling isHVXVectorType.
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

bool HexagonTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) const {
  // For now assume we cannot deal with all HVX datatypes.
  if (!Ty->isVectorTy() || !ST.isTypeForHVX(Ty) ||
      !HexagonAllowScatterGatherHVX)
    return false;
  // This must be in sync with HexagonVectorCombine pass.
  switch (Ty->getScalarSizeInBits()) {
  case 8:
    return (getTypeNumElements(Ty) == 128);
  case 16:
    if (getTypeNumElements(Ty) == 64 || getTypeNumElements(Ty) == 32)
      return (Alignment >= 2);
    break;
  case 32:
    if (getTypeNumElements(Ty) == 32)
      return (Alignment >= 4);
    break;
  default:
    break;
  }
  return false;
}

bool HexagonTTIImpl::isLegalMaskedScatter(Type *Ty, Align Alignment) const {
  if (!Ty->isVectorTy() || !ST.isTypeForHVX(Ty) ||
      !HexagonAllowScatterGatherHVX)
    return false;
  // This must be in sync with HexagonVectorCombine pass.
  switch (Ty->getScalarSizeInBits()) {
  case 8:
    return (getTypeNumElements(Ty) == 128);
  case 16:
    if (getTypeNumElements(Ty) == 64)
      return (Alignment >= 2);
    break;
  case 32:
    if (getTypeNumElements(Ty) == 32)
      return (Alignment >= 4);
    break;
  default:
    break;
  }
  return false;
}
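
// Example (illustrative): with -hexagon-allow-scatter-gather-hvx set, a
// gather of <64 x i16> at alignment >= 2 or <32 x i32> at alignment >= 4
// stays a vector operation; any other shape is rejected here and then
// scalarized via the forceScalarizeMasked* hooks below.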

bool HexagonTTIImpl::forceScalarizeMaskedGather(VectorType *VTy,
                                                Align Alignment) const {
  return !isLegalMaskedGather(VTy, Alignment);
}

bool HexagonTTIImpl::forceScalarizeMaskedScatter(VectorType *VTy,
                                                 Align Alignment) const {
  return !isLegalMaskedScatter(VTy, Alignment);
}

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}

InstructionCost
HexagonTTIImpl::getInstructionCost(const User *U,
                                   ArrayRef<const Value *> Operands,
                                   TTI::TargetCostKind CostKind) const {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TTI::TCC_Free;
  return BaseT::getInstructionCost(U, Operands, CostKind);
}
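
// Example (illustrative): for IR such as
//   %v = load i16, ptr %p
//   %e = sext i16 %v to i32
// the sign extension folds into Hexagon's extending memh load, so the cast
// is reported as TTI::TCC_Free as long as the load has no other users.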