// Excerpt from Operator::hasPoisonGeneratingFlags(): each opcode is checked
// for the flags that can make the operator evaluate to poison despite
// non-poison inputs. Elided lines (marked "...") define the casted operands
// referenced below (OBO, TI, GEP, NNI, FP).
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl: {
    // ...
    return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
  // ...
  case Instruction::Trunc: {
    // ...
    return TI->hasNoUnsignedWrap() || TI->hasNoSignedWrap();
  // ...
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
  // ...
  case Instruction::GetElementPtr: {
    // ...
           GEP->getInRange() != std::nullopt;
  // ...
  case Instruction::UIToFP:
  case Instruction::ZExt:
    // ...
      return NNI->hasNonNeg();
  // ...
  case Instruction::ICmp:
  // ...
      return FP->hasNoNaNs() || FP->hasNoInfs();
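
// Caller-side sketch (not part of the listing above; the helper name
// countPoisonGeneratingOps is made up for illustration): every Instruction is
// also an Operator, so hasPoisonGeneratingFlags() can be queried directly,
// e.g. to count instructions carrying nuw/nsw, exact, nneg or GEP no-wrap
// flags.
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Operator.h"

static unsigned countPoisonGeneratingOps(const llvm::Function &F) {
  unsigned Count = 0;
  for (const llvm::BasicBlock &BB : F)
    for (const llvm::Instruction &I : BB)
      // Instructions are always valid Operators, so the cast cannot fail.
      if (llvm::cast<llvm::Operator>(&I)->hasPoisonGeneratingFlags())
        ++Count;
  return Count;
}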
// Excerpt from Operator::hasPoisonGeneratingAnnotations(): beyond the flags
// above, poison can also be introduced by return attributes or metadata on
// the underlying instruction I (null if this operator is not an instruction).
  return I && (I->hasPoisonGeneratingReturnAttributes() ||
               I->hasPoisonGeneratingMetadata());
// GEPOperator::getSourceElementType() (excerpt); I is the wrapped
// GetElementPtrInst when this operator is an instruction.
  return I->getSourceElementType();

// GEPOperator::getResultElementType() (excerpt).
  return I->getResultElementType();

// GEPOperator::getInRange() (excerpt); CE is the wrapped GEP constant
// expression, the only form that can carry an inrange attachment.
  return CE->getInRange();
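
// Sketch using the accessors above (describeGEP is an illustrative name):
// the GEPOperator interface hides whether the GEP is an instruction or a
// constant expression.
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

static void describeGEP(const llvm::GEPOperator &GEP) {
  llvm::errs() << "source element type: " << *GEP.getSourceElementType()
               << "\n";
  llvm::errs() << "result element type: " << *GEP.getResultElementType()
               << "\n";
  if (GEP.getInRange().has_value())
    llvm::errs() << "has an inrange attachment\n";
}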
// Excerpt from GEPOperator::getMaxPreservedAlignment(): for each GEP level,
// compute the worst-case byte offset and fold it into the preserved
// alignment.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      // ... (struct index: the offset comes from the StructLayout)
    } else {
      assert(GTI.isSequential() && "should be sequential");
      // ... (ElemCount is derived from the index operand)
      Offset = GTI.getSequentialElementStride(DL) * ElemCount;
    }
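
// Sketch for the excerpt above (boundGEPResultAlignment is an illustrative
// name): assuming the base pointer is known to be BaseAlign-aligned, the
// GEP's result is aligned to at least the smaller of BaseAlign and the
// alignment the GEP is guaranteed to preserve.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Alignment.h"
#include <algorithm>

static llvm::Align boundGEPResultAlignment(const llvm::GEPOperator &GEP,
                                           llvm::Align BaseAlign,
                                           const llvm::DataLayout &DL) {
  return std::min(BaseAlign, GEP.getMaxPreservedAlignment(DL));
}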
119 "The offset bit width does not match DL specification.");
129 if (SourceType->
isIntegerTy(8) && !Index.empty() && !ExternalAnalysis) {
131 if (CI && CI->getType()->isIntegerTy()) {
132 Offset += CI->getValue().sextOrTrunc(
Offset.getBitWidth());
138 bool UsedExternalAnalysis =
false;
140 Index = Index.sextOrTrunc(
Offset.getBitWidth());
145 if (!UsedExternalAnalysis) {
146 Offset += Index * IndexedSize;
150 bool Overflow =
false;
151 APInt OffsetPlus = Index.
smul_ov(IndexedSize, Overflow);
161 SourceType, Index.begin());
163 for (
auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
165 bool ScalableType = GTI.getIndexedType()->isScalableTy();
167 Value *V = GTI.getOperand();
171 if (ConstOffset && ConstOffset->getType()->isIntegerTy()) {
172 if (ConstOffset->isZero())
180 unsigned ElementIdx = ConstOffset->getZExtValue();
183 if (!AccumulateOffset(
189 if (!AccumulateOffset(ConstOffset->getValue(),
190 GTI.getSequentialElementStride(
DL)))
197 if (!ExternalAnalysis || STy || ScalableType)
200 if (!ExternalAnalysis(*V, AnalysisIndex))
202 UsedExternalAnalysis =
true;
203 if (!AccumulateOffset(AnalysisIndex, GTI.getSequentialElementStride(
DL)))
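
// Caller-side sketch for the excerpt above (the helper name tryFoldGEPOffset
// is made up; only the documented accumulateConstantOffset() API is assumed).
// The APInt must be created with the index width of the GEP's address space,
// matching the assert in the excerpt.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Operator.h"
#include <optional>

static std::optional<llvm::APInt>
tryFoldGEPOffset(const llvm::GEPOperator &GEP, const llvm::DataLayout &DL) {
  unsigned IdxWidth = DL.getIndexSizeInBits(GEP.getPointerAddressSpace());
  llvm::APInt Offset(IdxWidth, 0);
  // Fails (returns false) when an index cannot be folded to a constant and
  // no ExternalAnalysis callback is supplied.
  if (!GEP.accumulateConstantOffset(DL, Offset))
    return std::nullopt;
  return Offset;
}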
// Excerpt from GEPOperator::collectOffset(): split the GEP into a constant
// byte offset plus, for each variable index, a Value -> multiplier entry in
// VariableOffsets. CollectConstantOffset is a local helper (definition
// elided) that scales an index and adds it to ConstantOffset.
bool GEPOperator::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    SmallMapVector<Value *, APInt, 4> &VariableOffsets,
    APInt &ConstantOffset) const {
  assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");
  // ...
    Index = Index.sextOrTrunc(BitWidth);
    // ...
    ConstantOffset += Index * IndexedSize;
  // ...
    bool ScalableType = GTI.getIndexedType()->isScalableTy();
    // ...
    Value *V = GTI.getOperand();
    // ...
    if (ConstOffset && ConstOffset->getType()->isIntegerTy()) {
      if (ConstOffset->isZero())
        // ...
        unsigned ElementIdx = ConstOffset->getZExtValue();
        // ...
      CollectConstantOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL));
    // ...
    if (STy || ScalableType)
      // ...
    if (!IndexedSize.isZero()) {
      // ...
      It->second += IndexedSize;
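
// Caller-side sketch of the collectOffset() interface shown above (the helper
// name splitGEPOffset and the printing are illustrative only): on success,
// ConstantOffset holds the folded constant part and VarOffsets maps each
// variable index Value to its byte multiplier.
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/raw_ostream.h"

static bool splitGEPOffset(const llvm::GEPOperator &GEP,
                           const llvm::DataLayout &DL) {
  unsigned BitWidth = DL.getIndexSizeInBits(GEP.getPointerAddressSpace());
  llvm::SmallMapVector<llvm::Value *, llvm::APInt, 4> VarOffsets;
  llvm::APInt ConstantOffset(BitWidth, 0);
  if (!GEP.collectOffset(DL, BitWidth, VarOffsets, ConstantOffset))
    return false; // e.g. a scalable element type or a variable struct index
  for (const auto &KV : VarOffsets)
    llvm::errs() << *KV.first << " scaled by " << KV.second << "\n";
  llvm::errs() << "constant part: " << ConstantOffset << "\n";
  return true;
}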