diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h
index f70fd02ca573c..2845aede72abd 100644
--- a/llvm/include/llvm/Transforms/IPO/Attributor.h
+++ b/llvm/include/llvm/Transforms/IPO/Attributor.h
@@ -103,7 +103,9 @@
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SetOperations.h"
 #include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/iterator.h"
 #include "llvm/Analysis/AssumeBundleQueries.h"
 #include "llvm/Analysis/CFG.h"
@@ -320,6 +322,10 @@ inline bool operator==(const RangeTy &A, const RangeTy &B) {
   return A.Offset == B.Offset && A.Size == B.Size;
 }
 
+inline bool operator<(const RangeTy &A, const RangeTy &B) {
+  return A.Offset < B.Offset;
+}
+
 inline bool operator!=(const RangeTy &A, const RangeTy &B) { return !(A == B); }
 
 /// Return the initial value of \p Obj with type \p Ty if that is a constant.
@@ -5752,8 +5758,10 @@ struct AANonConvergent : public StateWrapper<BooleanState, AbstractAttribute> {
 
 /// An abstract interface for struct information.
 struct AAPointerInfo : public AbstractAttribute {
+protected:
   AAPointerInfo(const IRPosition &IRP) : AbstractAttribute(IRP) {}
 
+public:
   /// See AbstractAttribute::isValidIRPositionForInit
   static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
     if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
@@ -5785,6 +5793,139 @@ struct AAPointerInfo : public AbstractAttribute {
     AK_MUST_READ_WRITE = AK_MUST | AK_R | AK_W,
   };
 
+  /// A helper containing a list of offsets computed for a Use. Ideally this
+  /// list should be strictly ascending, but we ensure that only when we
+  /// actually translate the list of offsets to a RangeList.
+  struct OffsetInfo {
+    using VecTy = SmallVector<AA::RangeTy>;
+    // A map to store depth-1 predecessors per offset.
+    using OriginsTy = SmallVector<SmallPtrSet<Value *, 4>>;
+    using const_iterator = VecTy::const_iterator;
+    OriginsTy Origins;
+    VecTy Ranges;
+
+    const_iterator begin() const { return Ranges.begin(); }
+    const_iterator end() const { return Ranges.end(); }
+
+    bool operator==(const OffsetInfo &RHS) const {
+      return Ranges == RHS.Ranges && Origins == RHS.Origins;
+    }
+
+    bool operator!=(const OffsetInfo &RHS) const { return !(*this == RHS); }
+
+    // Insert a new Range and Origin.
+    void insert(AA::RangeTy Range, Value &V) {
+      auto *It = std::find(Ranges.begin(), Ranges.end(), Range);
+      // The range already exists; just record the additional origin.
+      if (It != Ranges.end()) {
+        size_t Index = It - Ranges.begin();
+        if (Index < Origins.size())
+          Origins[Index].insert(&V);
+      } else {
+        Ranges.push_back(Range);
+        Origins.emplace_back();
+        Origins.back().insert(&V);
+      }
+    }
+
+    // Set the size of the offset for all ranges.
+    void setSizeAll(uint64_t Size) {
+      for (auto &Range : Ranges)
+        Range.Size = Size;
+    }
+
+    // Helper function to get just the offsets from Ranges.
+    void getOnlyOffsets(SmallVector<int64_t> &Offsets) {
+      for (auto &Range : Ranges)
+        Offsets.push_back(Range.Offset);
+      // Ensure the offsets are unique.
+      sort(Offsets.begin(), Offsets.end());
+      Offsets.erase(std::unique(Offsets.begin(), Offsets.end()),
+                    Offsets.end());
+    }
+
+    bool isUnassigned() const { return Ranges.empty(); }
+
+    bool isUnknown() const {
+      if (isUnassigned())
+        return false;
+      if (Ranges.size() == 1)
+        return Ranges.front().Offset == AA::RangeTy::Unknown;
+      return false;
+    }
+
+    void setUnknown(Value &V) {
+      Ranges.clear();
+      Origins.clear();
+      insert(AA::RangeTy{AA::RangeTy::Unknown, AA::RangeTy::Unknown}, V);
+    }
+
+    // Increment all ranges by Inc.
+    // Add an origin V to all offsets.
+    void addToAll(int64_t Inc, Value &V) {
+      for (auto &Range : Ranges)
+        Range.Offset += Inc;
+
+      if (!Origins.empty()) {
+        for (auto &Origin : Origins)
+          Origin.insert(&V);
+      } else {
+        for (size_t Index = 0; Index < Ranges.size(); Index++) {
+          Origins.emplace_back();
+          Origins[Index].insert(&V);
+        }
+      }
+    }
+
+    // Increment all ranges by Inc.
+    void addToAll(int64_t Inc) {
+      for (auto &Range : Ranges)
+        Range.Offset += Inc;
+    }
+
+    /// Copy offsets from \p R into the current list.
+    ///
+    /// Ideally all lists should be strictly ascending, but we defer that to
+    /// the actual use of the list. So we just blindly append here.
+    void merge(const OffsetInfo &R) {
+      Ranges.append(R.Ranges);
+      // Ensure the elements are unique.
+      sort(Ranges.begin(), Ranges.end());
+      Ranges.erase(std::unique(Ranges.begin(), Ranges.end()), Ranges.end());
+
+      OriginsTy ToBeMergedOrigins = R.Origins;
+      for (auto &Origin : ToBeMergedOrigins)
+        Origins.emplace_back(Origin);
+    }
+
+    // Merge two OffsetInfo structs. Takes an additional origin argument and
+    // adds it to the Origins set of each corresponding offset.
+    void mergeWithOffset(const OffsetInfo &R, Value &CurPtr) {
+      Ranges.append(R.Ranges);
+      // Ensure the elements are unique.
+      sort(Ranges.begin(), Ranges.end());
+      Ranges.erase(std::unique(Ranges.begin(), Ranges.end()), Ranges.end());
+      auto &ROffsets = R.Ranges;
+      for (auto Offset : ROffsets) {
+        auto *It = std::find(Ranges.begin(), Ranges.end(), Offset);
+        if (It == Ranges.end())
+          continue;
+        size_t Index = It - Ranges.begin();
+        if (Index >= Origins.size()) {
+          Origins.emplace_back();
+          Origins.back().insert(&CurPtr);
+        } else {
+          Origins[Index].insert(&CurPtr);
+        }
+      }
+    }
+  };
+
+  using OffsetInfoMapTy = DenseMap<Value *, OffsetInfo>;
+  using AccessPathTy = SmallVector<Value *, 4>;
+  using AccessPathSetTy = SmallPtrSet<AccessPathTy *, 4>;
+
   /// A container for a list of ranges.
   struct RangeList {
     // The set of ranges rarely contains more than one element, and is unlikely
@@ -5939,15 +6080,17 @@ struct AAPointerInfo : public AbstractAttribute {
 
   /// An access description.
   struct Access {
     Access(Instruction *I, int64_t Offset, int64_t Size,
-           std::optional<Value *> Content, AccessKind Kind, Type *Ty)
+           std::optional<Value *> Content, AccessKind Kind, Type *Ty,
+           AccessPathSetTy *AccessPaths)
         : LocalI(I), RemoteI(I), Content(Content), Ranges(Offset, Size),
-          Kind(Kind), Ty(Ty) {
+          Kind(Kind), Ty(Ty), AccessPaths(AccessPaths) {
       verify();
     }
     Access(Instruction *LocalI, Instruction *RemoteI, const RangeList &Ranges,
-           std::optional<Value *> Content, AccessKind K, Type *Ty)
+           std::optional<Value *> Content, AccessKind K, Type *Ty,
+           AccessPathSetTy *AccessPaths)
         : LocalI(LocalI), RemoteI(RemoteI), Content(Content), Ranges(Ranges),
-          Kind(K), Ty(Ty) {
+          Kind(K), Ty(Ty), AccessPaths(AccessPaths) {
       if (Ranges.size() > 1) {
         Kind = AccessKind(Kind | AK_MAY);
         Kind = AccessKind(Kind & ~AK_MUST);
@@ -5956,9 +6099,9 @@ struct AAPointerInfo : public AbstractAttribute {
     }
     Access(Instruction *LocalI, Instruction *RemoteI, int64_t Offset,
            int64_t Size, std::optional<Value *> Content, AccessKind Kind,
-           Type *Ty)
+           Type *Ty, AccessPathSetTy *AccessPaths)
         : LocalI(LocalI), RemoteI(RemoteI), Content(Content),
-          Ranges(Offset, Size), Kind(Kind), Ty(Ty) {
+          Ranges(Offset, Size), Kind(Kind), Ty(Ty), AccessPaths(AccessPaths) {
       verify();
     }
     Access(const Access &Other) = default;
@@ -5966,7 +6109,8 @@ struct AAPointerInfo : public AbstractAttribute {
     Access &operator=(const Access &Other) = default;
     bool operator==(const Access &R) const {
       return LocalI == R.LocalI && RemoteI == R.RemoteI && Ranges == R.Ranges &&
-             Content == R.Content && Kind == R.Kind;
+             Content == R.Content && Kind == R.Kind &&
+             checkAccessPathsAreSame(R.AccessPaths);
     }
     bool operator!=(const Access &R) const { return !(*this == R); }
@@ -6078,11 +6222,53 @@ struct AAPointerInfo : public AbstractAttribute {
       }
     }
 
+    // Merge two access path sets into one.
+    void mergeAccessPaths(const AccessPathSetTy *AccessPathsNew) const {
+      for (auto *Path : *AccessPathsNew)
+        if (!existsChain(Path))
+          AccessPaths->insert(Path);
+    }
+
+    // Check if the given access path sets are the same.
+    bool checkAccessPathsAreSame(const AccessPathSetTy *AccessPathsR) const {
+      if (AccessPaths->size() != AccessPathsR->size())
+        return false;
+
+      for (auto *Path : *AccessPathsR)
+        if (!existsChain(Path))
+          return false;
+      return true;
+    }
+
+    // Check if the chain exists in the AccessPaths set.
+    bool existsChain(const AccessPathTy *NewPath) const {
+      for (auto *OldPath : *AccessPaths)
+        if (*OldPath == *NewPath)
+          return true;
+
+      return false;
+    }
+
+    void dumpAccessPaths(raw_ostream &O) const {
+      O << "Print all access paths found:\n";
+      for (auto *It : *AccessPaths) {
+        O << "Backtrack a unique access path:\n";
+        for (Value *Ins : *It)
+          O << *Ins << "\n";
+      }
+    }
+
+    const AccessPathSetTy *getAccessChain() const { return AccessPaths; }
     const RangeList &getRanges() const { return Ranges; }
 
     using const_iterator = RangeList::const_iterator;
     const_iterator begin() const { return Ranges.begin(); }
     const_iterator end() const { return Ranges.end(); }
+    size_t size() const { return Ranges.size(); }
 
   private:
     /// The instruction responsible for the access with respect to the local
@@ -6105,6 +6291,10 @@ struct AAPointerInfo : public AbstractAttribute {
     /// The type of the content, thus the type read/written, can be null if not
     /// available.
     Type *Ty;
+
+    /// The full chain of instructions that participate in the Access.
+    /// There may be more than one access chain.
+    AccessPathSetTy *AccessPaths;
   };
 
   /// Create an abstract attribute view for the position \p IRP.
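[Illustration, not part of the patch: a sketch of how the new structures fit together; the value names are hypothetical and sizes are simplified. Each pointer value receives an OffsetInfo whose Ranges list the byte ranges it may designate and whose Origins record its depth-1 predecessors; the per-Access path set is later reconstructed from those origins.]

  %a   = alloca [8 x i8]                  ; Ranges {[0, 8]},        Origins {{%a}}
  %p0  = getelementptr i8, ptr %a, i64 0  ; Ranges {[0, 1]},        Origins {{%a}}
  %p1  = getelementptr i8, ptr %a, i64 4  ; Ranges {[4, 1]},        Origins {{%a}}
  %sel = select i1 %c, ptr %p0, ptr %p1   ; Ranges {[0,1],[4,1]},   Origins {{%p0},{%p1}}
  store i32 1, ptr %sel                   ; Access paths {store,%sel,%p0,%a} and {store,%sel,%p1,%a}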
@@ -6121,6 +6311,9 @@ struct AAPointerInfo : public AbstractAttribute {
   virtual const_bin_iterator begin() const = 0;
   virtual const_bin_iterator end() const = 0;
   virtual int64_t numOffsetBins() const = 0;
+  virtual void dumpState(raw_ostream &O) const = 0;
+  virtual const Access &getBinAccess(unsigned Index) const = 0;
+  virtual const DenseMap<Value *, OffsetInfo> &getOffsetInfoMap() const = 0;
 
   /// Call \p CB on all accesses that might interfere with \p Range and return
   /// true if all such accesses were known and the callback returned true for
@@ -6292,6 +6485,9 @@ struct AAAllocationInfo : public StateWrapper<BooleanState, AbstractAttribute> {
 
   virtual std::optional<TypeSize> getAllocatedSize() const = 0;
 
+  using NewOffsetsTy = DenseMap<AA::RangeTy, AA::RangeTy>;
+  virtual const NewOffsetsTy &getNewOffsets() const = 0;
+
   /// See AbstractAttribute::getName()
   const std::string getName() const override { return "AAAllocationInfo"; }
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 2816a85743faa..a508d55967b64 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -11,6 +11,8 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/GlobalVariable.h"
 #include "llvm/Transforms/IPO/Attributor.h"
 
 #include "llvm/ADT/APInt.h"
@@ -850,8 +852,13 @@ struct AA::PointerInfo::State : public AbstractState {
   ChangeStatus addAccess(Attributor &A, const AAPointerInfo::RangeList &Ranges,
                          Instruction &I, std::optional<Value *> Content,
                          AAPointerInfo::AccessKind Kind, Type *Ty,
+                         AAPointerInfo::OffsetInfoMapTy &OffsetInfoMap,
                          Instruction *RemoteI = nullptr);
 
+  AAPointerInfo::AccessPathSetTy *
+  findAllAccessPaths(AAPointerInfo::OffsetInfoMapTy &OffsetInfoMap,
+                     Instruction *LocalI);
+
   AAPointerInfo::const_bin_iterator begin() const { return OffsetBins.begin(); }
   AAPointerInfo::const_bin_iterator end() const { return OffsetBins.end(); }
   int64_t numOffsetBins() const { return OffsetBins.size(); }
@@ -926,10 +933,95 @@ struct AA::PointerInfo::State : public AbstractState {
   BooleanState BS;
 };
 
+AAPointerInfo::AccessPathSetTy *AA::PointerInfo::State::findAllAccessPaths(
+    AAPointerInfo::OffsetInfoMapTy &OffsetInfoMap, Instruction *LocalI) {
+  AAPointerInfo::AccessPathSetTy *AccessPathsSet =
+      new AAPointerInfo::AccessPathSetTy();
+
+  // Store the instruction and its storage (i.e., which path it belongs to)
+  // on the stack. We also store the visited map on the stack.
+  // Since we want to find new paths, we want to make sure an instruction is
+  // not visited twice on the same path. However, we can visit the same
+  // instruction more than once if it exists on different paths.
+  using VisitedTy = SmallPtrSet<Value *, 16>;
+  using StackElementTy =
+      std::tuple<Value *, AAPointerInfo::AccessPathTy *, VisitedTy>;
+
+  SmallVector<StackElementTy> Stack;
+
+  // Populate the stack with elements.
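+  // For example (a sketch): if LocalI is `store i32 1, ptr %sel` and %sel is
+  // a select of two GEPs %p0 and %p1 of the underlying alloca, the stack is
+  // seeded with one entry for %sel. When %sel is popped, its Origins yield
+  // the successors {%p0, %p1}, so the current chain is forked into two paths
+  // that are explored independently, each with its own copy of the visited
+  // set.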
+  for (auto *It = LocalI->op_begin(); It != LocalI->op_end(); It++) {
+    Value *V = cast<Value>(It);
+    if (!OffsetInfoMap.contains(V))
+      continue;
+
+    SmallPtrSet<Value *, 16> LocalVisitedMap;
+    AAPointerInfo::AccessPathTy *NewPath = new AAPointerInfo::AccessPathTy();
+    AccessPathsSet->insert(NewPath);
+    NewPath->push_back(LocalI);
+    Stack.push_back(std::make_tuple(V, NewPath, LocalVisitedMap));
+  }
+
+  while (!Stack.empty()) {
+    auto Entry = Stack.pop_back_val();
+    Value *Top = std::get<0>(Entry);
+    AAPointerInfo::AccessPathTy *CurrentChain = std::get<1>(Entry);
+    auto &Visited = std::get<2>(Entry);
+
+    if (!OffsetInfoMap.contains(Top))
+      continue;
+
+    if (!Visited.insert(Top).second)
+      continue;
+
+    CurrentChain->push_back(Top);
+    auto OI = OffsetInfoMap.lookup(Top);
+    auto &Origins = OI.Origins;
+
+    SmallPtrSet<Value *, 16> Successors;
+    for (auto &Origin : Origins) {
+      for (auto *Val : Origin) {
+        // Since we store depth-1 predecessors in our Origins map, we can be
+        // sure that we hit the termination condition if the successor is the
+        // current instruction.
+        if (Val != Top)
+          Successors.insert(Val);
+      }
+    }
+
+    if (Successors.empty())
+      continue;
+
+    // Create new paths to be forked.
+    SmallVector<AAPointerInfo::AccessPathTy *> NewPaths;
+    NewPaths.push_back(CurrentChain);
+    for (size_t Index = 1; Index < Successors.size(); Index++) {
+      AAPointerInfo::AccessPathTy *NewPath = new AAPointerInfo::AccessPathTy(
+          CurrentChain->begin(), CurrentChain->end());
+      NewPaths.push_back(NewPath);
+    }
+
+    int Index = 0;
+    // Traverse the successors.
+    for (auto *Successor : Successors) {
+      AAPointerInfo::AccessPathTy *NextChain = NewPaths[Index];
+      AccessPathsSet->insert(NextChain);
+      // Push the successors to traverse and their corresponding storage on
+      // the stack.
+      VisitedTy NewVisitedSet(Visited.begin(), Visited.end());
+      Stack.push_back(std::make_tuple(Successor, NextChain, NewVisitedSet));
+      Index++;
+    }
+  }
+
+  return AccessPathsSet;
+}
+
 ChangeStatus AA::PointerInfo::State::addAccess(
     Attributor &A, const AAPointerInfo::RangeList &Ranges, Instruction &I,
     std::optional<Value *> Content, AAPointerInfo::AccessKind Kind, Type *Ty,
-    Instruction *RemoteI) {
+    AAPointerInfo::OffsetInfoMapTy &OffsetInfoMap, Instruction *RemoteI) {
   RemoteI = RemoteI ? RemoteI : &I;
 
   // Check if we have an access for this instruction, if not, simply add it.
@@ -956,7 +1048,11 @@ ChangeStatus AA::PointerInfo::State::addAccess(
   };
 
   if (!AccExists) {
-    AccessList.emplace_back(&I, RemoteI, Ranges, Content, Kind, Ty);
+    AAPointerInfo::AccessPathSetTy *AccessPaths =
+        AA::PointerInfo::State::findAllAccessPaths(OffsetInfoMap, &I);
+    AccessList.emplace_back(&I, RemoteI, Ranges, Content, Kind, Ty,
+                            AccessPaths);
+
     assert((AccessList.size() == AccIndex + 1) &&
            "New Access should have been at AccIndex");
     LocalList.push_back(AccIndex);
@@ -966,13 +1062,18 @@ ChangeStatus AA::PointerInfo::State::addAccess(
 
   // Combine the new Access with the existing Access, and then update the
   // mapping in the offset bins.
-  AAPointerInfo::Access Acc(&I, RemoteI, Ranges, Content, Kind, Ty);
+  AAPointerInfo::AccessPathSetTy *AccessPaths =
+      AA::PointerInfo::State::findAllAccessPaths(OffsetInfoMap, &I);
+  AAPointerInfo::Access Acc(&I, RemoteI, Ranges, Content, Kind, Ty,
+                            AccessPaths);
   auto &Current = AccessList[AccIndex];
   auto Before = Current;
   Current &= Acc;
   if (Current == Before)
     return ChangeStatus::UNCHANGED;
 
+  // Merge the newly generated access paths with the old access paths.
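+  // E.g., if a later update round discovers a second GEP chain that reaches
+  // the same store, the paths found for the new access are folded into the
+  // access's existing path set here instead of replacing it.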
+  Before.mergeAccessPaths(Acc.getAccessChain());
   auto &ExistingRanges = Before.getRanges();
   auto &NewRanges = Current.getRanges();
@@ -1002,60 +1103,19 @@
 
 namespace {
 
-/// A helper containing a list of offsets computed for a Use. Ideally this
-/// list should be strictly ascending, but we ensure that only when we
-/// actually translate the list of offsets to a RangeList.
-struct OffsetInfo {
-  using VecTy = SmallVector<int64_t>;
-  using const_iterator = VecTy::const_iterator;
-  VecTy Offsets;
-
-  const_iterator begin() const { return Offsets.begin(); }
-  const_iterator end() const { return Offsets.end(); }
-
-  bool operator==(const OffsetInfo &RHS) const {
-    return Offsets == RHS.Offsets;
-  }
-
-  bool operator!=(const OffsetInfo &RHS) const { return !(*this == RHS); }
-
-  void insert(int64_t Offset) { Offsets.push_back(Offset); }
-  bool isUnassigned() const { return Offsets.size() == 0; }
-
-  bool isUnknown() const {
-    if (isUnassigned())
-      return false;
-    if (Offsets.size() == 1)
-      return Offsets.front() == AA::RangeTy::Unknown;
-    return false;
-  }
-
-  void setUnknown() {
-    Offsets.clear();
-    Offsets.push_back(AA::RangeTy::Unknown);
-  }
-
-  void addToAll(int64_t Inc) {
-    for (auto &Offset : Offsets) {
-      Offset += Inc;
-    }
-  }
-
-  /// Copy offsets from \p R into the current list.
-  ///
-  /// Ideally all lists should be strictly ascending, but we defer that to the
-  /// actual use of the list. So we just blindly append here.
-  void merge(const OffsetInfo &R) { Offsets.append(R.Offsets); }
-};
-
 #ifndef NDEBUG
-static raw_ostream &operator<<(raw_ostream &OS, const OffsetInfo &OI) {
+static raw_ostream &operator<<(raw_ostream &OS,
+                               const AAPointerInfo::OffsetInfo &OI) {
   ListSeparator LS;
-  OS << "[";
+  int I = 0;
   for (auto Offset : OI) {
-    OS << LS << Offset;
+    OS << LS << "[Offset, Size]: " << Offset << "\n";
+    auto &Origin = OI.Origins[I];
+    for (auto *Val : Origin)
+      OS << "Origin: " << *Val << "\n";
+    I++;
   }
-  OS << "]";
+  OS << "\n";
+
   return OS;
 }
 #endif // NDEBUG
@@ -1084,6 +1144,15 @@ struct AAPointerInfoImpl
     return State::numOffsetBins();
   }
 
+  virtual const Access &getBinAccess(unsigned Index) const override {
+    return getAccess(Index);
+  }
+
+  virtual const DenseMap<Value *, OffsetInfo> &
+  getOffsetInfoMap() const override {
+    return OffsetInfoMap;
+  }
+
   bool forallInterferingAccesses(
       AA::RangeTy Range,
       function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
@@ -1365,7 +1434,8 @@ struct AAPointerInfoImpl
 
   ChangeStatus translateAndAddStateFromCallee(Attributor &A,
                                               const AAPointerInfo &OtherAA,
-                                              CallBase &CB) {
+                                              CallBase &CB,
+                                              OffsetInfoMapTy &OffsetInfoMap) {
    using namespace AA::PointerInfo;
    if (!OtherAA.getState().isValidState() || !isValidState())
      return indicatePessimisticFixpoint();
@@ -1388,15 +1458,16 @@ struct AAPointerInfoImpl
       AK = AccessKind(AK & (IsByval ? AccessKind::AK_R : AccessKind::AK_RW));
       AK = AccessKind(AK | (RAcc.isMayAccess() ? AK_MAY : AK_MUST));
-      Changed |= addAccess(A, RAcc.getRanges(), CB, Content, AK,
-                           RAcc.getType(), RAcc.getRemoteInst());
+      Changed |=
+          addAccess(A, RAcc.getRanges(), CB, Content, AK, RAcc.getType(),
+                    OffsetInfoMap, RAcc.getRemoteInst());
     }
   }
   return Changed;
 }
 
 ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA,
-                                  const OffsetInfo &Offsets, CallBase &CB) {
+                                  const OffsetInfo &Ranges, CallBase &CB) {
   using namespace AA::PointerInfo;
   if (!OtherAA.getState().isValidState() || !isValidState())
     return indicatePessimisticFixpoint();
@@ -1409,28 +1480,31 @@ struct AAPointerInfoImpl
   for (const auto &It : State) {
     for (auto Index : It.getSecond()) {
       const auto &RAcc = State.getAccess(Index);
-      for (auto Offset : Offsets) {
-        auto NewRanges = Offset == AA::RangeTy::Unknown
+      for (auto Range : Ranges) {
+        auto NewRanges = Range.Offset == AA::RangeTy::Unknown
                              ? AA::RangeTy::getUnknown()
                              : RAcc.getRanges();
         if (!NewRanges.isUnknown()) {
-          NewRanges.addToAllOffsets(Offset);
+          NewRanges.addToAllOffsets(Range.Offset);
         }
         Changed |=
             addAccess(A, NewRanges, CB, RAcc.getContent(), RAcc.getKind(),
-                      RAcc.getType(), RAcc.getRemoteInst());
+                      RAcc.getType(), OffsetInfoMap, RAcc.getRemoteInst());
       }
     }
   }
   return Changed;
 }
 
   /// Statistic tracking for all AAPointerInfo implementations.
   /// See AbstractAttribute::trackStatistics().
   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
 
   /// Dump the state into \p O.
-  void dumpState(raw_ostream &O) {
+  virtual void dumpState(raw_ostream &O) const override {
     for (auto &It : OffsetBins) {
       O << "[" << It.first.Offset << "-" << It.first.Offset + It.first.Size
         << "] : " << It.getSecond().size() << "\n";
@@ -1449,9 +1523,11 @@ struct AAPointerInfoImpl
         else
          O << " - c: <unknown>\n";
       }
+      Acc.dumpAccessPaths(O);
     }
   }
 }
+  OffsetInfoMapTy OffsetInfoMap;
};

struct AAPointerInfoFloating : public AAPointerInfoImpl {
  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// Deal with an access and signal if it was handled successfully.
  bool handleAccess(Attributor &A, Instruction &I,
                    std::optional<Value *> Content, AccessKind Kind,
-                   SmallVectorImpl<int64_t> &Offsets, ChangeStatus &Changed,
-                   Type &Ty) {
+                   OffsetInfo &OI, ChangeStatus &Changed, Type &Ty,
+                   OffsetInfoMapTy &OffsetInfoMap) {
    using namespace AA::PointerInfo;
    auto Size = AA::RangeTy::Unknown;
    const DataLayout &DL = A.getDataLayout();
@@ -1472,16 +1548,23 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
      Size = AccessSize.getFixedValue();
 
    // Make a strictly ascending list of offsets as required by addAccess()
-    llvm::sort(Offsets);
-    auto *Last = llvm::unique(Offsets);
-    Offsets.erase(Last, Offsets.end());
+    auto Ranges = OI.Ranges;
+    auto Origins = OI.Origins;
+
+    llvm::sort(Ranges);
+    auto *Last = llvm::unique(Ranges);
+    Ranges.erase(Last, Ranges.end());
+
+    SmallVector<int64_t> OffsetsOnly;
+    OI.getOnlyOffsets(OffsetsOnly);
 
    VectorType *VT = dyn_cast<VectorType>(&Ty);
    if (!VT || VT->getElementCount().isScalable() ||
        !Content.value_or(nullptr) || !isa<Constant>(*Content) ||
        (*Content)->getType() != VT ||
        DL.getTypeStoreSize(VT->getElementType()).isScalable()) {
-      Changed = Changed | addAccess(A, {Offsets, Size}, I, Content, Kind, &Ty);
+      Changed = Changed | addAccess(A, {OffsetsOnly, Size}, I, Content, Kind,
+                                    &Ty, OffsetInfoMap);
    } else {
      // Handle vector stores with constant content element-wise.
      // TODO: We could look for the elements or create instructions
@@ -1493,7 +1576,8 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
      int64_t ElementSize = DL.getTypeStoreSize(ElementType).getFixedValue();
      auto *ConstContent = cast<Constant>(*Content);
      Type *Int32Ty = Type::getInt32Ty(ElementType->getContext());
-      SmallVector<int64_t> ElementOffsets(Offsets.begin(), Offsets.end());
+      SmallVector<int64_t> ElementOffsets;
+      OI.getOnlyOffsets(ElementOffsets);
 
      for (int i = 0, e = VT->getElementCount().getFixedValue(); i != e; ++i) {
        Value *ElementContent = ConstantExpr::getExtractElement(
@@ -1501,7 +1585,8 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
        // Add the element access.
        Changed = Changed | addAccess(A, {ElementOffsets, ElementSize}, I,
-                                      ElementContent, Kind, ElementType);
+                                      ElementContent, Kind, ElementType,
+                                      OffsetInfoMap);
 
        // Advance the offsets for the next element.
        for (auto &ElementOffset : ElementOffsets)
@@ -1520,7 +1605,7 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
  /// \return true iff \p UsrOI is updated.
  bool collectConstantsForGEP(Attributor &A, const DataLayout &DL,
                              OffsetInfo &UsrOI, const OffsetInfo &PtrOI,
-                              const GEPOperator *GEP);
+                              GEPOperator *GEP, Value *CurPtr);
 
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
@@ -1528,11 +1613,9 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
  }
};

-bool AAPointerInfoFloating::collectConstantsForGEP(Attributor &A,
-                                                   const DataLayout &DL,
-                                                   OffsetInfo &UsrOI,
-                                                   const OffsetInfo &PtrOI,
-                                                   const GEPOperator *GEP) {
+bool AAPointerInfoFloating::collectConstantsForGEP(
+    Attributor &A, const DataLayout &DL, OffsetInfo &UsrOI,
+    const OffsetInfo &PtrOI, GEPOperator *GEP, Value *CurPtr) {
  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  MapVector<Value *, APInt> VariableOffsets;
  APInt ConstantOffset(BitWidth, 0);
@@ -1542,7 +1625,7 @@ bool AAPointerInfoFloating::collectConstantsForGEP(Attributor &A,
         "determined to be unknown.");
 
  if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {
-    UsrOI.setUnknown();
+    UsrOI.setUnknown(*CurPtr);
    return true;
  }
@@ -1551,7 +1634,9 @@ bool AAPointerInfoFloating::collectConstantsForGEP(Attributor &A,
                    << *GEP << "\n");
 
  auto Union = PtrOI;
-  Union.addToAll(ConstantOffset.getSExtValue());
+  // Clear the origins, since we just want to keep a single predecessor.
+  Union.Origins.clear();
+  Union.addToAll(ConstantOffset.getSExtValue(), *CurPtr);
 
  // Each VI in VariableOffsets has a set of potential constant values. Every
  // combination of elements, picked one each from these sets, is separately
@@ -1560,7 +1645,7 @@ bool AAPointerInfoFloating::collectConstantsForGEP(Attributor &A,
    auto *PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
        *this, IRPosition::value(*VI.first), DepClassTy::OPTIONAL);
    if (!PotentialConstantsAA || !PotentialConstantsAA->isValidState()) {
-      UsrOI.setUnknown();
+      UsrOI.setUnknown(*CurPtr);
      return true;
    }
@@ -1579,14 +1664,16 @@ bool AAPointerInfoFloating::collectConstantsForGEP(Attributor &A,
    OffsetInfo Product;
    for (const auto &ConstOffset : AssumedSet) {
      auto CopyPerOffset = Union;
-      CopyPerOffset.addToAll(ConstOffset.getSExtValue() *
-                             VI.second.getZExtValue());
+      CopyPerOffset.addToAll(
+          ConstOffset.getSExtValue() * VI.second.getZExtValue(), *CurPtr);
      Product.merge(CopyPerOffset);
    }
    Union = Product;
  }
 
  UsrOI = std::move(Union);
+  TypeSize Size = DL.getTypeAllocSize(GEP->getResultElementType());
+  UsrOI.setSizeAll(Size);
  return true;
}
@@ -1595,9 +1682,27 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;
  const DataLayout &DL = A.getDataLayout();
  Value &AssociatedValue = getAssociatedValue();
-
-  DenseMap<Value *, OffsetInfo> OffsetInfoMap;
-  OffsetInfoMap[&AssociatedValue].insert(0);
+  OffsetInfoMap.clear();
+
+  uint64_t Size;
+  Function *F = getAssociatedFunction();
+  TargetLibraryInfo *TLI = nullptr;
+  if (F)
+    TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
+
+  if (TLI && getObjectSize(&AssociatedValue, Size, DL, TLI)) {
+    OffsetInfoMap[&AssociatedValue].insert(AA::RangeTy(0, Size),
+                                           AssociatedValue);
+  } else if (isa<GlobalVariable>(AssociatedValue)) {
+    auto &Glob = cast<GlobalVariable>(AssociatedValue);
+    TypeSize SizeOfType = DL.getTypeAllocSize(Glob.getValueType());
+    OffsetInfoMap[&AssociatedValue].insert(AA::RangeTy(0, SizeOfType),
+                                           AssociatedValue);
+  } else {
+    TypeSize SizeOfType = DL.getTypeAllocSize(AssociatedValue.getType());
+    OffsetInfoMap[&AssociatedValue].insert(AA::RangeTy(0, SizeOfType),
+                                           AssociatedValue);
+  }
 
  auto HandlePassthroughUser = [&](Value *Usr, Value *CurPtr, bool &Follow) {
    // One does not simply walk into a map and assign a reference to a possibly
@@ -1616,7 +1721,13 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
    auto &PtrOI = OffsetInfoMap[CurPtr];
    assert(!PtrOI.isUnassigned() &&
           "Cannot pass through if the input Ptr was not visited!");
-    UsrOI.merge(PtrOI);
+    if (isa<PHINode>(Usr) || isa<SelectInst>(Usr)) {
+      UsrOI.mergeWithOffset(PtrOI, *CurPtr);
+    } else {
+      UsrOI = PtrOI;
+      UsrOI.Origins.clear();
+      UsrOI.addToAll(0, *CurPtr);
+    }
    Follow = true;
    return true;
  };
@@ -1649,11 +1760,11 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
 
      if (PtrOI.isUnknown()) {
        Follow = true;
-        UsrOI.setUnknown();
+        UsrOI.setUnknown(*GEP);
        return true;
      }
 
-      Follow = collectConstantsForGEP(A, DL, UsrOI, PtrOI, GEP);
+      Follow = collectConstantsForGEP(A, DL, UsrOI, PtrOI, GEP, CurPtr);
      return true;
    }
    if (isa<PtrToIntInst>(Usr))
@@ -1677,7 +1788,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand offset unknown "
                          << *CurPtr << " in " << *PHI << "\n");
        Follow = !UsrOI.isUnknown();
-        UsrOI.setUnknown();
+        UsrOI.setUnknown(*CurPtr);
        return true;
      }
@@ -1688,7 +1799,6 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI is invariant (so far)");
        return true;
      }
-      // Check if the PHI operand can be traced back to AssociatedValue.
      APInt Offset(
          DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
          0);
@@ -1700,7 +1810,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
                          << *CurPtr << " in " << *PHI
                          << " (base: " << *CurPtrBase << ")\n");
-        UsrOI.setUnknown();
+        UsrOI.setUnknown(*CurPtr);
        Follow = true;
        return true;
      }
@@ -1717,7 +1827,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
                               *PHI->getFunction());
      if (mayBeInCycle(CI, cast<PHINode>(Usr), /* HeaderOnly */ true)) {
        auto BaseOI = It->getSecond();
-        BaseOI.addToAll(Offset.getZExtValue());
+        BaseOI.addToAll(Offset.getZExtValue(), *CurPtr);
        if (IsFirstPHIUser || BaseOI == UsrOI) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI is invariant " << *CurPtr
                            << " in " << *Usr << "\n");
          return HandlePassthroughUser(Usr, CurPtr, Follow);
        }
        LLVM_DEBUG(
            dbgs() << "[AAPointerInfo] PHI operand pointer offset mismatch "
                   << *CurPtr << " in " << *PHI << "\n");
-        UsrOI.setUnknown();
+        UsrOI.setUnknown(*CurPtr);
        Follow = true;
        return true;
      }
 
-      UsrOI.merge(PtrOI);
+      UsrOI.mergeWithOffset(PtrOI, *CurPtr);
      Follow = true;
      return true;
    }
@@ -1746,8 +1856,8 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
      else
        AK = AccessKind(AK | AccessKind::AK_MAY);
      if (!handleAccess(A, *LoadI, /* Content */ nullptr, AK,
-                        OffsetInfoMap[CurPtr].Offsets, Changed,
-                        *LoadI->getType()))
+                        OffsetInfoMap[CurPtr], Changed, *LoadI->getType(),
+                        OffsetInfoMap))
        return false;
 
      auto IsAssumption = [](Instruction &I) {
@@ -1831,9 +1941,9 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
      Content = A.getAssumedSimplified(*Assumption.first, *this,
                                       UsedAssumedInformation,
                                       AA::Interprocedural);
 
-      return handleAccess(
-          A, *Assumption.second, Content, AccessKind::AK_ASSUMPTION,
-          OffsetInfoMap[CurPtr].Offsets, Changed, *LoadI->getType());
+      return handleAccess(A, *Assumption.second, Content,
+                          AccessKind::AK_ASSUMPTION, OffsetInfoMap[CurPtr],
+                          Changed, *LoadI->getType(), OffsetInfoMap);
    }
 
    auto HandleStoreLike = [&](Instruction &I, Value *ValueOp, Type &ValueTy,
@@ -1859,8 +1969,8 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
      if (ValueOp)
        Content = A.getAssumedSimplified(
            *ValueOp, *this, UsedAssumedInformation, AA::Interprocedural);
-      return handleAccess(A, I, Content, AK, OffsetInfoMap[CurPtr].Offsets,
-                          Changed, ValueTy);
+      return handleAccess(A, I, Content, AK, OffsetInfoMap[CurPtr], Changed,
+                          ValueTy, OffsetInfoMap);
    };
 
    if (auto *StoreI = dyn_cast<StoreInst>(Usr))
@@ -1983,8 +2093,8 @@ struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
      } else {
        auto Kind =
            ArgNo == 0 ? AccessKind::AK_MUST_WRITE : AccessKind::AK_MUST_READ;
-        Changed =
-            Changed | addAccess(A, {0, LengthVal}, *MI, nullptr, Kind, nullptr);
+        Changed = Changed | addAccess(A, {0, LengthVal}, *MI, nullptr, Kind,
+                                      nullptr, OffsetInfoMap);
      }
      LLVM_DEBUG({
        dbgs() << "Accesses by bin after update:\n";
@@ -2004,8 +2114,8 @@ struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
      auto *ArgAA =
          A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
      if (ArgAA && ArgAA->getState().isValidState())
-        return translateAndAddStateFromCallee(A, *ArgAA,
-                                              *cast<CallBase>(getCtxI()));
+        return translateAndAddStateFromCallee(
+            A, *ArgAA, *cast<CallBase>(getCtxI()), OffsetInfoMap);
      if (!Arg->getParent()->isDeclaration())
        return indicatePessimisticFixpoint();
    }
@@ -2022,7 +2132,7 @@ struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
    auto Kind =
        ReadOnly ? AccessKind::AK_MAY_READ : AccessKind::AK_MAY_READ_WRITE;
    return addAccess(A, AA::RangeTy::getUnknown(), *getCtxI(), nullptr, Kind,
-                     nullptr);
+                     nullptr, OffsetInfoMap);
  }
 
  /// See AbstractAttribute::trackStatistics()
@@ -12667,6 +12777,11 @@ struct AAAllocationInfoImpl : public AAAllocationInfo {
    return AssumedAllocatedSize;
  }
 
+  const NewOffsetsTy &getNewOffsets() const override {
+    assert(isValidState() && "the AA is invalid");
+    return NewComputedOffsets;
+  }
+
  std::optional<TypeSize>
  findInitialAllocationSize(Instruction *I, const DataLayout &DL) {
@@ -12711,42 +12826,53 @@ struct AAAllocationInfoImpl : public AAAllocationInfo {
    if (!AllocationSize)
      return indicatePessimisticFixpoint();
 
-    // For zero sized allocations, we give up.
-    // Since we can't reduce further
+    // For zero sized allocations, we give up
+    // because we cannot reduce them any further.
    if (*AllocationSize == 0)
      return indicatePessimisticFixpoint();
 
-    int64_t BinSize = PI->numOffsetBins();
-
-    // TODO: implement for multiple bins
-    if (BinSize > 1)
-      return indicatePessimisticFixpoint();
-
-    if (BinSize == 0) {
+    int64_t NumBins = PI->numOffsetBins();
+    if (NumBins == 0) {
      auto NewAllocationSize = std::optional<TypeSize>(TypeSize(0, false));
      if (!changeAllocationSize(NewAllocationSize))
        return ChangeStatus::UNCHANGED;
      return ChangeStatus::CHANGED;
    }
 
-    // TODO: refactor this to be part of multiple bin case
-    const auto &It = PI->begin();
+    // For each access bin we compute its new start offset and store the
+    // results in a new map (NewOffsetBins), which maps each old AA::RangeTy
+    // to its new AA::RangeTy.
+    unsigned long PrevBinEndOffset = 0;
+    bool ChangedOffsets = false;
+    for (AAPointerInfo::OffsetBinsTy::const_iterator It = PI->begin();
+         It != PI->end(); It++) {
+      const AA::RangeTy &OldRange = It->getFirst();
 
-    // TODO: handle if Offset is not zero
-    if (It->first.Offset != 0)
-      return indicatePessimisticFixpoint();
+      // If any byte range has an unknown offset or size, we should leave the
+      // original allocation unmodified.
+      if (OldRange.offsetOrSizeAreUnknown())
+        return indicatePessimisticFixpoint();
 
-    uint64_t SizeOfBin = It->first.Offset + It->first.Size;
+      unsigned long NewStartOffset = PrevBinEndOffset;
+      unsigned long NewEndOffset = NewStartOffset + OldRange.Size;
+      PrevBinEndOffset = NewEndOffset;
 
-    if (SizeOfBin >= *AllocationSize)
-      return indicatePessimisticFixpoint();
+      ChangedOffsets |= setNewOffsets(OldRange, OldRange.Offset, NewStartOffset,
+                                      OldRange.Size);
+    }
 
+    // Set the new size of the allocation: PrevBinEndOffset bytes, i.e.,
+    // PrevBinEndOffset * 8 bits.
    auto NewAllocationSize =
-        std::optional<TypeSize>(TypeSize(SizeOfBin * 8, false));
+        std::optional<TypeSize>(TypeSize(PrevBinEndOffset * 8, false));
    if (!changeAllocationSize(NewAllocationSize))
      return ChangeStatus::UNCHANGED;
 
+    if (!ChangedOffsets)
+      return ChangeStatus::UNCHANGED;
+
    return ChangeStatus::CHANGED;
  }
@@ -12756,39 +12882,314 @@ struct AAAllocationInfoImpl : public AAAllocationInfo {
    assert(isValidState() &&
           "Manifest should only be called if the state is valid.");
 
-    Instruction *I = getIRPosition().getCtxI();
+    bool Changed = false;
+    const IRPosition &IRP = getIRPosition();
+    Instruction *I = IRP.getCtxI();
+
+    // Check if simplified values exist.
+    if (checkIfSimplifiedValuesExists(A, I))
+      return ChangeStatus::UNCHANGED;
 
-    auto FixedAllocatedSizeInBits = getAllocatedSize()->getFixedValue();
+    if (getAllocatedSize() == HasNoAllocationSize)
+      return ChangeStatus::UNCHANGED;
 
-    unsigned long NumBytesToAllocate = (FixedAllocatedSizeInBits + 7) / 8;
+    const AAPointerInfo *PI =
+        A.getOrCreateAAFor<AAPointerInfo>(IRP, *this, DepClassTy::REQUIRED);
 
-    switch (I->getOpcode()) {
-    // TODO: add case for malloc like calls
-    case Instruction::Alloca: {
+    if (!PI)
+      return ChangeStatus::UNCHANGED;
 
-      AllocaInst *AI = cast<AllocaInst>(I);
+    assert(PI->getState().isValidState() &&
+           "[AAAllocationInfo]: AAPointerInfo was not in a valid state!");
 
-      Type *CharType = Type::getInt8Ty(I->getContext());
+    // Store a map where each instruction is mapped to a map from the old
+    // bins accessed by that instruction to the corresponding new bins in the
+    // allocation.
+    DenseMap<Instruction *, DenseMap<AA::RangeTy, AA::RangeTy>>
+        AccessedInstructionsToBinsMap;
 
-      auto *NumBytesToValue =
-          ConstantInt::get(I->getContext(), APInt(32, NumBytesToAllocate));
+    auto AddBins =
+        [](DenseMap<Instruction *, DenseMap<AA::RangeTy, AA::RangeTy>> &Map,
+           Instruction *LocalInst, const AA::RangeTy &OldRange,
+           const AA::RangeTy &NewRange) {
+          DenseMap<AA::RangeTy, AA::RangeTy> &NewBinsForInstruction =
+              Map.getOrInsertDefault(LocalInst);
 
-      BasicBlock::iterator insertPt = AI->getIterator();
-      insertPt = std::next(insertPt);
-      AllocaInst *NewAllocaInst =
-          new AllocaInst(CharType, AI->getAddressSpace(), NumBytesToValue,
-                         AI->getAlign(), AI->getName(), insertPt);
+          NewBinsForInstruction.insert(std::make_pair(OldRange, NewRange));
+        };
 
-      if (A.changeAfterManifest(IRPosition::inst(*AI), *NewAllocaInst))
-        return ChangeStatus::CHANGED;
+    const auto &NewOffsetsMap = getNewOffsets();
+    const auto &OffsetInfoMap = PI->getOffsetInfoMap();
+
+    // Map access-causing instructions to a pair of (old, new) bins.
+    // The access-causing instruction contains a pointer operand that is
+    // derived from the allocation. We may want to backtrack that pointer
+    // operand; two cases can arise:
+    // A) A GEP exists that calculates the pointer operand from the original
+    //    allocation instruction I.
+    // B) No such GEP exists, in which case we need to insert a GEP just
+    //    before the access-causing instruction that shifts the pointer by
+    //    the delta from the original offset.
+    for (AAPointerInfo::OffsetBinsTy::const_iterator It = PI->begin();
+         It != PI->end(); It++) {
+      const auto &OldOffsetRange = It->getFirst();
+
+      // If the OldOffsetRange is not in the map, the offsets for that bin
+      // did not change, so we simply skip it.
+      if (!NewOffsetsMap.contains(OldOffsetRange))
+        continue;
+
+      const auto &NewOffsetRange = NewOffsetsMap.lookup(OldOffsetRange);
+      for (const auto AccIndex : It->getSecond()) {
+        const auto &AccessInstruction = PI->getBinAccess(AccIndex);
+        Instruction *LocalInst = AccessInstruction.getLocalInst();
+
+        if (checkIfSimplifiedValuesExists(A, LocalInst))
+          return ChangeStatus::UNCHANGED;
+
+        if (checkIfAccessChainUsesMultipleBins(A, LocalInst, OffsetInfoMap))
+          return ChangeStatus::UNCHANGED;
+
+        // Check if we can backtrack the access-causing instruction to a GEP
+        // from the original allocation; if so, we prefer to change that GEP
+        // rather than insert a new one just before the access-causing
+        // instruction.
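+        // For example (a sketch): for `%g = getelementptr i8, ptr %alloc,
+        // i64 8` feeding `store i32 0, ptr %g`, case A applies: %g is found
+        // on the access path and its offset is rewritten to the new bin
+        // offset. If the store used %alloc directly at a non-zero old
+        // offset, case B would apply and a new GEP carrying the offset delta
+        // would be inserted right before the store.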
+        switch (LocalInst->getOpcode()) {
+        case Instruction::Call: {
+          CallInst *CallInstruction = cast<CallInst>(LocalInst);
+          for (auto *It = CallInstruction->op_begin();
+               It != CallInstruction->op_end(); It++) {
+            if (Instruction *OperandInstruction = dyn_cast<Instruction>(It)) {
+              // Operand does not cause an access in the current byte range.
+              if (!OffsetInfoMap.contains(OperandInstruction))
+                continue;
+
+              // Find the old offset and the corresponding new offset for the
+              // call argument.
+              auto OffsetsVecArg =
+                  OffsetInfoMap.lookup(OperandInstruction).Ranges;
+              int64_t OldOffsetArg = OffsetsVecArg.front().Offset;
+              int64_t NewOffsetArg = 0;
+              for (auto OldToNewRange : NewOffsetsMap) {
+                auto Old = OldToNewRange.getFirst();
+                if (Old.Offset == OldOffsetArg)
+                  NewOffsetArg = OldToNewRange.getSecond().Offset;
+              }
+
+              // If the offsets did not change, continue.
+              if (NewOffsetArg == OldOffsetArg)
+                continue;
+
+              // We don't have access to the size of the access here, but
+              // that is OK since we do not need it.
+              AA::RangeTy &CallArgOldRange = OffsetsVecArg.front();
+              AA::RangeTy CallArgNewRange =
+                  AA::RangeTy(NewOffsetArg, CallArgOldRange.Size);
+
+              // Find the chain that the call instruction is part of.
+              const AAPointerInfo::AccessPathSetTy *AccessPaths =
+                  AccessInstruction.getAccessChain();
+
+              const AAPointerInfo::AccessPathTy *ChainWithArg = nullptr;
+              for (auto *Chain : *AccessPaths) {
+                if (std::find(Chain->begin(), Chain->end(),
+                              OperandInstruction) != Chain->end()) {
+                  ChainWithArg = Chain;
+                }
+              }
+
+              bool BackTrackInstructionToGEP = false;
+              if (ChainWithArg) {
+                bool Exists = false;
+                for (auto *V : *ChainWithArg) {
+                  GetElementPtrInst *GepI = dyn_cast<GetElementPtrInst>(V);
+
+                  if (!GepI)
+                    continue;
+
+                  if (AccessedInstructionsToBinsMap.contains(GepI)) {
+                    Exists = true;
+                    continue;
+                  }
+
+                  // Check whether the GEP accesses the allocation.
+                  if (GepI->getPointerOperand() == I) {
+                    if (checkIfSimplifiedValuesExists(A, GepI))
+                      return ChangeStatus::UNCHANGED;
+
+                    AddBins(AccessedInstructionsToBinsMap, GepI,
+                            CallArgOldRange, CallArgNewRange);
+                    BackTrackInstructionToGEP = true;
+                  }
+                }
+
+                if (Exists)
+                  continue;
+              }
+              if (!BackTrackInstructionToGEP) {
+                AddBins(AccessedInstructionsToBinsMap, OperandInstruction,
+                        CallArgOldRange, CallArgNewRange);
+                continue;
+              }
+            }
+          }
+          break;
+        }
+        default: {
+          bool BackTrackInstructionToGEP = false;
+          bool Exists = false;
+          const AAPointerInfo::AccessPathSetTy *AccessPaths =
+              AccessInstruction.getAccessChain();
+          for (auto *Chain : *AccessPaths) {
+            for (auto *V : *Chain) {
+              GetElementPtrInst *GepI = dyn_cast<GetElementPtrInst>(V);
+
+              if (!GepI)
+                continue;
+
+              if (AccessedInstructionsToBinsMap.contains(GepI)) {
+                Exists = true;
+                continue;
+              }
+
+              // Check whether the GEP accesses the allocation.
+              if (GepI->getPointerOperand() == I) {
+                if (checkIfSimplifiedValuesExists(A, GepI))
+                  return ChangeStatus::UNCHANGED;
+
+                AddBins(AccessedInstructionsToBinsMap, GepI, OldOffsetRange,
+                        NewOffsetRange);
+                BackTrackInstructionToGEP = true;
+              }
+            }
+          }
+
+          if (Exists)
+            continue;
+
+          if (!BackTrackInstructionToGEP)
+            AddBins(AccessedInstructionsToBinsMap, LocalInst, OldOffsetRange,
+                    NewOffsetRange);
+
+          break;
+        }
+        }
+      }
+    }
+
+    unsigned long FixedAllocatedSizeInBits =
+        getAllocatedSize()->getFixedValue();
+    unsigned long NumBytesToAllocate = (FixedAllocatedSizeInBits + 7) / 8;
+    Type *NewAllocationType = nullptr;
+    switch (I->getOpcode()) {
+    // TODO: add case for malloc like calls
+    case Instruction::Alloca: {
+      AllocaInst *OldAllocaInst = cast<AllocaInst>(I);
+      const DataLayout &DL = A.getDataLayout();
+      auto OriginalAllocationSize = OldAllocaInst->getAllocationSizeInBits(DL);
+
+      if (*OriginalAllocationSize <= FixedAllocatedSizeInBits)
+        return ChangeStatus::UNCHANGED;
+
+      Type *CharType = Type::getInt8Ty(I->getContext());
+      Type *CharArrayType = ArrayType::get(CharType, NumBytesToAllocate);
+      NewAllocationType = CharArrayType;
+      BasicBlock::iterator InsertPt = OldAllocaInst->getIterator();
+      InsertPt = std::next(InsertPt);
+      Instruction *NewAllocationInstruction =
+          new AllocaInst(CharArrayType, OldAllocaInst->getAddressSpace(),
+                         OldAllocaInst->getName(), InsertPt);
+
+      Changed |= A.changeAfterManifest(IRPosition::inst(*I),
+                                       *NewAllocationInstruction);
+      A.deleteAfterManifest(*I);
      break;
    }
    default:
      break;
    }
 
-    return ChangeStatus::UNCHANGED;
+    for (auto &It : AccessedInstructionsToBinsMap) {
+      Instruction *LocalInst = It.first;
+      // Get a hold of a map, mapping old to new bins.
+      DenseMap<AA::RangeTy, AA::RangeTy> &OldToNewBins = It.second;
+      IntegerType *Int64TyInteger =
+          IntegerType::get(LocalInst->getContext(), 64);
+      switch (LocalInst->getOpcode()) {
+      case Instruction::Load: {
+        // The number of bytes to shift the load/store by.
+        int64_t OffsetOld = OldToNewBins.begin()->getFirst().Offset;
+        int64_t OffsetNew = OldToNewBins.begin()->getSecond().Offset;
+        LoadInst *OldLoadInst = cast<LoadInst>(LocalInst);
+        Instruction *PointerOperand =
+            cast<Instruction>(OldLoadInst->getPointerOperand());
+        Type *PointeeTy = OldLoadInst->getPointerOperandType();
+        int64_t ShiftValue = OffsetNew - OffsetOld;
+        Value *IndexList[1] = {ConstantInt::get(Int64TyInteger, ShiftValue)};
+        Value *GepToNewAddress = GetElementPtrInst::Create(
+            PointeeTy, PointerOperand, IndexList, "NewGep", OldLoadInst);
+
+        LoadInst *NewLoadInst = new LoadInst(
+            OldLoadInst->getType(), GepToNewAddress, OldLoadInst->getName(),
+            false, OldLoadInst->getAlign(), OldLoadInst);
+
+        Changed |=
+            A.changeAfterManifest(IRPosition::inst(*OldLoadInst), *NewLoadInst);
+
+        A.deleteAfterManifest(*OldLoadInst);
+        break;
+      }
+      case Instruction::Store: {
+        // The number of bytes to shift the load/store by.
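+        // E.g., an access whose bin moved from old offset 8 to new offset 4
+        // gets ShiftValue = 4 - 8 = -4, which becomes the index of the GEP
+        // created below to rebase the pointer operand.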
+        int64_t OffsetOld = OldToNewBins.begin()->getFirst().Offset;
+        int64_t OffsetNew = OldToNewBins.begin()->getSecond().Offset;
+        int64_t ShiftValue = OffsetNew - OffsetOld;
+        StoreInst *OldStoreInst = cast<StoreInst>(LocalInst);
+        Instruction *PointerOperand =
+            cast<Instruction>(OldStoreInst->getPointerOperand());
+        Type *PointeeTy = OldStoreInst->getPointerOperandType();
+        Value *IndexList[1] = {ConstantInt::get(Int64TyInteger, ShiftValue)};
+        Value *GepToNewAddress = GetElementPtrInst::Create(
+            PointeeTy, PointerOperand, IndexList, "NewGep", OldStoreInst);
+
+        StoreInst *NewStoreInst =
+            new StoreInst(OldStoreInst->getValueOperand(), GepToNewAddress,
+                          false, OldStoreInst->getAlign(), OldStoreInst);
+
+        Changed |= A.changeAfterManifest(IRPosition::inst(*OldStoreInst),
+                                         *NewStoreInst);
+
+        A.deleteAfterManifest(*OldStoreInst);
+        break;
+      }
+      case Instruction::GetElementPtr: {
+        GetElementPtrInst *OldGEP = cast<GetElementPtrInst>(LocalInst);
+        int64_t OffsetNew = OldToNewBins.begin()->getSecond().Offset;
+        Value *IndexList[1] = {ConstantInt::get(Int64TyInteger, OffsetNew)};
+        Value *OldPointerOperand = OldGEP->getPointerOperand();
+        Value *GepToNewAddress = GetElementPtrInst::Create(
+            NewAllocationType, OldPointerOperand, IndexList, "NewGep", OldGEP);
+
+        Changed |=
+            A.changeAfterManifest(IRPosition::inst(*OldGEP), *GepToNewAddress);
+
+        A.deleteAfterManifest(*OldGEP);
+        break;
+      }
+      default:
+        break;
+      }
+    }
+
+    if (!Changed)
+      return ChangeStatus::UNCHANGED;
+    return ChangeStatus::CHANGED;
  }
 
  /// See AbstractAttribute::getAsStr().
@@ -12802,8 +13203,28 @@ struct AAAllocationInfoImpl : public AAAllocationInfo {
           ")";
  }
 
+  void dumpNewOffsetBins(raw_ostream &O) {
+    O << "Printing the map from [OldOffsetRange] to [NewOffsetRange], for "
+         "bins whose offsets changed.\n";
+    const auto &NewOffsetsMap = getNewOffsets();
+    for (auto It = NewOffsetsMap.begin(); It != NewOffsetsMap.end(); It++) {
+      const auto &OldRange = It->getFirst();
+      const auto &NewRange = It->getSecond();
+
+      O << "[" << OldRange.Offset << "," << OldRange.Offset + OldRange.Size
+        << "] : ";
+      O << "[" << NewRange.Offset << "," << NewRange.Offset + NewRange.Size
+        << "]";
+      O << "\n";
+    }
+  }
+
private:
  std::optional<TypeSize> AssumedAllocatedSize = HasNoAllocationSize;
+  NewOffsetsTy NewComputedOffsets;

  // Maintain the computed allocation size of the object.
  // Returns (bool) weather the size of the allocation was modified or not.
@@ -12815,6 +13236,80 @@ struct AAAllocationInfoImpl : public AAAllocationInfo {
    }
    return false;
  }
+
+  // Maps an old byte range to its new offset range in the new allocation.
+  // Returns (bool) whether the old byte range's offsets changed or not.
+  bool setNewOffsets(const AA::RangeTy &OldRange, int64_t OldOffset,
+                     int64_t NewComputedOffset, int64_t Size) {
+    if (OldOffset == NewComputedOffset)
+      return false;
+
+    AA::RangeTy &NewRange = NewComputedOffsets.getOrInsertDefault(OldRange);
+    NewRange.Offset = NewComputedOffset;
+    NewRange.Size = Size;
+
+    return true;
+  }
+
+  // A helper function to check if simplified values exist for the current
+  // instruction. Right now we do not change the value; we give up on
+  // modifying the size and offsets of the allocation instead, but this may
+  // be sub-optimal.
+  // TODO: handle the case of a simplified value.
+  bool checkIfSimplifiedValuesExists(Attributor &A, Instruction *LocalInst) {
+    // If there are potential values that replace the accessed instruction, we
+    // should use those values instead.
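+    // For instance, if the access is a load that is already known to
+    // simplify to a constant, the load is presumably going to be replaced
+    // anyway, so shifting its pointer operand here would be wasted work and
+    // could conflict with that rewrite.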
+    bool UsedAssumedInformation = false;
+    SmallVector<AA::ValueAndContext> Values;
+    if (A.getAssumedSimplifiedValues(IRPosition::inst(*LocalInst), *this,
+                                     Values, AA::AnyScope,
+                                     UsedAssumedInformation))
+      for (auto &ValAndContext : Values)
+        // Don't modify the instruction if any simplified value exists.
+        if (ValAndContext.getValue() && ValAndContext.getValue() != LocalInst)
+          return true;
+
+    return false;
+  }
+
+  bool checkIfAccessChainUsesMultipleBins(
+      Attributor &A, Instruction *LocalInst,
+      const DenseMap<Value *, AAPointerInfo::OffsetInfo> &OffsetInfoMap) {
+    // Backtrack and check if any instruction in the access-causing chain
+    // accessed multiple byte ranges. If so, we currently give up.
+    SmallVector<Instruction *> ReadyList;
+    DenseMap<Instruction *, bool> Visited;
+    ReadyList.push_back(LocalInst);
+    while (!ReadyList.empty()) {
+      Instruction *GetBack = ReadyList.back();
+      ReadyList.pop_back();
+
+      if (!Visited.insert(std::make_pair(GetBack, true)).second)
+        continue;
+
+      // Check if the instruction has multiple bins; if so, give up. For
+      // calls it is okay to have multiple bins, since they may come from
+      // different call arguments and we can address them separately.
+      // TODO: handle when one instruction has multiple bins
+      auto OffsetsVecArg = OffsetInfoMap.lookup(GetBack).Ranges;
+      if (GetBack->getOpcode() != Instruction::Call && OffsetsVecArg.size() > 1)
+        return true;
+
+      for (auto *It = GetBack->op_begin(); It != GetBack->op_end(); It++) {
+        if (Instruction *Ins = dyn_cast<Instruction>(*It)) {
+          ReadyList.push_back(Ins);
+        }
+      }
+    }
+
+    return false;
+  }
};

struct AAAllocationInfoFloating : AAAllocationInfoImpl {
diff --git a/llvm/test/Transforms/Attributor/ArgumentPromotion/crash.ll b/llvm/test/Transforms/Attributor/ArgumentPromotion/crash.ll
index 595cb37c6c93e..f0efa2a0ae3c1 100644
--- a/llvm/test/Transforms/Attributor/ArgumentPromotion/crash.ll
+++ b/llvm/test/Transforms/Attributor/ArgumentPromotion/crash.ll
@@ -106,10 +106,8 @@ define i32 @test_inf_promote_caller(i32 %arg) {
; CGSCC-LABEL: define {{[^@]+}}@test_inf_promote_caller
; CGSCC-SAME: (i32 [[ARG:%.*]]) #[[ATTR3:[0-9]+]] {
; CGSCC-NEXT:  bb:
-; CGSCC-NEXT:    [[TMP:%.*]] = alloca [[S:%.*]], align 8
-; CGSCC-NEXT:    [[TMP3:%.*]] = alloca i8, i32 0, align 8
-; CGSCC-NEXT:    [[TMP1:%.*]] = alloca [[S]], align 8
-; CGSCC-NEXT:    [[TMP14:%.*]] = alloca i8, i32 0, align 8
+; CGSCC-NEXT:    [[TMP3:%.*]] = alloca [0 x i8], align 1
+; CGSCC-NEXT:    [[TMP14:%.*]] = alloca [0 x i8], align 1
; CGSCC-NEXT:    ret i32 0
;
bb:
diff --git a/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead.ll b/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead.ll
index 1c34fff8dd755..63dbc4da7da37 100644
--- a/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead.ll
+++ b/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead.ll
@@ -36,8 +36,7 @@ define internal i32 @caller(ptr %B) {
; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
; CGSCC-LABEL: define {{[^@]+}}@caller
; CGSCC-SAME: () #[[ATTR0]] {
-; CGSCC-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CGSCC-NEXT:    [[A1:%.*]] = alloca i8, i32 0, align 4
+; CGSCC-NEXT:    [[A1:%.*]] = alloca [0 x i8], align 1
; CGSCC-NEXT:    ret i32 0
;
  %A = alloca i32
diff --git a/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead_2.ll b/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead_2.ll
index b42647840f7cf..956fa0e88b028 100644
--- a/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead_2.ll
+++ b/llvm/test/Transforms/Attributor/ArgumentPromotion/live_called_from_dead_2.ll
@@ -53,8 +53,7 @@ define internal i32 @caller(ptr %B) {
; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write)
; CGSCC-LABEL: define {{[^@]+}}@caller
; CGSCC-SAME: (ptr noalias nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR0]] {
-; CGSCC-NEXT:    [[A:%.*]] = alloca i32, align 4
-; CGSCC-NEXT:    [[A1:%.*]] = alloca i8, i32 0, align 4
+; CGSCC-NEXT:    [[A1:%.*]] = alloca [0 x i8], align 1
; CGSCC-NEXT:    [[C:%.*]] = call i32 @test(ptr noalias nocapture nofree noundef nonnull writeonly align 4 dereferenceable(4) [[B]]) #[[ATTR2:[0-9]+]]
; CGSCC-NEXT:    ret i32 0
;
diff --git a/llvm/test/Transforms/Attributor/ArgumentPromotion/nonzero-address-spaces.ll b/llvm/test/Transforms/Attributor/ArgumentPromotion/nonzero-address-spaces.ll
index b588a399e5bd9..7b5e1276ac212 100644
--- a/llvm/test/Transforms/Attributor/ArgumentPromotion/nonzero-address-spaces.ll
+++ b/llvm/test/Transforms/Attributor/ArgumentPromotion/nonzero-address-spaces.ll
@@ -29,8 +29,7 @@ define internal i32 @foo(ptr) {
; CHECK-LABEL: define {{[^@]+}}@foo
; CHECK-SAME: () addrspace(1) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
-; CHECK-NEXT:    [[RETVAL1:%.*]] = alloca i8, i32 0, align 4
+; CHECK-NEXT:    [[RETVAL1:%.*]] = alloca [0 x i8], align 1
; CHECK-NEXT:    call addrspace(0) void asm sideeffect "ldr r0, [r0] \0Abx lr \0A", ""()
; CHECK-NEXT:    unreachable
;
diff --git a/llvm/test/Transforms/Attributor/IPConstantProp/pthreads.ll b/llvm/test/Transforms/Attributor/IPConstantProp/pthreads.ll
index 490894d129023..af2d1ef1eabba 100644
--- a/llvm/test/Transforms/Attributor/IPConstantProp/pthreads.ll
+++ b/llvm/test/Transforms/Attributor/IPConstantProp/pthreads.ll
@@ -34,8 +34,8 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define dso_local i32 @main() {
; TUNIT-LABEL: define {{[^@]+}}@main() {
; TUNIT-NEXT:  entry:
-; TUNIT-NEXT:    [[ALLOC11:%.*]] = alloca i8, i32 0, align 8
-; TUNIT-NEXT:    [[ALLOC22:%.*]] = alloca i8, i32 0, align 8
+; TUNIT-NEXT:    [[ALLOC11:%.*]] = alloca [0 x i8], align 1
+; TUNIT-NEXT:    [[ALLOC22:%.*]] = alloca [0 x i8], align 1
; TUNIT-NEXT:    [[THREAD:%.*]] = alloca i64, align 8
; TUNIT-NEXT:    [[CALL:%.*]] = call i32 @pthread_create(ptr noundef nonnull align 8 dereferenceable(8) [[THREAD]], ptr noundef align 4294967296 null, ptr noundef nonnull @foo, ptr nofree readnone align 4294967296 undef)
; TUNIT-NEXT:    [[CALL1:%.*]] = call i32 @pthread_create(ptr noundef nonnull align 8 dereferenceable(8) [[THREAD]], ptr noundef align 4294967296 null, ptr noundef nonnull @bar, ptr noalias nocapture nofree nonnull readnone align 8 dereferenceable(8) undef)
diff --git a/llvm/test/Transforms/Attributor/allocator.ll b/llvm/test/Transforms/Attributor/allocator.ll
index f2d9ecd1d8fa4..693386f053615 100644
--- a/llvm/test/Transforms/Attributor/allocator.ll
+++ b/llvm/test/Transforms/Attributor/allocator.ll
@@ -13,8 +13,8 @@ define dso_local void @positive_alloca_1(i32 noundef %val) #0 {
; CHECK-LABEL: define dso_local void @positive_alloca_1
; CHECK-SAME: (i32 noundef [[VAL:%.*]]) {
; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[VAL_ADDR1:%.*]] = alloca i8, i32 4, align 4
-; CHECK-NEXT:    [[F2:%.*]] = alloca i8, i32 4, align 4
+; CHECK-NEXT:    [[VAL_ADDR1:%.*]] = alloca [4 x i8], align 1
+; CHECK-NEXT:    [[F2:%.*]] = alloca [4 x i8], align 1
; CHECK-NEXT:    store i32 [[VAL]], ptr [[VAL_ADDR1]], align 4
; CHECK-NEXT:    store i32 10, ptr [[F2]], align 4
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[F2]], align 4
@@ -164,37 +164,52 @@ entry:
;TODO: The allocation can be reduced here.
;However, the offsets (load/store etc.) Need to be changed.
; Function Attrs: noinline nounwind uwtable
-define dso_local { i64, ptr } @positive_test_not_a_single_start_offset(i32 noundef %val) #0 {
-; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
-; CHECK-LABEL: define dso_local { i64, ptr } @positive_test_not_a_single_start_offset
-; CHECK-SAME: (i32 noundef [[VAL:%.*]]) #[[ATTR0:[0-9]+]] {
+define dso_local void @positive_test_not_a_single_start_offset(i32 noundef %val) #0 {
+; CHECK-LABEL: define dso_local void @positive_test_not_a_single_start_offset
+; CHECK-SAME: (i32 noundef [[VAL:%.*]]) {
; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FOO:%.*]], align 8
; CHECK-NEXT:    [[VAL_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[F1:%.*]] = alloca [5 x i8], align 1
; CHECK-NEXT:    store i32 [[VAL]], ptr [[VAL_ADDR]], align 4
-; CHECK-NEXT:    store i32 2, ptr [[RETVAL]], align 8
-; CHECK-NEXT:    [[FIELD3:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr [[RETVAL]], i32 0, i32 2
-; CHECK-NEXT:    store ptr [[VAL_ADDR]], ptr [[FIELD3]], align 8
-; CHECK-NEXT:    [[TMP0:%.*]] = load { i64, ptr }, ptr [[RETVAL]], align 8
-; CHECK-NEXT:    ret { i64, ptr } [[TMP0]]
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 2, [[VAL]]
+; CHECK-NEXT:    store i32 [[MUL]], ptr [[F1]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[F1]], align 4
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[TMP0]])
+; CHECK-NEXT:    [[NEWGEP:%.*]] = getelementptr [5 x i8], ptr [[F1]], i64 4
+; CHECK-NEXT:    [[CONV1:%.*]] = trunc i32 [[TMP0]] to i8
+; CHECK-NEXT:    store i8 [[CONV1]], ptr [[NEWGEP]], align 4
+; CHECK-NEXT:    [[NEWGEP2:%.*]] = getelementptr [5 x i8], ptr [[F1]], i64 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[NEWGEP2]], align 4
+; CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+; CHECK-NEXT:    [[CALL3:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[CONV]])
+; CHECK-NEXT:    ret void
;
entry:
-  %retval = alloca %struct.Foo, align 8
  %val.addr = alloca i32, align 4
+  %f = alloca %struct.Foo, align 4
  store i32 %val, ptr %val.addr, align 4
-  %field1 = getelementptr inbounds %struct.Foo, ptr %retval, i32 0, i32 0
-  store i32 2, ptr %field1, align 8
-  %field3 = getelementptr inbounds %struct.Foo, ptr %retval, i32 0, i32 2
-  store ptr %val.addr, ptr %field3, align 8
-  %0 = load { i64, ptr }, ptr %retval, align 8
-  ret { i64, ptr } %0
+  %0 = load i32, ptr %val.addr, align 4
+  %mul = mul nsw i32 2, %0
+  %a = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 0
+  store i32 %mul, ptr %a, align 4
+  %a1 = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 0
+  %1 = load i32, ptr %a1, align 4
+  %call = call i32 (ptr, ...) @printf(ptr noundef @.str, i32 noundef %1)
+  %c = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 2
+  %conv1 = trunc i32 %1 to i8
+  store i8 %conv1, ptr %c, align 4
+  %c2 = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 2
+  %2 = load i8, ptr %c2, align 4
+  %conv = sext i8 %2 to i32
+  %call3 = call i32 (ptr, ...) @printf(ptr noundef @.str, i32 noundef %conv)
+  ret void
}

; Function Attrs: noinline nounwind uwtable
define dso_local void @positive_test_reduce_array_allocation_1() {
; CHECK-LABEL: define dso_local void @positive_test_reduce_array_allocation_1() {
; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAY1:%.*]] = alloca i8, i32 4, align 8
+; CHECK-NEXT:    [[ARRAY1:%.*]] = alloca [4 x i8], align 1
; CHECK-NEXT:    store i32 0, ptr [[ARRAY1]], align 8
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ARRAY1]], align 8
; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[TMP0]], 2
@@ -275,37 +290,37 @@ entry:
define dso_local void @positive_test_reduce_array_allocation_2() #0 {
; CHECK-LABEL: define dso_local void @positive_test_reduce_array_allocation_2() {
; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ARRAY:%.*]] = alloca ptr, align 8
-; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[ARRAY1:%.*]] = alloca ptr, align 8
+; CHECK-NEXT:    [[I2:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[CALL:%.*]] = call noalias ptr @malloc(i64 noundef 40000)
-; CHECK-NEXT:    store ptr [[CALL]], ptr [[ARRAY]], align 8
-; CHECK-NEXT:    store i32 0, ptr [[I]], align 4
+; CHECK-NEXT:    store ptr [[CALL]], ptr [[ARRAY1]], align 8
+; CHECK-NEXT:    store i32 0, ptr [[I2]], align 4
; CHECK-NEXT:    br label [[FOR_COND:%.*]]
; CHECK:       for.cond:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP0]], 10000
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body:
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[I]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[I2]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP2]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[CALL]], i64 [[IDXPROM]]
; CHECK-NEXT:    store i32 [[TMP1]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT:    br label [[FOR_INC:%.*]]
; CHECK:       for.inc:
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP3]], 2
-; CHECK-NEXT:    store i32 [[ADD]], ptr [[I]], align 4
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[I2]], align 4
; CHECK-NEXT:    br label [[FOR_COND]]
; CHECK:       for.end:
-; CHECK-NEXT:    store i32 0, ptr [[I]], align 4
+; CHECK-NEXT:    store i32 0, ptr [[I2]], align 4
; CHECK-NEXT:    br label [[FOR_COND1:%.*]]
; CHECK:       for.cond1:
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP4]], 10000
; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_BODY3:%.*]], label [[FOR_END9:%.*]]
; CHECK:       for.body3:
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT:    [[IDXPROM4:%.*]] = sext i32 [[TMP5]] to i64
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[CALL]], i64 [[IDXPROM4]]
; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
@@ -313,28 +328,28 @@ define dso_local void @positive_test_reduce_array_allocation_2() #0 {
; CHECK-NEXT:    store i32 [[ADD6]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT:    br label [[FOR_INC7:%.*]]
; CHECK:       for.inc7:
-; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr [[I2]], align 4
; CHECK-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP7]], 2
-
CHECK-NEXT: store i32 [[ADD8]], ptr [[I]], align 4 +; CHECK-NEXT: store i32 [[ADD8]], ptr [[I2]], align 4 ; CHECK-NEXT: br label [[FOR_COND1]] ; CHECK: for.end9: -; CHECK-NEXT: store i32 0, ptr [[I]], align 4 +; CHECK-NEXT: store i32 0, ptr [[I2]], align 4 ; CHECK-NEXT: br label [[FOR_COND10:%.*]] ; CHECK: for.cond10: -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[I2]], align 4 ; CHECK-NEXT: [[CMP11:%.*]] = icmp slt i32 [[TMP8]], 10000 ; CHECK-NEXT: br i1 [[CMP11]], label [[FOR_BODY12:%.*]], label [[FOR_END18:%.*]] ; CHECK: for.body12: -; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[I2]], align 4 ; CHECK-NEXT: [[IDXPROM13:%.*]] = sext i32 [[TMP9]] to i64 ; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, ptr [[CALL]], i64 [[IDXPROM13]] ; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX14]], align 4 ; CHECK-NEXT: [[CALL15:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[TMP10]]) ; CHECK-NEXT: br label [[FOR_INC16:%.*]] ; CHECK: for.inc16: -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[I2]], align 4 ; CHECK-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP11]], 2 -; CHECK-NEXT: store i32 [[ADD17]], ptr [[I]], align 4 +; CHECK-NEXT: store i32 [[ADD17]], ptr [[I2]], align 4 ; CHECK-NEXT: br label [[FOR_COND10]] ; CHECK: for.end18: ; CHECK-NEXT: ret void @@ -426,7 +441,7 @@ define dso_local void @pthread_test(){ ; TUNIT-NEXT: [[ARG1:%.*]] = alloca i8, align 8 ; TUNIT-NEXT: [[THREAD:%.*]] = alloca i64, align 8 ; TUNIT-NEXT: [[CALL1:%.*]] = call i32 @pthread_create(ptr noundef nonnull align 8 dereferenceable(8) [[THREAD]], ptr noundef align 4294967296 null, ptr noundef nonnull @pthread_allocation_should_remain_same, ptr noundef nonnull align 8 dereferenceable(1) [[ARG1]]) -; TUNIT-NEXT: [[F1:%.*]] = alloca i8, i32 4, align 4 +; TUNIT-NEXT: [[F1:%.*]] = alloca [4 x i8], align 1 ; TUNIT-NEXT: [[CALL2:%.*]] = call i32 @pthread_create(ptr noundef nonnull align 8 dereferenceable(8) [[THREAD]], ptr noundef align 4294967296 null, ptr noundef nonnull @pthread_allocation_should_be_reduced, ptr noalias nocapture nofree nonnull readnone align 4 dereferenceable(12) undef) ; TUNIT-NEXT: [[F2:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4 ; TUNIT-NEXT: [[CALL3:%.*]] = call i32 @pthread_create(ptr noundef nonnull align 8 dereferenceable(8) [[THREAD]], ptr noundef align 4294967296 null, ptr noundef nonnull @pthread_check_captured_pointer, ptr noundef nonnull align 4 dereferenceable(12) [[F2]]) @@ -452,6 +467,46 @@ define dso_local void @pthread_test(){ ret void } + +define dso_local void @select_case(i1 %cond){ +; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(write) +; CHECK-LABEL: define dso_local void @select_case +; CHECK-SAME: (i1 [[COND:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[A:%.*]] = alloca [100 x i8], align 1 +; CHECK-NEXT: [[B:%.*]] = getelementptr inbounds [100 x i8], ptr [[A]], i64 0, i64 3 +; CHECK-NEXT: [[C:%.*]] = getelementptr inbounds [100 x i8], ptr [[A]], i64 0, i64 1 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND]], ptr [[B]], ptr [[C]] +; CHECK-NEXT: store i8 100, ptr [[SEL]], align 1 +; CHECK-NEXT: ret void +; + %a = alloca [100 x i8], align 1 + %b = getelementptr inbounds [100 x i8], ptr %a, i64 0, i64 3 + %c = getelementptr inbounds [100 x i8], ptr %a, i64 0, i64 1 + %sel = select i1 %cond, ptr %b, ptr %c + store i8 100, ptr 
%sel, align 1 + ret void +} + +define dso_local void @select_case_2(i1 %cond){ +; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(write) +; CHECK-LABEL: define dso_local void @select_case_2 +; CHECK-SAME: (i1 [[COND:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[A:%.*]] = alloca [100 x i32], align 1 +; CHECK-NEXT: [[B:%.*]] = getelementptr inbounds [100 x i32], ptr [[A]], i64 0, i64 3 +; CHECK-NEXT: [[C:%.*]] = getelementptr inbounds [100 x i32], ptr [[A]], i64 0, i64 1 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND]], ptr [[B]], ptr [[C]] +; CHECK-NEXT: store i8 100, ptr [[SEL]], align 1 +; CHECK-NEXT: ret void +; + %a = alloca [100 x i32], align 1 + %b = getelementptr inbounds [100 x i32], ptr %a, i64 0, i64 3 + %c = getelementptr inbounds [100 x i32], ptr %a, i64 0, i64 1 + %sel = select i1 %cond, ptr %b, ptr %c + %sel2 = getelementptr inbounds i32, ptr %sel, i64 0 + store i8 100, ptr %sel2, align 1 + ret void +} + define internal ptr @pthread_allocation_should_remain_same(ptr %arg) { ; CHECK-LABEL: define internal noundef nonnull align 8 dereferenceable(1) ptr @pthread_allocation_should_remain_same ; CHECK-SAME: (ptr noundef nonnull returned align 8 dereferenceable(1) [[ARG:%.*]]) { @@ -499,6 +554,58 @@ entry: ret void } +define dso_local void @alloca_array_multi_offset(){ +; CHECK: Function Attrs: nofree norecurse nosync nounwind memory(none) +; CHECK-LABEL: define dso_local void @alloca_array_multi_offset +; CHECK-SAME: () #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4 +; CHECK-NEXT: store i32 0, ptr [[I]], align 4 +; CHECK-NEXT: br label [[FOR_COND:%.*]] +; CHECK: for.cond: +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 10 +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] +; CHECK: for.body: +; CHECK-NEXT: br label [[FOR_INC:%.*]] +; CHECK: for.inc: +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 2 +; CHECK-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +; CHECK-NEXT: br label [[FOR_COND]] +; CHECK: for.end: +; CHECK-NEXT: ret void +; +entry: + %arr = alloca i8, i32 10, align 4 + %i = alloca i32, align 4 + store i32 0, ptr %i, align 4 + br label %for.cond + +for.cond: + %0 = load i32, ptr %i, align 4 + %cmp = icmp slt i32 %0, 10 + br i1 %cmp, label %for.body, label %for.end + +for.body: + %1 = load i32, ptr %i, align 4 + %2 = load ptr, ptr %arr, align 8 + %3 = load i32, ptr %i, align 4 + %arrayidx = getelementptr inbounds i32, ptr %2, i32 %3 + store i32 %1, ptr %arrayidx, align 4 + br label %for.inc + +for.inc: + %4 = load i32, ptr %i, align 4 + %add = add nsw i32 %4, 2 + store i32 %add, ptr %i, align 4 + br label %for.cond + +for.end: + ret void + +} + declare external void @external_call(ptr) @@ -511,9 +618,11 @@ declare i32 @printf(ptr noundef, ...) #1 ; Function Attrs: nounwind allocsize(0) declare noalias ptr @malloc(i64 noundef) #1 ;. -; TUNIT: attributes #[[ATTR0]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) } +; TUNIT: attributes #[[ATTR0]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(write) } +; TUNIT: attributes #[[ATTR1]] = { nofree norecurse nosync nounwind memory(none) } ;. 
-; CGSCC: attributes #[[ATTR0]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) } +; CGSCC: attributes #[[ATTR0]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(write) } +; CGSCC: attributes #[[ATTR1]] = { nofree norecurse nosync nounwind memory(none) } ;. ; TUNIT: [[META0:![0-9]+]] = !{[[META1:![0-9]+]]} ; TUNIT: [[META1]] = !{i64 2, i64 3, i1 false} diff --git a/llvm/test/Transforms/Attributor/call-simplify-pointer-info.ll b/llvm/test/Transforms/Attributor/call-simplify-pointer-info.ll index 5bb795911ce40..ebbfd44017aa5 100644 --- a/llvm/test/Transforms/Attributor/call-simplify-pointer-info.ll +++ b/llvm/test/Transforms/Attributor/call-simplify-pointer-info.ll @@ -36,8 +36,8 @@ define i8 @call_simplifiable_1() { ; TUNIT-LABEL: define {{[^@]+}}@call_simplifiable_1 ; TUNIT-SAME: () #[[ATTR0:[0-9]+]] { ; TUNIT-NEXT: entry: -; TUNIT-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16 -; TUNIT-NEXT: [[I0:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 2 +; TUNIT-NEXT: [[BYTES1:%.*]] = alloca [1 x i8], align 1 +; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr [1 x i8], ptr [[BYTES1]], i64 0 ; TUNIT-NEXT: ret i8 2 ; ; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(none) @@ -93,9 +93,9 @@ define i8 @call_simplifiable_2() { ; TUNIT-LABEL: define {{[^@]+}}@call_simplifiable_2 ; TUNIT-SAME: () #[[ATTR0]] { ; TUNIT-NEXT: entry: -; TUNIT-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16 -; TUNIT-NEXT: [[I0:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 2 -; TUNIT-NEXT: [[I1:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 3 +; TUNIT-NEXT: [[BYTES1:%.*]] = alloca [2 x i8], align 1 +; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr [2 x i8], ptr [[BYTES1]], i64 0 +; TUNIT-NEXT: [[NEWGEP2:%.*]] = getelementptr [2 x i8], ptr [[BYTES1]], i64 1 ; TUNIT-NEXT: ret i8 4 ; ; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(none) @@ -125,8 +125,8 @@ define i8 @call_simplifiable_3() { ; TUNIT-LABEL: define {{[^@]+}}@call_simplifiable_3 ; TUNIT-SAME: () #[[ATTR0]] { ; TUNIT-NEXT: entry: -; TUNIT-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16 -; TUNIT-NEXT: [[I2:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 2 +; TUNIT-NEXT: [[BYTES1:%.*]] = alloca [1 x i8], align 1 +; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr [1 x i8], ptr [[BYTES1]], i64 0 ; TUNIT-NEXT: ret i8 2 ; ; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(none) @@ -198,13 +198,13 @@ define i8 @call_partially_simplifiable_1() { ; TUNIT-LABEL: define {{[^@]+}}@call_partially_simplifiable_1 ; TUNIT-SAME: () #[[ATTR0]] { ; TUNIT-NEXT: entry: -; TUNIT-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16 -; TUNIT-NEXT: [[I2:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 2 -; TUNIT-NEXT: store i8 2, ptr [[I2]], align 2 -; TUNIT-NEXT: [[I3:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 3 -; TUNIT-NEXT: store i8 3, ptr [[I3]], align 1 -; TUNIT-NEXT: [[I4:%.*]] = getelementptr inbounds [1024 x i8], ptr [[BYTES]], i64 0, i64 4 -; TUNIT-NEXT: [[R:%.*]] = call i8 @sum_two_different_loads(ptr nocapture nofree noundef nonnull readonly align 2 dereferenceable(1022) [[I2]], ptr nocapture nofree noundef nonnull readonly dereferenceable(1021) [[I3]]) #[[ATTR3]] +; TUNIT-NEXT: [[BYTES1:%.*]] = alloca [3 x i8], align 1 +; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr [3 x i8], ptr [[BYTES1]], i64 0 +; TUNIT-NEXT: store 
i8 2, ptr [[NEWGEP]], align 2 +; TUNIT-NEXT: [[NEWGEP3:%.*]] = getelementptr [3 x i8], ptr [[BYTES1]], i64 1 +; TUNIT-NEXT: store i8 3, ptr [[NEWGEP3]], align 1 +; TUNIT-NEXT: [[NEWGEP2:%.*]] = getelementptr [3 x i8], ptr [[BYTES1]], i64 2 +; TUNIT-NEXT: [[R:%.*]] = call i8 @sum_two_different_loads(ptr nocapture nofree noundef nonnull readonly align 2 dereferenceable(1022) [[NEWGEP]], ptr nocapture nofree noundef nonnull readonly dereferenceable(1021) [[NEWGEP3]]) #[[ATTR3]] ; TUNIT-NEXT: ret i8 [[R]] ; ; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(none) diff --git a/llvm/test/Transforms/Attributor/heap_to_stack.ll b/llvm/test/Transforms/Attributor/heap_to_stack.ll index 33ac066e43d09..846373e05be1a 100644 --- a/llvm/test/Transforms/Attributor/heap_to_stack.ll +++ b/llvm/test/Transforms/Attributor/heap_to_stack.ll @@ -502,8 +502,7 @@ define i32 @malloc_in_loop(i32 %arg) { ; CHECK-SAME: (i32 [[ARG:%.*]]) { ; CHECK-NEXT: bb: ; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4 -; CHECK-NEXT: [[I1:%.*]] = alloca ptr, align 8 -; CHECK-NEXT: [[I11:%.*]] = alloca i8, i32 0, align 8 +; CHECK-NEXT: [[I11:%.*]] = alloca [0 x i8], align 1 ; CHECK-NEXT: store i32 [[ARG]], ptr [[I]], align 4 ; CHECK-NEXT: br label [[BB2:%.*]] ; CHECK: bb2: diff --git a/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll b/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll index 2a5b3e94291a2..70aace8100abd 100644 --- a/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll +++ b/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll @@ -452,8 +452,7 @@ define i32 @malloc_in_loop(i32 %arg) { ; CHECK-SAME: (i32 [[ARG:%.*]]) { ; CHECK-NEXT: bb: ; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4 -; CHECK-NEXT: [[I1:%.*]] = alloca ptr, align 8 -; CHECK-NEXT: [[I11:%.*]] = alloca i8, i32 0, align 8 +; CHECK-NEXT: [[I11:%.*]] = alloca [0 x i8], align 1 ; CHECK-NEXT: store i32 [[ARG]], ptr [[I]], align 4 ; CHECK-NEXT: br label [[BB2:%.*]] ; CHECK: bb2: diff --git a/llvm/test/Transforms/Attributor/liveness.ll b/llvm/test/Transforms/Attributor/liveness.ll index f17bd5795a174..9eb79f8a46723 100644 --- a/llvm/test/Transforms/Attributor/liveness.ll +++ b/llvm/test/Transforms/Attributor/liveness.ll @@ -2587,8 +2587,8 @@ define void @bad_gep() { ; TUNIT-LABEL: define {{[^@]+}}@bad_gep ; TUNIT-SAME: () #[[ATTR13]] { ; TUNIT-NEXT: entry: -; TUNIT-NEXT: [[N1:%.*]] = alloca i8, i32 0, align 1 -; TUNIT-NEXT: [[M2:%.*]] = alloca i8, i32 0, align 1 +; TUNIT-NEXT: [[N1:%.*]] = alloca [0 x i8], align 1 +; TUNIT-NEXT: [[M2:%.*]] = alloca [0 x i8], align 1 ; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1, ptr noalias nocapture nofree noundef nonnull dereferenceable(1) [[N1]]) #[[ATTR18:[0-9]+]] ; TUNIT-NEXT: br label [[EXIT:%.*]] ; TUNIT: while.body: @@ -2605,8 +2605,8 @@ define void @bad_gep() { ; CGSCC-LABEL: define {{[^@]+}}@bad_gep ; CGSCC-SAME: () #[[ATTR6]] { ; CGSCC-NEXT: entry: -; CGSCC-NEXT: [[N1:%.*]] = alloca i8, i32 0, align 1 -; CGSCC-NEXT: [[M2:%.*]] = alloca i8, i32 0, align 1 +; CGSCC-NEXT: [[N1:%.*]] = alloca [0 x i8], align 1 +; CGSCC-NEXT: [[M2:%.*]] = alloca [0 x i8], align 1 ; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1, ptr noalias nocapture nofree noundef nonnull dereferenceable(1) [[N1]]) #[[ATTR21:[0-9]+]] ; CGSCC-NEXT: br label [[EXIT:%.*]] ; CGSCC: while.body: diff --git a/llvm/test/Transforms/Attributor/nodelete.ll b/llvm/test/Transforms/Attributor/nodelete.ll index c28cb28379348..6357bf742bbf1 100644 --- a/llvm/test/Transforms/Attributor/nodelete.ll +++ 
b/llvm/test/Transforms/Attributor/nodelete.ll @@ -10,15 +10,14 @@ define hidden i64 @f1() align 2 { ; TUNIT-LABEL: define {{[^@]+}}@f1 ; TUNIT-SAME: () #[[ATTR0:[0-9]+]] align 2 { ; TUNIT-NEXT: entry: -; TUNIT-NEXT: [[REF_TMP1:%.*]] = alloca i8, i32 0, align 8 +; TUNIT-NEXT: [[REF_TMP1:%.*]] = alloca [0 x i8], align 1 ; TUNIT-NEXT: ret i64 undef ; ; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(none) ; CGSCC-LABEL: define {{[^@]+}}@f1 ; CGSCC-SAME: () #[[ATTR0:[0-9]+]] align 2 { ; CGSCC-NEXT: entry: -; CGSCC-NEXT: [[REF_TMP:%.*]] = alloca [[A:%.*]], align 8 -; CGSCC-NEXT: [[REF_TMP1:%.*]] = alloca i8, i32 0, align 8 +; CGSCC-NEXT: [[REF_TMP1:%.*]] = alloca [0 x i8], align 1 ; CGSCC-NEXT: [[CALL2:%.*]] = call i64 @f2() #[[ATTR2:[0-9]+]] ; CGSCC-NEXT: ret i64 [[CALL2]] ; diff --git a/llvm/test/Transforms/Attributor/pointer-info-track-access-chain.ll b/llvm/test/Transforms/Attributor/pointer-info-track-access-chain.ll new file mode 100644 index 0000000000000..b7c3f1f33191e --- /dev/null +++ b/llvm/test/Transforms/Attributor/pointer-info-track-access-chain.ll @@ -0,0 +1,387 @@ +; RUN: opt -aa-pipeline=basic-aa -passes=attributor -attributor-manifest-internal -debug-only=attributor -attributor-annotate-decl-cs -S < %s 2>&1 | FileCheck %s +; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -attributor-manifest-internal -debug-only=attributor -attributor-annotate-decl-cs -S < %s 2>&1 | FileCheck %s +; REQUIRES: asserts + + +@globalBytes = internal global [1024 x i8] zeroinitializer, align 16 + +; CHECK: Accesses by bin after update: +; CHECK: [8-12] : 1 +; CHECK: - 5 - %1 = load i32, ptr %field22, align 4 +; CHECK: - c: +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: %1 = load i32, ptr %field22, align 4 +; CHECK: %field22 = getelementptr i32, ptr %field2, i32 0 +; CHECK: %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: [4-5] : 1 +; CHECK: - 9 - store i8 10, ptr %field11, align 4 +; CHECK: - c: i8 10 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i8 10, ptr %field11, align 4 +; CHECK: %field11 = getelementptr i32, ptr %field1, i32 0 +; CHECK: %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: [32-36] : 1 +; CHECK: - 9 - store i32 %3, ptr %field8, align 4 +; CHECK: - c: %3 = load i32, ptr %val, align 4 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i32 %3, ptr %field8, align 4 +; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: [4-8] : 1 +; CHECK: - 5 - %0 = load i32, ptr %field11, align 4 +; CHECK: - c: +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: %0 = load i32, ptr %field11, align 4 +; CHECK: %field11 = getelementptr i32, ptr %field1, i32 0 +; CHECK: %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: [8-9] : 1 +; CHECK: - 9 - store i8 12, ptr %field22, align 4 +; CHECK: - c: i8 12 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i8 12, ptr %field22, align 4 +; CHECK: %field22 = getelementptr i32, ptr %field2, i32 0 +; CHECK: %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2 +; CHECK: %f = alloca [10 x i32], align 4 +define dso_local 
i32 @track_chain(ptr nocapture %val) #0 { +entry: + %f = alloca [10 x i32] + %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1 + %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2 + %field3 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 3 + %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8 + + %field11 = getelementptr i32, ptr %field1, i32 0 + %field22 = getelementptr i32, ptr %field2, i32 0 + store i8 10, ptr %field11, align 4 + store i8 12, ptr %field22, align 4 + + %0 = load i32, ptr %field11, align 4 + %1 = load i32, ptr %field22, align 4 + %2 = add i32 %0, %1 + + %3 = load i32, ptr %val, align 4 + store i32 %3, ptr %field8, align 4 + + %4 = add i32 %3, %2 + + ret i32 %4 +} + + +; CHECK: Accesses by bin after update: +; CHECK: [12-16] : 1 +; CHECK: - 5 - %0 = load i32, ptr %field11, align 4 +; CHECK: - c: +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: %0 = load i32, ptr %field11, align 4 +; CHECK: %field11 = getelementptr i32, ptr %field1, i32 2 +; CHECK: %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: [16-17] : 1 +; CHECK: - 9 - store i8 12, ptr %field22, align 4 +; CHECK: - c: i8 12 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i8 12, ptr %field22, align 4 +; CHECK: %field22 = getelementptr i32, ptr %field2, i32 2 +; CHECK: %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: [32-36] : 1 +; CHECK: - 9 - store i32 %3, ptr %field8, align 4 +; CHECK: - c: %3 = load i32, ptr %val, align 4 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i32 %3, ptr %field8, align 4 +; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: [16-20] : 1 +; CHECK: - 5 - %1 = load i32, ptr %field22, align 4 +; CHECK: - c: +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: %1 = load i32, ptr %field22, align 4 +; CHECK: %field22 = getelementptr i32, ptr %field2, i32 2 +; CHECK: %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: [12-13] : 1 +; CHECK: - 9 - store i8 10, ptr %field11, align 4 +; CHECK: - c: i8 10 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i8 10, ptr %field11, align 4 +; CHECK: %field11 = getelementptr i32, ptr %field1, i32 2 +; CHECK: %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1 +; CHECK: %f = alloca [10 x i32], align 4 +define dso_local i32 @track_chain_2(ptr nocapture %val) #0 { +entry: + %f = alloca [10 x i32] + %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1 + %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2 + %field3 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 3 + %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8 + + %field11 = getelementptr i32, ptr %field1, i32 2 + %field22 = getelementptr i32, ptr %field2, i32 2 + store i8 10, ptr %field11, align 4 + store i8 12, ptr %field22, align 4 + + %0 = load i32, ptr %field11, align 4 + %1 = load i32, ptr %field22, align 4 + %2 = add i32 %0, %1 + + %3 = load i32, ptr %val, align 4 + store i32 %3, ptr %field8, align 4 + + %4 = add i32 %3, %2 + + ret i32 %4 +} + +
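+; Note: in @track_chain_3 below, a select merges two GEP chains into one pointer use, so the backtracker records two access paths for the same store (one through %field3, one through %field8), as the CHECK lines show. +; CHECK: Accesses by bin after update: +;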
CHECK: [12-16] : 3 +; CHECK: - 5 - %0 = load i32, ptr %field11, align 4 +; CHECK: - c: +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: %0 = load i32, ptr %field11, align 4 +; CHECK: %field11 = getelementptr i32, ptr %field1, i32 2 +; CHECK: %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: - 5 - %b = load i32, ptr %field3, align 4 +; CHECK: - c: +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: %b = load i32, ptr %field3, align 4 +; CHECK: %field3 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 3 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: - 10 - store i32 1000, ptr %6, align 4 +; CHECK: - c: i32 1000 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i32 1000, ptr %6, align 4 +; CHECK: %6 = select i1 %cond, ptr %field3, ptr %field8 +; CHECK: %field3 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 3 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: Backtrack a unique access path: +; CHECK: store i32 1000, ptr %6, align 4 +; CHECK: %6 = select i1 %cond, ptr %field3, ptr %field8 +; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: [16-17] : 1 +; CHECK: - 9 - store i8 12, ptr %field22, align 4 +; CHECK: - c: i8 12 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i8 12, ptr %field22, align 4 +; CHECK: %field22 = getelementptr i32, ptr %field2, i32 2 +; CHECK: %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: [32-36] : 4 +; CHECK: - 9 - store i32 %3, ptr %field8, align 4 +; CHECK: - c: %3 = load i32, ptr %val, align 4 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i32 %3, ptr %field8, align 4 +; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: - 5 - %a1 = load i32, ptr %field8, align 4 +; CHECK: - c: +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: %a1 = load i32, ptr %field8, align 4 +; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: - 10 - store i32 1000, ptr %6, align 4 +; CHECK: - c: i32 1000 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i32 1000, ptr %6, align 4 +; CHECK: %6 = select i1 %cond, ptr %field3, ptr %field8 +; CHECK: %field3 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 3 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: Backtrack a unique access path: +; CHECK: store i32 1000, ptr %6, align 4 +; CHECK: %6 = select i1 %cond, ptr %field3, ptr %field8 +; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: - 5 - %8 = load i32, ptr %field8, align 4 +; CHECK: - c: +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: %8 = load i32, ptr %field8, align 4 +; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: [16-20] : 1 +; CHECK: - 5 - %1 = load i32, ptr %field22, align 4 +; CHECK: - c: +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: %1 = load 
i32, ptr %field22, align 4 +; CHECK: %field22 = getelementptr i32, ptr %field2, i32 2 +; CHECK: %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2 +; CHECK: %f = alloca [10 x i32], align 4 +; CHECK: [12-13] : 1 +; CHECK: - 9 - store i8 10, ptr %field11, align 4 +; CHECK: - c: i8 10 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i8 10, ptr %field11, align 4 +; CHECK: %field11 = getelementptr i32, ptr %field1, i32 2 +; CHECK: %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1 +; CHECK: %f = alloca [10 x i32], align 4 +define dso_local i32 @track_chain_3(ptr nocapture %val, i1 %cond) #0 { +entry: + %f = alloca [10 x i32] + %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1 + %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2 + %field3 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 3 + %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8 + + %field11 = getelementptr i32, ptr %field1, i32 2 + %field22 = getelementptr i32, ptr %field2, i32 2 + store i8 10, ptr %field11, align 4 + store i8 12, ptr %field22, align 4 + %0 = load i32, ptr %field11, align 4 + %1 = load i32, ptr %field22, align 4 + %2 = add i32 %0, %1 + %3 = load i32, ptr %val, align 4 + store i32 %3, ptr %field8, align 4 + %4 = add i32 %3, %2 + %5 = load i32, ptr %val + %a1 = load i32, ptr %field8 + %a = add i32 %a1, %5 + %b = load i32, ptr %field3 + %6 = select i1 %cond, ptr %field3, ptr %field8 + store i32 1000, ptr %6 + %7 = add i32 %4, %b + %8 = load i32, ptr %field8 + %9 = add i32 %8, %7 + ret i32 %9 +} +
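+; Note: @phi_different_offsets and @phi_same_offsets below write @globalBytes through a phi; the bins for the global aggregate accesses from both functions, and the load through the phi gets one backtracked access path per incoming GEP.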
+; CHECK: Accesses by bin after update: +; CHECK: [8-12] : 2 +; CHECK: - 9 - store i32 %0, ptr %field2, align 4 +; CHECK: - c: %0 = load i32, ptr %val, align 4 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i32 %0, ptr %field2, align 4 +; CHECK: %field2 = getelementptr i32, ptr @globalBytes, i32 2 +; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16 +; CHECK: - 6 - %ret = load i32, ptr %x, align 4 +; CHECK: - c: +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: %ret = load i32, ptr %x, align 4 +; CHECK: %x = phi ptr [ %field2, %then ], [ %field8, %else ] +; CHECK: %field2 = getelementptr i32, ptr @globalBytes, i32 2 +; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16 +; CHECK: Backtrack a unique access path: +; CHECK: %ret = load i32, ptr %x, align 4 +; CHECK: %x = phi ptr [ %field2, %then ], [ %field8, %else ] +; CHECK: %field8 = getelementptr i32, ptr @globalBytes, i32 8 +; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16 +; CHECK: [32-36] : 5 +; CHECK: - 6 - %ret = load i32, ptr %x, align 4 +; CHECK: - c: +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: %ret = load i32, ptr %x, align 4 +; CHECK: %x = phi ptr [ %field2, %then ], [ %field8, %else ] +; CHECK: %field2 = getelementptr i32, ptr @globalBytes, i32 2 +; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16 +; CHECK: Backtrack a unique access path: +; CHECK: %ret = load i32, ptr %x, align 4 +; CHECK: %x = phi ptr [ %field2, %then ], [ %field8, %else ] +; CHECK: %field8 = getelementptr i32, ptr @globalBytes, i32 8 +; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16 +; CHECK: - 9 - store i32 %1, ptr %field8, align 4 +; CHECK: - c: %1 = load i32, ptr %val2, align 4 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i32 %1, ptr %field8, align 4 +; CHECK: %field8 = getelementptr i32, ptr @globalBytes, i32 8 +; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16 +; CHECK: - 9 - store i32 %0, ptr %field2, align 4 +; CHECK: - c: %0 = load i32, ptr %val, align 4 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i32 %0, ptr %field2, align 4 +; CHECK: %field2 = getelementptr i32, ptr @globalBytes, i32 8 +; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16 +; CHECK: - 6 - %ret = load i32, ptr %x, align 4 +; CHECK: - c: +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: %ret = load i32, ptr %x, align 4 +; CHECK: %x = phi ptr [ %field2, %then ], [ %field8, %else ] +; CHECK: %field2 = getelementptr i32, ptr @globalBytes, i32 8 +; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16 +; CHECK: Backtrack a unique access path: +; CHECK: %ret = load i32, ptr %x, align 4 +; CHECK: %x = phi ptr [ %field2, %then ], [ %field8, %else ] +; CHECK: %field8 = getelementptr i32, ptr @globalBytes, i32 8 +; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16 +; CHECK: - 9 - store i32 %1, ptr %field8, align 4 +; CHECK: - c: %1 = load i32, ptr %val2, align 4 +; CHECK: Print all access paths found: +; CHECK: Backtrack a unique access path: +; CHECK: store i32 %1, ptr %field8, align 4 +; CHECK: %field8 = getelementptr i32, ptr @globalBytes, i32 8 +; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16 + +define dso_local i32 @phi_different_offsets(ptr nocapture %val, ptr nocapture %val2, i1 %cmp) { +entry: + br i1 %cmp, label %then, label %else + +then: + %field2 = getelementptr i32, ptr @globalBytes, i32 2 + %0 = load i32, ptr %val + store i32 %0, ptr %field2 + br label %end + +else: + %field8 = getelementptr i32, ptr @globalBytes, i32 8 + %1 = load i32, ptr %val2 + store i32 %1, ptr %field8 + br label %end + +end: + %x = phi ptr [ %field2, %then ], [ %field8, %else ] + %ret = load i32, ptr %x + ret i32 %ret + +} + +define dso_local i32 @phi_same_offsets(ptr nocapture %val, ptr nocapture %val2, i1 %cmp) { +entry: + br i1 %cmp, label %then, label %else + +then: + %field2 = getelementptr i32, ptr @globalBytes, i32 8 + %0 = load i32, ptr %val + store i32 %0, ptr %field2 + br label %end + +else: + %field8 = getelementptr i32, ptr @globalBytes, i32 8 + %1 = load i32, ptr %val2 + store i32 %1, ptr %field8 + br label %end + +end: + %x = phi ptr [ %field2, %then ], [ %field8, %else ] + %ret = load i32, ptr %x + ret i32 %ret +} \ No newline at end of file diff --git a/llvm/test/Transforms/Attributor/pointer-info.ll index 6afdbdaee317c..66dc0160a4e99 100644 --- a/llvm/test/Transforms/Attributor/pointer-info.ll +++ b/llvm/test/Transforms/Attributor/pointer-info.ll @@ -10,11 +10,11 @@ define void @foo(ptr %ptr) { ; TUNIT-LABEL: define {{[^@]+}}@foo ; TUNIT-SAME: (ptr nocapture nofree readnone [[PTR:%.*]]) #[[ATTR0:[0-9]+]] { ; TUNIT-NEXT: entry: -; TUNIT-NEXT: [[TMP0:%.*]] = alloca [[STRUCT_TEST_A:%.*]], align 8 +; TUNIT-NEXT: [[TMP0:%.*]] = alloca [8 x i8], align 1 ; TUNIT-NEXT: br label [[CALL_BR:%.*]] ; TUNIT: call.br: -; TUNIT-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_TEST_A]], ptr [[TMP0]], i64 0, i32 2 -; TUNIT-NEXT: tail call void @bar(ptr noalias nocapture
nofree noundef nonnull readonly byval([[STRUCT_TEST_A]]) align 8 dereferenceable(24) [[TMP0]]) #[[ATTR2:[0-9]+]] +; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr [8 x i8], ptr [[TMP0]], i64 0 +; TUNIT-NEXT: tail call void @bar(ptr noalias nocapture nofree noundef nonnull readonly byval([[STRUCT_TEST_A:%.*]]) align 8 dereferenceable(24) [[TMP0]]) #[[ATTR2:[0-9]+]] ; TUNIT-NEXT: ret void ; ; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(none) diff --git a/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll b/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll index 7a35b5c856097..07b25f6232436 100644 --- a/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll +++ b/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll @@ -2666,18 +2666,18 @@ define dso_local void @test_nested_memory(ptr %dst, ptr %src) { ; TUNIT-SAME: (ptr nocapture nofree writeonly [[DST:%.*]], ptr nocapture nofree readonly [[SRC:%.*]]) { ; TUNIT-NEXT: entry: ; TUNIT-NEXT: [[CALL_H2S:%.*]] = alloca i8, i64 24, align 1 -; TUNIT-NEXT: [[LOCAL:%.*]] = alloca [[STRUCT_STY:%.*]], align 8 -; TUNIT-NEXT: [[INNER:%.*]] = getelementptr inbounds [[STRUCT_STY]], ptr [[LOCAL]], i64 0, i32 2 -; TUNIT-NEXT: store ptr @global, ptr [[INNER]], align 8 +; TUNIT-NEXT: [[LOCAL1:%.*]] = alloca [8 x i8], align 1 +; TUNIT-NEXT: [[NEWGEP:%.*]] = getelementptr [8 x i8], ptr [[LOCAL1]], i64 0 +; TUNIT-NEXT: store ptr @global, ptr [[NEWGEP]], align 8 ; TUNIT-NEXT: store ptr [[DST]], ptr [[CALL_H2S]], align 8 ; TUNIT-NEXT: [[SRC2:%.*]] = getelementptr inbounds i8, ptr [[CALL_H2S]], i64 8 ; TUNIT-NEXT: store ptr [[SRC]], ptr [[SRC2]], align 8 -; TUNIT-NEXT: store ptr [[CALL_H2S]], ptr getelementptr inbounds ([[STRUCT_STY]], ptr @global, i64 0, i32 2), align 8 -; TUNIT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[LOCAL]], align 8 -; TUNIT-NEXT: [[LOCAL_B8:%.*]] = getelementptr i8, ptr [[LOCAL]], i64 8 -; TUNIT-NEXT: [[TMP1:%.*]] = load ptr, ptr [[LOCAL_B8]], align 8 -; TUNIT-NEXT: [[LOCAL_B16:%.*]] = getelementptr i8, ptr [[LOCAL]], i64 16 -; TUNIT-NEXT: [[TMP2:%.*]] = load ptr, ptr [[LOCAL_B16]], align 8 +; TUNIT-NEXT: store ptr [[CALL_H2S]], ptr getelementptr inbounds ([[STRUCT_STY:%.*]], ptr @global, i64 0, i32 2), align 8 +; TUNIT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[LOCAL1]], align 8 +; TUNIT-NEXT: [[LOCAL1_B8:%.*]] = getelementptr i8, ptr [[LOCAL1]], i64 8 +; TUNIT-NEXT: [[TMP1:%.*]] = load ptr, ptr [[LOCAL1_B8]], align 8 +; TUNIT-NEXT: [[LOCAL1_B16:%.*]] = getelementptr i8, ptr [[LOCAL1]], i64 16 +; TUNIT-NEXT: [[TMP2:%.*]] = load ptr, ptr [[LOCAL1_B16]], align 8 ; TUNIT-NEXT: call fastcc void @nested_memory_callee(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]]) #[[ATTR21:[0-9]+]] ; TUNIT-NEXT: ret void ;