//===----------------------- AlignmentFromAssumptions.cpp -----------------===//
//                  Set Load/Store Alignments From Assumptions
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a ScalarEvolution-based transformation to set
// the alignments of loads, stores, and memory intrinsics based on the truth
// expressions of assume intrinsics. The primary motivation is to handle
// complex alignment assumptions that apply to vector loads and stores that
// appear after vectorization and unrolling.
//
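// For example, an assumption that %a is 32-byte aligned is typically
// expressed in IR as a mask test on the pointer's low bits (a sketch; the
// value names are illustrative):
//
//   %ptrint = ptrtoint float* %a to i64
//   %maskedptr = and i64 %ptrint, 31
//   %maskcond = icmp eq i64 %maskedptr, 0
//   call void @llvm.assume(i1 %maskcond)
//
// extractAlignmentInfo below recognizes this x & m == 0 pattern, possibly
// with an offset added to the ptrtoint.
//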
//===----------------------------------------------------------------------===//

#include "llvm/InitializePasses.h"
#define AA_NAME "alignment-from-assumptions"
#define DEBUG_TYPE AA_NAME
#include "llvm/Transforms/Scalar/AlignmentFromAssumptions.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

STATISTIC(NumLoadAlignChanged,
  "Number of loads changed by alignment assumptions");
STATISTIC(NumStoreAlignChanged,
  "Number of stores changed by alignment assumptions");
STATISTIC(NumMemIntAlignChanged,
  "Number of memory intrinsics changed by alignment assumptions");

namespace {
struct AlignmentFromAssumptions : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  AlignmentFromAssumptions() : FunctionPass(ID) {
    initializeAlignmentFromAssumptionsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();

    AU.setPreservesCFG();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
  }

  AlignmentFromAssumptionsPass Impl;
};
}

char AlignmentFromAssumptions::ID = 0;
static const char aip_name[] = "Alignment from assumptions";
INITIALIZE_PASS_BEGIN(AlignmentFromAssumptions, AA_NAME,
                      aip_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(AlignmentFromAssumptions, AA_NAME,
                    aip_name, false, false)

FunctionPass *llvm::createAlignmentFromAssumptionsPass() {
  return new AlignmentFromAssumptions();
}

// Given an expression for the (constant) alignment, AlignSCEV, and an
// expression for the displacement between a pointer and the aligned address,
// DiffSCEV, compute the alignment of the displaced pointer if it can be
// reduced to a constant. Using SCEV to compute alignment handles the case
// where DiffSCEV is a recurrence with constant start such that the aligned
// offset is constant, e.g. {16,+,32} % 32 -> 16.
static MaybeAlign getNewAlignmentDiff(const SCEV *DiffSCEV,
                                      const SCEV *AlignSCEV,
                                      ScalarEvolution *SE) {
  // DiffUnits = Diff % int64_t(Alignment)
  const SCEV *DiffUnitsSCEV = SE->getURemExpr(DiffSCEV, AlignSCEV);

  LLVM_DEBUG(dbgs() << "\talignment relative to " << *AlignSCEV << " is "
                    << *DiffUnitsSCEV << " (diff: " << *DiffSCEV << ")\n");

  if (const SCEVConstant *ConstDUSCEV =
      dyn_cast<SCEVConstant>(DiffUnitsSCEV)) {
    int64_t DiffUnits = ConstDUSCEV->getValue()->getSExtValue();

    // If the displacement is an exact multiple of the alignment, then the
    // displaced pointer has the same alignment as the aligned pointer, so
    // return the alignment value.
    if (!DiffUnits)
      return cast<SCEVConstant>(AlignSCEV)->getValue()->getAlignValue();

    // If the displacement is not an exact multiple, but the remainder is a
    // constant, then return this remainder (but only if it is a power of 2).
    uint64_t DiffUnitsAbs = std::abs(DiffUnits);
    if (isPowerOf2_64(DiffUnitsAbs))
      return Align(DiffUnitsAbs);
  }

  return None;
}

// The address AASCEV + OffSCEV is known to have alignment AlignSCEV. Use
// that information, if possible, to compute a new alignment for Ptr.
static Align getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
                             const SCEV *OffSCEV, Value *Ptr,
                             ScalarEvolution *SE) {
  const SCEV *PtrSCEV = SE->getSCEV(Ptr);
  // On a platform with 32-bit allocas, but 64-bit flat/global pointer sizes
  // (*cough* AMDGPU), the effective SCEV type of AASCEV and PtrSCEV
  // may disagree. Trunc/extend so they agree.
  PtrSCEV = SE->getTruncateOrZeroExtend(
      PtrSCEV, SE->getEffectiveSCEVType(AASCEV->getType()));
  const SCEV *DiffSCEV = SE->getMinusSCEV(PtrSCEV, AASCEV);

  // On 32-bit platforms, DiffSCEV might now have type i32 -- we've always
  // sign-extended OffSCEV to i64, so make sure they agree again.
  DiffSCEV = SE->getNoopOrSignExtend(DiffSCEV, OffSCEV->getType());

  // What we really want to know is the overall offset to the aligned
  // address. This address is displaced by the provided offset.
  DiffSCEV = SE->getMinusSCEV(DiffSCEV, OffSCEV);

  LLVM_DEBUG(dbgs() << "AFI: alignment of " << *Ptr << " relative to "
                    << *AlignSCEV << " and offset " << *OffSCEV
                    << " using diff " << *DiffSCEV << "\n");

  if (MaybeAlign NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE)) {
    LLVM_DEBUG(dbgs() << "\tnew alignment: " << DebugStr(NewAlignment) << "\n");
    return *NewAlignment;
  }

  if (const SCEVAddRecExpr *DiffARSCEV = dyn_cast<SCEVAddRecExpr>(DiffSCEV)) {
    // The relative offset to the alignment assumption did not yield a
    // constant, but we should try harder: if we assume that a is 32-byte
    // aligned, then in the loop for (i = 0; i < 1024; i += 4) r += a[i]; not
    // all of the loads from a are 32-byte aligned; instead they alternate
    // between 32-byte and 16-byte alignment. As a result, the new alignment
    // will not be a constant, but it can still be improved over the default
    // (of 4) to 16.
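    // Concretely, for that example (a sketch assuming 4-byte elements): the
    // byte offsets of the loads follow the recurrence {0,+,16}, so relative
    // to the 32-byte-aligned base the start offset yields alignment 32 and
    // the per-iteration increment yields alignment 16; the code below then
    // returns the smaller of the two, 16.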

    const SCEV *DiffStartSCEV = DiffARSCEV->getStart();
    const SCEV *DiffIncSCEV = DiffARSCEV->getStepRecurrence(*SE);

    LLVM_DEBUG(dbgs() << "\ttrying start/inc alignment using start "
                      << *DiffStartSCEV << " and inc " << *DiffIncSCEV << "\n");

    // Now compute the new alignment using the displacement to the value in the
    // first iteration, and also the alignment using the per-iteration delta.
    // If these are the same, then use that answer. Otherwise, use the smaller
    // one; since both are powers of two, it necessarily divides the larger one.
    MaybeAlign NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE);
    MaybeAlign NewIncAlignment =
        getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE);

    LLVM_DEBUG(dbgs() << "\tnew start alignment: " << DebugStr(NewAlignment)
                      << "\n");
    LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << DebugStr(NewIncAlignment)
                      << "\n");

    if (!NewAlignment || !NewIncAlignment)
      return Align(1);

    const Align NewAlign = *NewAlignment;
    const Align NewIncAlign = *NewIncAlignment;
    if (NewAlign > NewIncAlign) {
      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: "
                        << DebugStr(NewIncAlign) << "\n");
      return NewIncAlign;
    }
    if (NewIncAlign > NewAlign) {
      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << DebugStr(NewAlign)
                        << "\n");
      return NewAlign;
    }
    assert(NewIncAlign == NewAlign);
    LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << DebugStr(NewAlign)
                      << "\n");
    return NewAlign;
  }

  return Align(1);
}

bool AlignmentFromAssumptionsPass::extractAlignmentInfo(CallInst *I,
                                                        Value *&AAPtr,
                                                        const SCEV *&AlignSCEV,
                                                        const SCEV *&OffSCEV) {
  // An alignment assume must be a statement about the least-significant
  // bits of the pointer being zero, possibly with some offset.
  ICmpInst *ICI = dyn_cast<ICmpInst>(I->getArgOperand(0));
  if (!ICI)
    return false;

  // This must be an expression of the form: x & m == 0.
  if (ICI->getPredicate() != ICmpInst::ICMP_EQ)
    return false;

  // Swap things around so that the RHS is 0.
  Value *CmpLHS = ICI->getOperand(0);
  Value *CmpRHS = ICI->getOperand(1);
  const SCEV *CmpLHSSCEV = SE->getSCEV(CmpLHS);
  const SCEV *CmpRHSSCEV = SE->getSCEV(CmpRHS);
  if (CmpLHSSCEV->isZero())
    std::swap(CmpLHS, CmpRHS);
  else if (!CmpRHSSCEV->isZero())
    return false;

  BinaryOperator *CmpBO = dyn_cast<BinaryOperator>(CmpLHS);
  if (!CmpBO || CmpBO->getOpcode() != Instruction::And)
    return false;

  // Swap things around so that the right operand of the and is a constant
  // (the mask); we cannot deal with variable masks.
  Value *AndLHS = CmpBO->getOperand(0);
  Value *AndRHS = CmpBO->getOperand(1);
  const SCEV *AndLHSSCEV = SE->getSCEV(AndLHS);
  const SCEV *AndRHSSCEV = SE->getSCEV(AndRHS);
  if (isa<SCEVConstant>(AndLHSSCEV)) {
    std::swap(AndLHS, AndRHS);
    std::swap(AndLHSSCEV, AndRHSSCEV);
  }

  const SCEVConstant *MaskSCEV = dyn_cast<SCEVConstant>(AndRHSSCEV);
  if (!MaskSCEV)
    return false;

  // The mask must have some trailing ones (otherwise the condition is
  // trivial and tells us nothing about the alignment of the left operand).
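  // For example, a mask of 31 (0b11111) has five trailing ones, so
  // x & 31 == 0 proves that x is 2^5 = 32-byte aligned.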
  unsigned TrailingOnes = MaskSCEV->getAPInt().countTrailingOnes();
  if (!TrailingOnes)
    return false;

  // Cap the alignment at the maximum with which LLVM can deal (and make sure
  // we don't overflow the shift).
  uint64_t Alignment;
  TrailingOnes = std::min(TrailingOnes,
    unsigned(sizeof(unsigned) * CHAR_BIT - 1));
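  // Note: the unary '+' below forms an rvalue so that std::min's
  // const-reference parameters do not odr-use the static constexpr
  // Value::MaximumAlignment (a common idiom, not a behavior change).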
  Alignment = std::min(1u << TrailingOnes, +Value::MaximumAlignment);

  Type *Int64Ty = Type::getInt64Ty(I->getParent()->getParent()->getContext());
  AlignSCEV = SE->getConstant(Int64Ty, Alignment);

  // The LHS might be a ptrtoint instruction, or it might be the pointer
  // with an offset.
  AAPtr = nullptr;
  OffSCEV = nullptr;
  if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(AndLHS)) {
    AAPtr = PToI->getPointerOperand();
    OffSCEV = SE->getZero(Int64Ty);
  } else if (const SCEVAddExpr *AndLHSAddSCEV =
             dyn_cast<SCEVAddExpr>(AndLHSSCEV)) {
    // Try to find the ptrtoint; subtract it and the rest is the offset.
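    // For example, if the and-operand is (ptrtoint %p) + %off, then AAPtr
    // becomes %p and OffSCEV becomes %off (names here are illustrative).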
    for (SCEVAddExpr::op_iterator J = AndLHSAddSCEV->op_begin(),
         JE = AndLHSAddSCEV->op_end(); J != JE; ++J)
      if (const SCEVUnknown *OpUnk = dyn_cast<SCEVUnknown>(*J))
        if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(OpUnk->getValue())) {
          AAPtr = PToI->getPointerOperand();
          OffSCEV = SE->getMinusSCEV(AndLHSAddSCEV, *J);
          break;
        }
  }

  if (!AAPtr)
    return false;

  // Sign extend the offset to 64 bits (so that it is like all of the other
  // expressions).
  unsigned OffSCEVBits = OffSCEV->getType()->getPrimitiveSizeInBits();
  if (OffSCEVBits < 64)
    OffSCEV = SE->getSignExtendExpr(OffSCEV, Int64Ty);
  else if (OffSCEVBits > 64)
    return false;

  AAPtr = AAPtr->stripPointerCasts();
  return true;
}

bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
  Value *AAPtr;
  const SCEV *AlignSCEV, *OffSCEV;
  if (!extractAlignmentInfo(ACall, AAPtr, AlignSCEV, OffSCEV))
    return false;

  // Skip ConstantPointerNull and UndefValue.  Assumptions on these shouldn't
  // affect other users.
  if (isa<ConstantData>(AAPtr))
    return false;

  const SCEV *AASCEV = SE->getSCEV(AAPtr);

  // Apply the assumption to all other users of the specified pointer.
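  // Only instructions for which the assumption provably holds at their
  // program point (per isValidAssumeForContext) are added to the worklist.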
  SmallPtrSet<Instruction *, 32> Visited;
  SmallVector<Instruction *, 16> WorkList;
  for (User *J : AAPtr->users()) {
    if (J == ACall)
      continue;

    if (Instruction *K = dyn_cast<Instruction>(J))
      if (isValidAssumeForContext(ACall, K, DT))
        WorkList.push_back(K);
  }

  while (!WorkList.empty()) {
    Instruction *J = WorkList.pop_back_val();
    if (LoadInst *LI = dyn_cast<LoadInst>(J)) {
      Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                           LI->getPointerOperand(), SE);
      if (NewAlignment > LI->getAlign()) {
        LI->setAlignment(NewAlignment);
        ++NumLoadAlignChanged;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
      Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                           SI->getPointerOperand(), SE);
      if (NewAlignment > SI->getAlign()) {
        SI->setAlignment(NewAlignment);
        ++NumStoreAlignChanged;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
      Align NewDestAlignment =
          getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE);

      LLVM_DEBUG(dbgs() << "\tmem inst: " << DebugStr(NewDestAlignment)
                        << "\n";);
      if (NewDestAlignment > *MI->getDestAlign()) {
        MI->setDestAlignment(NewDestAlignment);
        ++NumMemIntAlignChanged;
      }

      // For memory transfers, there is also a source alignment that
      // can be set.
      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
        Align NewSrcAlignment =
            getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MTI->getSource(), SE);

        LLVM_DEBUG(dbgs() << "\tmem trans: " << DebugStr(NewSrcAlignment)
                          << "\n";);

        if (NewSrcAlignment > *MTI->getSourceAlign()) {
          MTI->setSourceAlignment(NewSrcAlignment);
          ++NumMemIntAlignChanged;
        }
      }
    }

    // Now that we've updated that use of the pointer, look for other uses of
    // the pointer to update.
    Visited.insert(J);
    for (User *UJ : J->users()) {
      Instruction *K = cast<Instruction>(UJ);
      if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DT))
        WorkList.push_back(K);
    }
  }

  return true;
}

bool AlignmentFromAssumptions::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  return Impl.runImpl(F, AC, SE, DT);
}

bool AlignmentFromAssumptionsPass::runImpl(Function &F, AssumptionCache &AC,
                                           ScalarEvolution *SE_,
                                           DominatorTree *DT_) {
  SE = SE_;
  DT = DT_;

  bool Changed = false;
  for (auto &AssumeVH : AC.assumptions())
    if (AssumeVH)
      Changed |= processAssumption(cast<CallInst>(AssumeVH));

  return Changed;
}

PreservedAnalyses
AlignmentFromAssumptionsPass::run(Function &F, FunctionAnalysisManager &AM) {

  AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F);
  ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, AC, &SE, &DT))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<AAManager>();
  PA.preserve<ScalarEvolutionAnalysis>();
  PA.preserve<GlobalsAA>();
  return PA;
}