//===- InferAlignment.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Infer alignment for loads, stores and other memory operations based on
// trailing-zero known-bits information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/InferAlignment.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

// Try to raise the alignment of a load or store: Fn computes the best
// alignment it can prove for the pointer operand, and the instruction is
// updated only if that beats its current alignment.
static bool tryToImproveAlign(
    const DataLayout &DL, Instruction *I,
    function_ref<Align(Value *PtrOp, Align OldAlign, Align PrefAlign)> Fn) {
  if (auto *PtrOp = getLoadStorePointerOperand(I)) {
    Align OldAlign = getLoadStoreAlignment(I);
    Align PrefAlign = DL.getPrefTypeAlign(getLoadStoreType(I));

    Align NewAlign = Fn(PtrOp, OldAlign, PrefAlign);
    if (NewAlign > OldAlign) {
      setLoadStoreAlignment(I, NewAlign);
      return true;
    }
  }
  // TODO: Also handle memory intrinsics.
  return false;
}

static bool inferAlignment(Function &F, AssumptionCache &AC,
                           DominatorTree &DT) {
  const DataLayout &DL = F.getDataLayout();
  bool Changed = false;

  // Enforce preferred type alignment if possible. We do this as a separate
  // pass first, because it may improve the alignments we infer below.
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      Changed |= tryToImproveAlign(
          DL, &I, [&](Value *PtrOp, Align OldAlign, Align PrefAlign) {
            if (PrefAlign > OldAlign)
              return std::max(OldAlign,
                              tryEnforceAlignment(PtrOp, PrefAlign, DL));
            return OldAlign;
          });
    }
  }

  // Compute alignment from known bits.
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      Changed |= tryToImproveAlign(
          DL, &I, [&](Value *PtrOp, Align OldAlign, Align PrefAlign) {
            KnownBits Known = computeKnownBits(PtrOp, DL, 0, &AC, &I, &DT);
            // A pointer with N known trailing zero bits is 2^N-aligned.
            // Clamp N to Value::MaxAlignmentExponent (the largest alignment
            // IR can represent) and keep it below the bit width so the
            // shift stays in range.
            unsigned TrailZ = std::min(Known.countMinTrailingZeros(),
                                       +Value::MaxAlignmentExponent);
            return Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
          });
    }
  }

  return Changed;
}

PreservedAnalyses InferAlignmentPass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  inferAlignment(F, AC, DT);
  // Changes to alignment shouldn't invalidate analyses.
  return PreservedAnalyses::all();
}
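
//===----------------------------------------------------------------------===//
//
// Worked example (a sketch for illustration, not part of the pass): the
// known-bits rule above turns trailing zero bits into an alignment. The
// helper below replays that arithmetic on plain integers; the name
// `alignFromTrailingZeros` is invented here and does not exist in LLVM.
//
//   #include <algorithm>
//   #include <cstdint>
//
//   static uint64_t alignFromTrailingZeros(unsigned BitWidth,
//                                          unsigned MinTrailingZeros) {
//     // Cap at 2^32, the largest alignment LLVM IR can express
//     // (Value::MaxAlignmentExponent == 32).
//     unsigned TrailZ = std::min(MinTrailingZeros, 32u);
//     // Stay below BitWidth so a pointer known to be all zeros does not
//     // shift out of range.
//     return 1ull << std::min(BitWidth - 1, TrailZ);
//   }
//
// For instance, in the IR below the base is `align 16` and the offset is 8,
// so %p has at least three known trailing zero bits; alignFromTrailingZeros
// yields 8 and the pass can raise the load from `align 1` to `align 8`:
//
//   @g = global [16 x i8] zeroinitializer, align 16
//   %p = getelementptr inbounds i8, ptr @g, i64 8
//   %v = load i8, ptr %p, align 1
//
//===----------------------------------------------------------------------===//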