xref: /llvm-project/llvm/lib/Target/AArch64/AArch64StackTagging.cpp (revision a41922ad7530ef5e311afbff2721e69cbf520890)
//===- AArch64StackTagging.cpp - Stack tagging in IR --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/MemoryTaggingSupport.h"
#include <cassert>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "aarch64-stack-tagging"

static cl::opt<bool> ClMergeInit(
    "stack-tagging-merge-init", cl::Hidden, cl::init(true),
    cl::desc("merge stack variable initializers with tagging when possible"));

static cl::opt<bool>
    ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden,
                     cl::init(true),
                     cl::desc("Use Stack Safety analysis results"));

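// Upper bound on the number of (non-debug) instructions scanned past an
// alloca's tagging point when looking for initializing stores and memsets to
// merge into the tag-setting code.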
static cl::opt<unsigned> ClScanLimit("stack-tagging-merge-init-scan-limit",
                                     cl::init(40), cl::Hidden);

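// Allocations larger than this many bytes are tagged without merging their
// initializers.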
static cl::opt<unsigned>
    ClMergeInitSizeLimit("stack-tagging-merge-init-size-limit", cl::init(272),
                         cl::Hidden);

static cl::opt<size_t> ClMaxLifetimes(
    "stack-tagging-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
    cl::ReallyHidden,
    cl::desc("How many lifetime ends to handle for a single alloca."),
    cl::Optional);

// Mode for selecting how to insert frame record info into the stack ring
// buffer.
enum StackTaggingRecordStackHistoryMode {
  // Do not record frame record info.
  none,

  // Insert instructions into the prologue for storing into the stack ring
  // buffer directly.
  instr,
};

static cl::opt<StackTaggingRecordStackHistoryMode> ClRecordStackHistory(
    "stack-tagging-record-stack-history",
    cl::desc("Record stack frames with tagged allocations in a thread-local "
             "ring buffer"),
    cl::values(clEnumVal(none, "Do not record stack ring history"),
               clEnumVal(instr, "Insert instructions into the prologue for "
                                "storing into the stack ring buffer")),
    cl::Hidden, cl::init(none));

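// MTE tags memory in 16-byte granules; instrumented allocas are aligned and
// padded to a multiple of this size.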
static const Align kTagGranuleSize = Align(16);

namespace {

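// Collects the constant stores and memsets that initialize a single tagged
// alloca and re-emits them as stgp / settag_zero / settag intrinsic calls, so
// the memory is initialized and tagged in one pass.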
class InitializerBuilder {
  uint64_t Size;
  const DataLayout *DL;
  Value *BasePtr;
  Function *SetTagFn;
  Function *SetTagZeroFn;
  Function *StgpFn;

  // List of initializers sorted by start offset.
  struct Range {
    uint64_t Start, End;
    Instruction *Inst;
  };
  SmallVector<Range, 4> Ranges;
  // 8-aligned offset => 8-byte initializer
  // Missing keys are zero initialized.
  std::map<uint64_t, Value *> Out;

public:
  InitializerBuilder(uint64_t Size, const DataLayout *DL, Value *BasePtr,
                     Function *SetTagFn, Function *SetTagZeroFn,
                     Function *StgpFn)
      : Size(Size), DL(DL), BasePtr(BasePtr), SetTagFn(SetTagFn),
        SetTagZeroFn(SetTagZeroFn), StgpFn(StgpFn) {}

  bool addRange(uint64_t Start, uint64_t End, Instruction *Inst) {
    auto I =
        llvm::lower_bound(Ranges, Start, [](const Range &LHS, uint64_t RHS) {
          return LHS.End <= RHS;
        });
    if (I != Ranges.end() && End > I->Start) {
      // Overlap - bail.
      return false;
    }
    Ranges.insert(I, {Start, End, Inst});
    return true;
  }

  bool addStore(uint64_t Offset, StoreInst *SI, const DataLayout *DL) {
    int64_t StoreSize = DL->getTypeStoreSize(SI->getOperand(0)->getType());
    if (!addRange(Offset, Offset + StoreSize, SI))
      return false;
    IRBuilder<> IRB(SI);
    applyStore(IRB, Offset, Offset + StoreSize, SI->getOperand(0));
    return true;
  }

  bool addMemSet(uint64_t Offset, MemSetInst *MSI) {
    uint64_t StoreSize = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    if (!addRange(Offset, Offset + StoreSize, MSI))
      return false;
    IRBuilder<> IRB(MSI);
    applyMemSet(IRB, Offset, Offset + StoreSize,
                cast<ConstantInt>(MSI->getValue()));
    return true;
  }

  void applyMemSet(IRBuilder<> &IRB, int64_t Start, int64_t End,
                   ConstantInt *V) {
    // Out[] does not distinguish between zero and undef, and we already know
    // that this memset does not overlap with any other initializer. Nothing to
    // do for memset(0).
    if (V->isZero())
      return;
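    // For each 8-byte word overlapping [Start, End), build a mask with a 0x01
    // byte in every position the memset actually covers, then multiply by the
    // memset value to replicate that byte across the covered positions.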
    for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
      uint64_t Cst = 0x0101010101010101UL;
      int LowBits = Offset < Start ? (Start - Offset) * 8 : 0;
      if (LowBits)
        Cst = (Cst >> LowBits) << LowBits;
      int HighBits = End - Offset < 8 ? (8 - (End - Offset)) * 8 : 0;
      if (HighBits)
        Cst = (Cst << HighBits) >> HighBits;
      ConstantInt *C =
          ConstantInt::get(IRB.getInt64Ty(), Cst * V->getZExtValue());

      Value *&CurrentV = Out[Offset];
      if (!CurrentV) {
        CurrentV = C;
      } else {
        CurrentV = IRB.CreateOr(CurrentV, C);
      }
    }
  }

  // Take a 64-bit slice of the value starting at the given offset (in bytes).
  // Offset can be negative. Pad with zeroes on both sides when necessary.
  Value *sliceValue(IRBuilder<> &IRB, Value *V, int64_t Offset) {
    if (Offset > 0) {
      V = IRB.CreateLShr(V, Offset * 8);
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
    } else if (Offset < 0) {
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
      V = IRB.CreateShl(V, -Offset * 8);
    } else {
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
    }
    return V;
  }

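  // Split the flattened store value into 8-byte slices keyed by their
  // 8-aligned offset, OR-ing each slice into any value already recorded for
  // that word.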
  void applyStore(IRBuilder<> &IRB, int64_t Start, int64_t End,
                  Value *StoredValue) {
    StoredValue = flatten(IRB, StoredValue);
    for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
      Value *V = sliceValue(IRB, StoredValue, Offset - Start);
      Value *&CurrentV = Out[Offset];
      if (!CurrentV) {
        CurrentV = V;
      } else {
        CurrentV = IRB.CreateOr(CurrentV, V);
      }
    }
  }

  void generate(IRBuilder<> &IRB) {
    LLVM_DEBUG(dbgs() << "Combined initializer\n");
    // No initializers => the entire allocation is undef.
    if (Ranges.empty()) {
      emitUndef(IRB, 0, Size);
      return;
    }

    // Look through the 8-byte initializer list 16 bytes at a time;
    // if either of the two 8-byte halves is non-zero and non-undef, emit STGP.
    // Otherwise, emit zeroes up to the next available item.
    uint64_t LastOffset = 0;
    for (uint64_t Offset = 0; Offset < Size; Offset += 16) {
      auto I1 = Out.find(Offset);
      auto I2 = Out.find(Offset + 8);
      if (I1 == Out.end() && I2 == Out.end())
        continue;

      if (Offset > LastOffset)
        emitZeroes(IRB, LastOffset, Offset - LastOffset);

      Value *Store1 = I1 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
                                      : I1->second;
      Value *Store2 = I2 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
                                      : I2->second;
      emitPair(IRB, Offset, Store1, Store2);
      LastOffset = Offset + 16;
    }

    // memset(0) does not update Out[], therefore the tail can be either undef
    // or zero.
    if (LastOffset < Size)
      emitZeroes(IRB, LastOffset, Size - LastOffset);

    for (const auto &R : Ranges) {
      R.Inst->eraseFromParent();
    }
  }

  void emitZeroes(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + Size
                      << ") zero\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), Ptr, Offset);
    IRB.CreateCall(SetTagZeroFn,
                   {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
  }

  void emitUndef(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + Size
                      << ") undef\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), Ptr, Offset);
    IRB.CreateCall(SetTagFn, {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
  }

  void emitPair(IRBuilder<> &IRB, uint64_t Offset, Value *A, Value *B) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + 16 << "):\n");
    LLVM_DEBUG(dbgs() << "    " << *A << "\n    " << *B << "\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), Ptr, Offset);
    IRB.CreateCall(StgpFn, {Ptr, A, B});
  }

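  // Convert an arbitrary stored value (pointer, vector, float, ...) to an
  // integer of the same store size so that it can be sliced into 64-bit words.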
  Value *flatten(IRBuilder<> &IRB, Value *V) {
    if (V->getType()->isIntegerTy())
      return V;
    // vector of pointers -> vector of ints
    if (VectorType *VecTy = dyn_cast<VectorType>(V->getType())) {
      LLVMContext &Ctx = IRB.getContext();
      Type *EltTy = VecTy->getElementType();
      if (EltTy->isPointerTy()) {
        uint32_t EltSize = DL->getTypeSizeInBits(EltTy);
        auto *NewTy = FixedVectorType::get(
            IntegerType::get(Ctx, EltSize),
            cast<FixedVectorType>(VecTy)->getNumElements());
        V = IRB.CreatePointerCast(V, NewTy);
      }
    }
    return IRB.CreateBitOrPointerCast(
        V, IRB.getIntNTy(DL->getTypeStoreSize(V->getType()) * 8));
  }
};

class AArch64StackTagging : public FunctionPass {
  const bool MergeInit;
  const bool UseStackSafety;

public:
  static char ID; // Pass ID, replacement for typeid

  AArch64StackTagging(bool IsOptNone = false)
      : FunctionPass(ID),
        MergeInit(ClMergeInit.getNumOccurrences() ? ClMergeInit : !IsOptNone),
        UseStackSafety(ClUseStackSafety.getNumOccurrences() ? ClUseStackSafety
                                                            : !IsOptNone) {
    initializeAArch64StackTaggingPass(*PassRegistry::getPassRegistry());
  }

  void tagAlloca(AllocaInst *AI, Instruction *InsertBefore, Value *Ptr,
                 uint64_t Size);
  void untagAlloca(AllocaInst *AI, Instruction *InsertBefore, uint64_t Size);

  Instruction *collectInitializers(Instruction *StartInst, Value *StartPtr,
                                   uint64_t Size, InitializerBuilder &IB);

  Instruction *insertBaseTaggedPointer(
      const Module &M,
      const MapVector<AllocaInst *, memtag::AllocaInfo> &Allocas,
      const DominatorTree *DT);
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AArch64 Stack Tagging"; }

private:
  Function *F = nullptr;
  Function *SetTagFunc = nullptr;
  const DataLayout *DL = nullptr;
  AAResults *AA = nullptr;
  const StackSafetyGlobalInfo *SSI = nullptr;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    if (UseStackSafety)
      AU.addRequired<StackSafetyGlobalInfoWrapperPass>();
    if (MergeInit)
      AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  }
};

} // end anonymous namespace

char AArch64StackTagging::ID = 0;

INITIALIZE_PASS_BEGIN(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(StackSafetyGlobalInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
                    false, false)

FunctionPass *llvm::createAArch64StackTaggingPass(bool IsOptNone) {
  return new AArch64StackTagging(IsOptNone);
}

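// Scan forward from StartInst for simple stores and constant memsets that
// write at constant offsets into the allocation, feeding them to the
// InitializerBuilder. Scanning stops at the scan limit, at a write that
// cannot be merged, or at any other instruction that may access the
// allocation. Returns the last merged instruction (or StartInst if nothing
// was merged).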
Instruction *AArch64StackTagging::collectInitializers(Instruction *StartInst,
                                                      Value *StartPtr,
                                                      uint64_t Size,
                                                      InitializerBuilder &IB) {
  MemoryLocation AllocaLoc{StartPtr, Size};
  Instruction *LastInst = StartInst;
  BasicBlock::iterator BI(StartInst);

  unsigned Count = 0;
  for (; Count < ClScanLimit && !BI->isTerminator(); ++BI) {
    if (!isa<DbgInfoIntrinsic>(*BI))
      ++Count;

    if (isNoModRef(AA->getModRefInfo(&*BI, AllocaLoc)))
      continue;

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      if (!NextStore->isSimple())
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      std::optional<int64_t> Offset =
          NextStore->getPointerOperand()->getPointerOffsetFrom(StartPtr, *DL);
      if (!Offset)
        break;

      if (!IB.addStore(*Offset, NextStore, DL))
        break;
      LastInst = NextStore;
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
        break;

      if (!isa<ConstantInt>(MSI->getValue()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      std::optional<int64_t> Offset =
          MSI->getDest()->getPointerOffsetFrom(StartPtr, *DL);
      if (!Offset)
        break;

      if (!IB.addMemSet(*Offset, MSI))
        break;
      LastInst = MSI;
    }
  }
  return LastInst;
}

void AArch64StackTagging::tagAlloca(AllocaInst *AI, Instruction *InsertBefore,
                                    Value *Ptr, uint64_t Size) {
  auto SetTagZeroFunc = Intrinsic::getOrInsertDeclaration(
      F->getParent(), Intrinsic::aarch64_settag_zero);
  auto StgpFunc = Intrinsic::getOrInsertDeclaration(F->getParent(),
                                                    Intrinsic::aarch64_stgp);

  InitializerBuilder IB(Size, DL, Ptr, SetTagFunc, SetTagZeroFunc, StgpFunc);
  bool LittleEndian =
      Triple(AI->getModule()->getTargetTriple()).isLittleEndian();
  // Current implementation of initializer merging assumes little endianness.
  if (MergeInit && !F->hasOptNone() && LittleEndian &&
      Size < ClMergeInitSizeLimit) {
    LLVM_DEBUG(dbgs() << "collecting initializers for " << *AI
                      << ", size = " << Size << "\n");
    InsertBefore = collectInitializers(InsertBefore, Ptr, Size, IB);
  }

  IRBuilder<> IRB(InsertBefore);
  IB.generate(IRB);
}

void AArch64StackTagging::untagAlloca(AllocaInst *AI, Instruction *InsertBefore,
                                      uint64_t Size) {
  IRBuilder<> IRB(InsertBefore);
  IRB.CreateCall(SetTagFunc, {IRB.CreatePointerCast(AI, IRB.getPtrTy()),
                              ConstantInt::get(IRB.getInt64Ty(), Size)});
}

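// Materialize the common base tagged pointer (an irg of SP) in the nearest
// common dominator of all instrumented allocas, and optionally record the
// frame in the Android MTE stack-history ring buffer.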
Instruction *AArch64StackTagging::insertBaseTaggedPointer(
    const Module &M,
    const MapVector<AllocaInst *, memtag::AllocaInfo> &AllocasToInstrument,
    const DominatorTree *DT) {
  BasicBlock *PrologueBB = nullptr;
  // Try sinking IRG as deep as possible to avoid hurting shrink wrap.
  for (auto &I : AllocasToInstrument) {
    const memtag::AllocaInfo &Info = I.second;
    AllocaInst *AI = Info.AI;
    if (!PrologueBB) {
      PrologueBB = AI->getParent();
      continue;
    }
    PrologueBB = DT->findNearestCommonDominator(PrologueBB, AI->getParent());
  }
  assert(PrologueBB);

  IRBuilder<> IRB(&PrologueBB->front());
  Instruction *Base =
      IRB.CreateIntrinsic(Intrinsic::aarch64_irg_sp, {},
                          {Constant::getNullValue(IRB.getInt64Ty())});
  Base->setName("basetag");
  auto TargetTriple = Triple(M.getTargetTriple());
  // This ABI will make it into Android API level 35.
  // The ThreadLong format is the same as with HWASan, but the entries for
  // stack MTE take two slots (16 bytes).
  if (ClRecordStackHistory == instr && TargetTriple.isAndroid() &&
      TargetTriple.isAArch64() && !TargetTriple.isAndroidVersionLT(35) &&
      !AllocasToInstrument.empty()) {
    constexpr int StackMteSlot = -3;
    constexpr uint64_t TagMask = 0xFULL << 56;

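    // Load the current ring-buffer position from TLS slot -3, write a
    // two-slot record (PC, then FP combined with the base tag bits 56..59),
    // and advance the position by 16 bytes.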
    auto *IntptrTy = IRB.getIntPtrTy(M.getDataLayout());
    Value *SlotPtr = memtag::getAndroidSlotPtr(IRB, StackMteSlot);
    auto *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
    Value *FP = memtag::getFP(IRB);
    Value *Tag = IRB.CreateAnd(IRB.CreatePtrToInt(Base, IntptrTy), TagMask);
    Value *TaggedFP = IRB.CreateOr(FP, Tag);
    Value *PC = memtag::getPC(TargetTriple, IRB);
    Value *RecordPtr = IRB.CreateIntToPtr(ThreadLong, IRB.getPtrTy(0));
    IRB.CreateStore(PC, RecordPtr);
    IRB.CreateStore(TaggedFP, IRB.CreateConstGEP1_64(IntptrTy, RecordPtr, 1));

    IRB.CreateStore(memtag::incrementThreadLong(IRB, ThreadLong, 16), SlotPtr);
  }
  return Base;
}

// FIXME: check for MTE extension
bool AArch64StackTagging::runOnFunction(Function &Fn) {
  if (!Fn.hasFnAttribute(Attribute::SanitizeMemTag))
    return false;

  if (UseStackSafety)
    SSI = &getAnalysis<StackSafetyGlobalInfoWrapperPass>().getResult();
  F = &Fn;
  DL = &Fn.getDataLayout();
  if (MergeInit)
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  OptimizationRemarkEmitter &ORE =
      getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

  memtag::StackInfoBuilder SIB(SSI, DEBUG_TYPE);
  for (Instruction &I : instructions(F))
    SIB.visit(ORE, I);
  memtag::StackInfo &SInfo = SIB.get();

  if (SInfo.AllocasToInstrument.empty())
    return false;

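  // Dominator, post-dominator and loop info are needed to place tag/untag
  // calls along lifetime markers; build local copies if the pass manager did
  // not already provide them.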
  std::unique_ptr<DominatorTree> DeleteDT;
  DominatorTree *DT = nullptr;
  if (auto *P = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DT = &P->getDomTree();

  if (DT == nullptr) {
    DeleteDT = std::make_unique<DominatorTree>(*F);
    DT = DeleteDT.get();
  }

  std::unique_ptr<PostDominatorTree> DeletePDT;
  PostDominatorTree *PDT = nullptr;
  if (auto *P = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>())
    PDT = &P->getPostDomTree();

  if (PDT == nullptr) {
    DeletePDT = std::make_unique<PostDominatorTree>(*F);
    PDT = DeletePDT.get();
  }

  std::unique_ptr<LoopInfo> DeleteLI;
  LoopInfo *LI = nullptr;
  if (auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>()) {
    LI = &LIWP->getLoopInfo();
  } else {
    DeleteLI = std::make_unique<LoopInfo>(*DT);
    LI = DeleteLI.get();
  }

  SetTagFunc = Intrinsic::getOrInsertDeclaration(F->getParent(),
                                                 Intrinsic::aarch64_settag);

  Instruction *Base =
      insertBaseTaggedPointer(*Fn.getParent(), SInfo.AllocasToInstrument, DT);

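  // Assign each instrumented alloca a tag (cycling through 0..15) and rewrite
  // its non-lifetime users to go through aarch64.tagp of the common base
  // pointer.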
  unsigned int NextTag = 0;
  for (auto &I : SInfo.AllocasToInstrument) {
    memtag::AllocaInfo &Info = I.second;
    assert(Info.AI && SIB.getAllocaInterestingness(*Info.AI) ==
                          llvm::memtag::AllocaInterestingness::kInteresting);
    memtag::alignAndPadAlloca(Info, kTagGranuleSize);
    AllocaInst *AI = Info.AI;
    unsigned int Tag = NextTag;
    NextTag = (NextTag + 1) % 16;
    // Replace alloca with tagp(alloca).
    IRBuilder<> IRB(Info.AI->getNextNode());
    Instruction *TagPCall =
        IRB.CreateIntrinsic(Intrinsic::aarch64_tagp, {Info.AI->getType()},
                            {Constant::getNullValue(Info.AI->getType()), Base,
                             ConstantInt::get(IRB.getInt64Ty(), Tag)});
    if (Info.AI->hasName())
      TagPCall->setName(Info.AI->getName() + ".tag");
    // Does not replace metadata, so we don't have to handle DbgVariableRecords.
    Info.AI->replaceUsesWithIf(TagPCall, [&](const Use &U) {
      return !memtag::isLifetimeIntrinsic(U.getUser());
    });
    TagPCall->setOperand(0, Info.AI);

    // Calls to functions that may return twice (e.g. setjmp) confuse the
    // postdominator analysis and can leave memory tagged after the function
    // returns. Work around this by always untagging at every return statement
    // if return_twice functions are called.
    bool StandardLifetime =
        !SInfo.CallsReturnTwice &&
        SInfo.UnrecognizedLifetimes.empty() &&
        memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, DT, LI,
                                   ClMaxLifetimes);
    if (StandardLifetime) {
      IntrinsicInst *Start = Info.LifetimeStart[0];
      uint64_t Size =
          cast<ConstantInt>(Start->getArgOperand(0))->getZExtValue();
      Size = alignTo(Size, kTagGranuleSize);
      tagAlloca(AI, Start->getNextNode(), TagPCall, Size);

      auto TagEnd = [&](Instruction *Node) { untagAlloca(AI, Node, Size); };
      if (!DT || !PDT ||
          !memtag::forAllReachableExits(*DT, *PDT, *LI, Start, Info.LifetimeEnd,
                                        SInfo.RetVec, TagEnd)) {
        for (auto *End : Info.LifetimeEnd)
          End->eraseFromParent();
      }
    } else {
      uint64_t Size = *Info.AI->getAllocationSize(*DL);
      Value *Ptr = IRB.CreatePointerCast(TagPCall, IRB.getPtrTy());
      tagAlloca(AI, &*IRB.GetInsertPoint(), Ptr, Size);
      for (auto *RI : SInfo.RetVec) {
        untagAlloca(AI, RI, Size);
      }
      // We may have inserted tag/untag outside of any lifetime interval.
      // Remove all lifetime intrinsics for this alloca.
      for (auto *II : Info.LifetimeStart)
        II->eraseFromParent();
      for (auto *II : Info.LifetimeEnd)
        II->eraseFromParent();
    }

    memtag::annotateDebugRecords(Info, Tag);
  }

  // If we have instrumented at least one alloca, all unrecognized lifetime
  // intrinsics have to go.
  for (auto *I : SInfo.UnrecognizedLifetimes)
    I->eraseFromParent();

  return true;
}