//===- StackProtector.cpp - Stack Protector Insertion ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass inserts stack protectors into functions which need them. A variable
// with a random value in it is stored onto the stack before the local variables
// are allocated. Upon exiting the function, the stored value is checked. If it
// has changed, then the guard was overwritten and the program aborts.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/StackProtector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <optional>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "stack-protector"

STATISTIC(NumFunProtected, "Number of functions protected");
STATISTIC(NumAddrTaken, "Number of local variables that have their address"
                        " taken.");

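// When set (the default), the epilogue guard check is left to SelectionDAG
// lowering where possible (see SupportsSelectionDAGSP in
// InsertStackProtectors() below); when clear, this pass emits the IR-level
// check itself unless the target forces SDAG lowering via useStackGuardXorFP().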
static cl::opt<bool> EnableSelectionDAGSP("enable-selectiondag-sp",
                                          cl::init(true), cl::Hidden);
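// When set, do not place a guard check before noreturn calls that may throw
// (e.g. __cxa_throw); only blocks that end in a return are then instrumented.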
static cl::opt<bool> DisableCheckNoReturn("disable-check-noreturn-call",
                                          cl::init(false), cl::Hidden);

char StackProtector::ID = 0;

StackProtector::StackProtector() : FunctionPass(ID) {
  initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}

INITIALIZE_PASS_BEGIN(StackProtector, DEBUG_TYPE,
                      "Insert stack protectors", false, true)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(StackProtector, DEBUG_TYPE,
                    "Insert stack protectors", false, true)

FunctionPass *llvm::createStackProtectorPass() { return new StackProtector(); }

void StackProtector::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.addPreserved<DominatorTreeWrapperPass>();
}

bool StackProtector::runOnFunction(Function &Fn) {
  F = &Fn;
  M = F->getParent();
  if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DTU.emplace(DTWP->getDomTree(), DomTreeUpdater::UpdateStrategy::Lazy);
  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  Trip = TM->getTargetTriple();
  TLI = TM->getSubtargetImpl(Fn)->getTargetLowering();
  HasPrologue = false;
  HasIRCheck = false;

  SSPBufferSize = Fn.getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", DefaultSSPBufferSize);
  if (!requiresStackProtector(F, &Layout))
    return false;

  // TODO(etienneb): Functions with funclets are not correctly supported now.
  // Do nothing if this is a funclet-based personality.
  if (Fn.hasPersonalityFn()) {
    EHPersonality Personality = classifyEHPersonality(Fn.getPersonalityFn());
    if (isFuncletEHPersonality(Personality))
      return false;
  }

  ++NumFunProtected;
  bool Changed = InsertStackProtectors();
#ifdef EXPENSIVE_CHECKS
  assert((!DTU ||
          DTU->getDomTree().verify(DominatorTree::VerificationLevel::Full)) &&
         "Failed to maintain validity of domtree!");
#endif
  DTU.reset();
  return Changed;
}

/// \param [out] IsLarge is set to true if a protectable array is found and
/// it is "large" (>= ssp-buffer-size). In the case of a structure with
/// multiple arrays, this gets set if any of them is large.
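/// For example, with the default ssp-buffer-size of 8, an alloca of type
/// [8 x i8] counts as "large" while [4 x i8] is a small (but still
/// protectable) character array.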
static bool ContainsProtectableArray(Type *Ty, Module *M, unsigned SSPBufferSize,
                                     bool &IsLarge, bool Strong,
                                     bool InStruct) {
  if (!Ty)
    return false;
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    if (!AT->getElementType()->isIntegerTy(8)) {
      // If we're on a non-Darwin platform or we're inside of a structure, don't
      // add stack protectors unless the array is a character array.
      // However, in strong mode any array, regardless of type and size,
      // triggers a protector.
      if (!Strong && (InStruct || !Triple(M->getTargetTriple()).isOSDarwin()))
        return false;
    }

    // If an array has at least SSPBufferSize bytes of allocated space, then we
    // emit stack protectors.
    if (SSPBufferSize <= M->getDataLayout().getTypeAllocSize(AT)) {
      IsLarge = true;
      return true;
    }

    if (Strong)
      // Require a protector for all arrays in strong mode.
      return true;
  }

  const StructType *ST = dyn_cast<StructType>(Ty);
  if (!ST)
    return false;

  bool NeedsProtector = false;
  for (Type *ET : ST->elements())
    if (ContainsProtectableArray(ET, M, SSPBufferSize, IsLarge, Strong, true)) {
      // If the element is a protectable array and is large (>= SSPBufferSize)
      // then we are done.  If the protectable array is not large, then
      // keep looking in case a subsequent element is a large array.
      if (IsLarge)
        return true;
      NeedsProtector = true;
    }

  return NeedsProtector;
}

/// Check whether a stack allocation has its address taken.
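/// The walk over users is conservative: any use that could leak the address
/// or reach memory beyond the allocation (storing the pointer itself, passing
/// it to a non-intrinsic call, an out-of-bounds or non-constant GEP, and so
/// on) is treated as address-taken.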
static bool HasAddressTaken(const Instruction *AI, TypeSize AllocSize,
                            Module *M,
                            SmallPtrSet<const PHINode *, 16> &VisitedPHIs) {
  const DataLayout &DL = M->getDataLayout();
  for (const User *U : AI->users()) {
    const auto *I = cast<Instruction>(U);
    // If this instruction accesses memory make sure it doesn't access beyond
    // the bounds of the allocated object.
    std::optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
    if (MemLoc && MemLoc->Size.hasValue() &&
        !TypeSize::isKnownGE(AllocSize, MemLoc->Size.getValue()))
      return true;
    switch (I->getOpcode()) {
    case Instruction::Store:
      if (AI == cast<StoreInst>(I)->getValueOperand())
        return true;
      break;
    case Instruction::AtomicCmpXchg:
      // cmpxchg conceptually includes both a load and store from the same
      // location. So, like store, the value being stored is what matters.
      if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
        return true;
      break;
    case Instruction::PtrToInt:
      if (AI == cast<PtrToIntInst>(I)->getOperand(0))
        return true;
      break;
    case Instruction::Call: {
      // Ignore intrinsics that do not become real instructions.
      // TODO: Narrow this to intrinsics that have store-like effects.
      const auto *CI = cast<CallInst>(I);
      if (!CI->isDebugOrPseudoInst() && !CI->isLifetimeStartOrEnd())
        return true;
      break;
    }
    case Instruction::Invoke:
      return true;
    case Instruction::GetElementPtr: {
      // If the GEP offset is out-of-bounds, or is non-constant and so has to be
      // assumed to be potentially out-of-bounds, then any memory access that
      // would use it could also be out-of-bounds meaning stack protection is
      // required.
      const GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
      unsigned IndexSize = DL.getIndexTypeSizeInBits(I->getType());
      APInt Offset(IndexSize, 0);
      if (!GEP->accumulateConstantOffset(DL, Offset))
        return true;
      TypeSize OffsetSize = TypeSize::getFixed(Offset.getLimitedValue());
      if (!TypeSize::isKnownGT(AllocSize, OffsetSize))
        return true;
      // Adjust AllocSize to be the space remaining after this offset.
      // We can't subtract a fixed size from a scalable one, so in that case
      // assume the scalable value is of minimum size.
      TypeSize NewAllocSize =
          TypeSize::getFixed(AllocSize.getKnownMinValue()) - OffsetSize;
      if (HasAddressTaken(I, NewAllocSize, M, VisitedPHIs))
        return true;
      break;
    }
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      if (HasAddressTaken(I, AllocSize, M, VisitedPHIs))
        return true;
      break;
    case Instruction::PHI: {
      // Keep track of what PHI nodes we have already visited to ensure
      // they are only visited once.
      const auto *PN = cast<PHINode>(I);
      if (VisitedPHIs.insert(PN).second)
        if (HasAddressTaken(PN, AllocSize, M, VisitedPHIs))
          return true;
      break;
    }
    case Instruction::Load:
    case Instruction::AtomicRMW:
    case Instruction::Ret:
      // These instructions take an address operand, but have load-like or
      // other innocuous behavior that should not trigger a stack protector.
      // atomicrmw conceptually has both load and store semantics, but the
      // value being stored must be integer; so if a pointer is being stored,
      // we'll catch it in the PtrToInt case above.
      break;
    default:
      // Conservatively return true for any instruction that takes an address
      // operand, but is not handled above.
      return true;
    }
  }
  return false;
}

/// Search for the first call to the llvm.stackprotector intrinsic and return it
/// if present.
static const CallInst *findStackProtectorIntrinsic(Function &F) {
  for (const BasicBlock &BB : F)
    for (const Instruction &I : BB)
      if (const auto *II = dyn_cast<IntrinsicInst>(&I))
        if (II->getIntrinsicID() == Intrinsic::stackprotector)
          return II;
  return nullptr;
}

/// Check whether or not this function needs a stack protector based
/// upon the stack protector level.
///
/// We use two heuristics: a standard (ssp) and strong (sspstrong).
/// The standard heuristic will add a guard variable to functions that call
/// alloca with either a variable size or a size >= SSPBufferSize, to
/// functions with character buffers larger than SSPBufferSize, and to
/// functions with aggregates containing character buffers larger than
/// SSPBufferSize. The strong heuristic will add a guard variable to functions
/// that call alloca regardless of size, to functions with any buffer
/// regardless of type and size, to functions with aggregates that contain
/// any buffer regardless of type and size, and to functions that contain
/// stack-based variables that have had their address taken.
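/// For example, with the default ssp-buffer-size of 8, "char buf[16]" alone
/// is enough to trigger a guard under ssp, whereas a lone "int n" whose
/// address escapes (say, via scanf("%d", &n)) only triggers one under
/// sspstrong.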
bool StackProtector::requiresStackProtector(Function *F, SSPLayoutMap *Layout) {
  Module *M = F->getParent();
  bool Strong = false;
  bool NeedsProtector = false;

  // The set of PHI nodes visited when determining if a variable's address has
  // been taken.  This set is maintained to ensure we don't visit the same PHI
  // node multiple times.
  SmallPtrSet<const PHINode *, 16> VisitedPHIs;

  unsigned SSPBufferSize = F->getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", DefaultSSPBufferSize);

  if (F->hasFnAttribute(Attribute::SafeStack))
    return false;

  // We are constructing the OptimizationRemarkEmitter on the fly rather than
  // using the analysis pass to avoid building DominatorTree and LoopInfo which
  // are not available this late in the IR pipeline.
  OptimizationRemarkEmitter ORE(F);

  if (F->hasFnAttribute(Attribute::StackProtectReq)) {
    if (!Layout)
      return true;
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "StackProtectorRequested", F)
             << "Stack protection applied to function "
             << ore::NV("Function", F)
             << " due to a function attribute or command-line switch";
    });
    NeedsProtector = true;
    Strong = true; // Use the same heuristic as strong to determine SSPLayout
  } else if (F->hasFnAttribute(Attribute::StackProtectStrong))
    Strong = true;
  else if (!F->hasFnAttribute(Attribute::StackProtect))
    return false;

  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        if (AI->isArrayAllocation()) {
          auto RemarkBuilder = [&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAllocaOrArray",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a call to alloca or use of a variable length "
                      "array";
          };
          if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
            if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
              // A call to alloca with size >= SSPBufferSize requires
              // stack protectors.
              if (!Layout)
                return true;
              Layout->insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            } else if (Strong) {
              // Require protectors for all alloca calls in strong mode.
              if (!Layout)
                return true;
              Layout->insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_SmallArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            }
          } else {
            // A call to alloca with a variable size requires protectors.
            if (!Layout)
              return true;
            Layout->insert(
                std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
            ORE.emit(RemarkBuilder);
            NeedsProtector = true;
          }
          continue;
        }

        bool IsLarge = false;
        if (ContainsProtectableArray(AI->getAllocatedType(), M, SSPBufferSize,
                                     IsLarge, Strong, false)) {
          if (!Layout)
            return true;
          Layout->insert(std::make_pair(
              AI, IsLarge ? MachineFrameInfo::SSPLK_LargeArray
                          : MachineFrameInfo::SSPLK_SmallArray));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorBuffer", &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a stack allocated buffer or struct containing a "
                      "buffer";
          });
          NeedsProtector = true;
          continue;
        }

        if (Strong &&
            HasAddressTaken(
                AI, M->getDataLayout().getTypeAllocSize(AI->getAllocatedType()),
                M, VisitedPHIs)) {
          ++NumAddrTaken;
          if (!Layout)
            return true;
          Layout->insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAddressTaken",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to the address of a local variable being taken";
          });
          NeedsProtector = true;
        }
        // Clear any PHIs that we visited, to make sure we examine all uses of
        // any subsequent allocas that we look at.
        VisitedPHIs.clear();
      }
    }
  }

  return NeedsProtector;
}

/// Create a load of the stack guard and report whether SelectionDAG-based
/// stack protection is supported.
static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
                            IRBuilder<> &B,
                            bool *SupportsSelectionDAGSP = nullptr) {
  Value *Guard = TLI->getIRStackGuard(B);
  StringRef GuardMode = M->getStackProtectorGuard();
  if ((GuardMode == "tls" || GuardMode.empty()) && Guard)
    return B.CreateLoad(B.getPtrTy(), Guard, true, "StackGuard");

  // Use SelectionDAG SSP handling, since there isn't an IR guard.
  //
  // This is somewhat awkward, since we optionally report here whether
  // SelectionDAG SP should be performed. The reason is that the result is
  // strictly defined as !TLI->getIRStackGuard(B), and getIRStackGuard is
  // itself mutating. There is no way to compute this bit without mutating
  // the IR, so it has to be computed at this point.
  //
  // We could have defined a new function TLI::supportsSelectionDAGSP(), but
  // that would put more burden on the backends' overriding work, especially
  // when it conveys the same information getIRStackGuard() already gives.
  if (SupportsSelectionDAGSP)
    *SupportsSelectionDAGSP = true;
  TLI->insertSSPDeclarations(*M);
  return B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
}

/// Insert code into the entry block that stores the stack guard
/// variable onto the stack:
///
///   entry:
///     StackGuardSlot = alloca i8*
///     StackGuard = <stack guard>
///     call void @llvm.stackprotector(StackGuard, StackGuardSlot)
///
/// Returns true if the platform/triple supports the stackprotectorcreate pseudo
/// node.
static bool CreatePrologue(Function *F, Module *M, Instruction *CheckLoc,
                           const TargetLoweringBase *TLI, AllocaInst *&AI) {
  bool SupportsSelectionDAGSP = false;
  IRBuilder<> B(&F->getEntryBlock().front());
  PointerType *PtrTy = PointerType::getUnqual(CheckLoc->getContext());
  AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");

  Value *GuardSlot = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP);
  B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
               {GuardSlot, AI});
  return SupportsSelectionDAGSP;
}

/// InsertStackProtectors - Insert code into the prologue and epilogue of the
/// function.
///
///  - The prologue code loads and stores the stack guard onto the stack.
///  - The epilogue checks the value stored in the prologue against the original
///    value. It calls __stack_chk_fail if they differ.
bool StackProtector::InsertStackProtectors() {
  // If the target wants to XOR the frame pointer into the guard value, it's
  // impossible to emit the check in IR, so the target *must* support stack
  // protection in SDAG.
  bool SupportsSelectionDAGSP =
      TLI->useStackGuardXorFP() ||
      (EnableSelectionDAGSP && !TM->Options.EnableFastISel);
  AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
  BasicBlock *FailBB = nullptr;

  for (BasicBlock &BB : llvm::make_early_inc_range(*F)) {
    // This is the stack protector's auto-generated check BB; skip it.
    if (&BB == FailBB)
      continue;
    Instruction *CheckLoc = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!CheckLoc && !DisableCheckNoReturn)
      for (auto &Inst : BB)
        if (auto *CB = dyn_cast<CallBase>(&Inst))
          // Do the stack check before noreturn calls that aren't nounwind
          // (e.g., __cxa_throw).
          if (CB->doesNotReturn() && !CB->doesNotThrow()) {
            CheckLoc = CB;
            break;
          }

    if (!CheckLoc)
      continue;

    // Generate prologue instrumentation if not already generated.
    if (!HasPrologue) {
      HasPrologue = true;
      SupportsSelectionDAGSP &= CreatePrologue(F, M, CheckLoc, TLI, AI);
    }

    // SelectionDAG based code generation. Nothing else needs to be done here.
    // The epilogue instrumentation is postponed to SelectionDAG.
    if (SupportsSelectionDAGSP)
      break;

    // Find the stack guard slot if the prologue was not created by this pass
    // itself via a previous call to CreatePrologue().
    if (!AI) {
      const CallInst *SPCall = findStackProtectorIntrinsic(*F);
      assert(SPCall && "Call to llvm.stackprotector is missing");
      AI = cast<AllocaInst>(SPCall->getArgOperand(1));
    }

    // Set HasIRCheck to true, so that SelectionDAG will not generate its own
    // version. SelectionDAG calls 'shouldEmitSDCheck' to check whether
    // instrumentation has already been generated.
    HasIRCheck = true;

    // If we're instrumenting a block with a tail call, the check has to be
    // inserted before the call rather than between it and the return. The
    // verifier guarantees that a tail call is either directly before the
    // return or with a single correct bitcast of the return value in between,
    // so we don't need to worry about many situations here.
    Instruction *Prev = CheckLoc->getPrevNonDebugInstruction();
    if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
      CheckLoc = Prev;
    else if (Prev) {
      Prev = Prev->getPrevNonDebugInstruction();
      if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
        CheckLoc = Prev;
    }

    // Generate epilogue instrumentation. The epilogue instrumentation can be
    // function-based or inlined depending on which mechanism the target is
    // providing.
    if (Function *GuardCheck = TLI->getSSPStackGuardCheck(*M)) {
      // Generate the function-based epilogue instrumentation.
      // The target provides a guard check function; generate a call to it.
      IRBuilder<> B(CheckLoc);
      LoadInst *Guard = B.CreateLoad(B.getPtrTy(), AI, true, "Guard");
      CallInst *Call = B.CreateCall(GuardCheck, {Guard});
      Call->setAttributes(GuardCheck->getAttributes());
      Call->setCallingConv(GuardCheck->getCallingConv());
    } else {
      // Generate the epilogue with inline instrumentation.
      // If we do not support SelectionDAG based calls, generate IR level
      // calls.
      //
      // For each block with a return instruction, convert this:
      //
      //   return:
      //     ...
      //     ret ...
      //
      // into this:
      //
      //   return:
      //     ...
      //     %1 = <stack guard>
      //     %2 = load StackGuardSlot
      //     %3 = icmp ne i1 %1, %2
      //     br i1 %3, label %CallStackCheckFailBlk, label %SP_return
      //
      //   SP_return:
      //     ret ...
      //
      //   CallStackCheckFailBlk:
      //     call void @__stack_chk_fail()
      //     unreachable

      // Create the FailBB once and reuse it for every check in the function;
      // the MI tail merge pass would merge the various fail BBs into one
      // anyway, including the fail BB generated by the stack protector pseudo
      // instruction.
      if (!FailBB)
        FailBB = CreateFailBB();

      IRBuilder<> B(CheckLoc);
      Value *Guard = getStackGuard(TLI, M, B);
      LoadInst *LI2 = B.CreateLoad(B.getPtrTy(), AI, true);
      auto *Cmp = cast<ICmpInst>(B.CreateICmpNE(Guard, LI2));
      auto SuccessProb =
          BranchProbabilityInfo::getBranchProbStackProtector(true);
      auto FailureProb =
          BranchProbabilityInfo::getBranchProbStackProtector(false);
      MDNode *Weights = MDBuilder(F->getContext())
                            .createBranchWeights(FailureProb.getNumerator(),
                                                 SuccessProb.getNumerator());

      SplitBlockAndInsertIfThen(Cmp, CheckLoc,
                                /*Unreachable=*/false, Weights,
                                DTU ? &*DTU : nullptr,
                                /*LI=*/nullptr, /*ThenBlock=*/FailBB);

      auto *BI = cast<BranchInst>(Cmp->getParent()->getTerminator());
      BasicBlock *NewBB = BI->getSuccessor(1);
      NewBB->setName("SP_return");
      NewBB->moveAfter(&BB);

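      // Invert the compare and swap the branch successors so the success path
      // is the fall-through edge; swapSuccessors() also keeps the branch
      // weight metadata in sync.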
      Cmp->setPredicate(Cmp->getInversePredicate());
      BI->swapSuccessors();
    }
  }

  // HasPrologue is false only if we did not modify any basic blocks, i.e.,
  // the function contains no return statements (and no eligible noreturn
  // calls) to instrument.
  return HasPrologue;
}

/// CreateFailBB - Create a basic block to jump to when the stack protector
/// check fails.
BasicBlock *StackProtector::CreateFailBB() {
  LLVMContext &Context = F->getContext();
  BasicBlock *FailBB = BasicBlock::Create(Context, "CallStackCheckFailBlk", F);
  IRBuilder<> B(FailBB);
  if (F->getSubprogram())
    B.SetCurrentDebugLocation(
        DILocation::get(Context, 0, 0, F->getSubprogram()));
  FunctionCallee StackChkFail;
  SmallVector<Value *, 1> Args;
  if (Trip.isOSOpenBSD()) {
    StackChkFail = M->getOrInsertFunction("__stack_smash_handler",
                                          Type::getVoidTy(Context),
                                          PointerType::getUnqual(Context));
    Args.push_back(B.CreateGlobalStringPtr(F->getName(), "SSH"));
  } else {
    StackChkFail =
        M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context));
  }
  cast<Function>(StackChkFail.getCallee())->addFnAttr(Attribute::NoReturn);
  B.CreateCall(StackChkFail, Args);
  B.CreateUnreachable();
  return FailBB;
}

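/// Returns true if SelectionDAG should emit the guard check for \p BB: we
/// generated a prologue, did not emit an IR-level check ourselves, and the
/// block ends in a return.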
bool StackProtector::shouldEmitSDCheck(const BasicBlock &BB) const {
  return HasPrologue && !HasIRCheck && isa<ReturnInst>(BB.getTerminator());
}

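/// Copy the SSPLayoutKind computed for each protected alloca into the
/// corresponding MachineFrameInfo stack objects, skipping dead stack slots
/// and slots that do not correspond to an alloca.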
void StackProtector::copyToMachineFrameInfo(MachineFrameInfo &MFI) const {
  if (Layout.empty())
    return;

  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    if (MFI.isDeadObjectIndex(I))
      continue;

    const AllocaInst *AI = MFI.getObjectAllocation(I);
    if (!AI)
      continue;

    SSPLayoutMap::const_iterator LI = Layout.find(AI);
    if (LI == Layout.end())
      continue;

    MFI.setObjectSSPLayout(I, LI->second);
  }
}