xref: /netbsd-src/external/apache2/llvm/dist/llvm/lib/Transforms/IPO/LowerTypeTests.cpp (revision 82d56013d7b633d116a93943de88e08335357a7c)
1 //===- LowerTypeTests.cpp - type metadata lowering pass -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass lowers type metadata and calls to the llvm.type.test intrinsic.
10 // It also ensures that globals are properly laid out for the
11 // llvm.icall.branch.funnel intrinsic.
12 // See http://llvm.org/docs/TypeMetadata.html for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Transforms/IPO/LowerTypeTests.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/DenseMap.h"
20 #include "llvm/ADT/EquivalenceClasses.h"
21 #include "llvm/ADT/PointerUnion.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/TinyPtrVector.h"
27 #include "llvm/ADT/Triple.h"
28 #include "llvm/Analysis/TypeMetadataUtils.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/Constant.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/DerivedTypes.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/IR/GlobalAlias.h"
38 #include "llvm/IR/GlobalObject.h"
39 #include "llvm/IR/GlobalValue.h"
40 #include "llvm/IR/GlobalVariable.h"
41 #include "llvm/IR/IRBuilder.h"
42 #include "llvm/IR/InlineAsm.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/Intrinsics.h"
46 #include "llvm/IR/LLVMContext.h"
47 #include "llvm/IR/Metadata.h"
48 #include "llvm/IR/Module.h"
49 #include "llvm/IR/ModuleSummaryIndex.h"
50 #include "llvm/IR/ModuleSummaryIndexYAML.h"
51 #include "llvm/IR/Operator.h"
52 #include "llvm/IR/PassManager.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/Use.h"
55 #include "llvm/IR/User.h"
56 #include "llvm/IR/Value.h"
57 #include "llvm/InitializePasses.h"
58 #include "llvm/Pass.h"
59 #include "llvm/Support/Allocator.h"
60 #include "llvm/Support/Casting.h"
61 #include "llvm/Support/CommandLine.h"
62 #include "llvm/Support/Debug.h"
63 #include "llvm/Support/Error.h"
64 #include "llvm/Support/ErrorHandling.h"
65 #include "llvm/Support/FileSystem.h"
66 #include "llvm/Support/MathExtras.h"
67 #include "llvm/Support/MemoryBuffer.h"
68 #include "llvm/Support/TrailingObjects.h"
69 #include "llvm/Support/YAMLTraits.h"
70 #include "llvm/Support/raw_ostream.h"
71 #include "llvm/Transforms/IPO.h"
72 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
73 #include "llvm/Transforms/Utils/ModuleUtils.h"
74 #include <algorithm>
75 #include <cassert>
76 #include <cstdint>
77 #include <memory>
78 #include <set>
79 #include <string>
80 #include <system_error>
81 #include <utility>
82 #include <vector>
83 
84 using namespace llvm;
85 using namespace lowertypetests;
86 
87 #define DEBUG_TYPE "lowertypetests"
88 
89 STATISTIC(ByteArraySizeBits, "Byte array size in bits");
90 STATISTIC(ByteArraySizeBytes, "Byte array size in bytes");
91 STATISTIC(NumByteArraysCreated, "Number of byte arrays created");
92 STATISTIC(NumTypeTestCallsLowered, "Number of type test calls lowered");
93 STATISTIC(NumTypeIdDisjointSets, "Number of disjoint sets of type identifiers");
94 
95 static cl::opt<bool> AvoidReuse(
96     "lowertypetests-avoid-reuse",
97     cl::desc("Try to avoid reuse of byte array addresses using aliases"),
98     cl::Hidden, cl::init(true));
99 
100 static cl::opt<PassSummaryAction> ClSummaryAction(
101     "lowertypetests-summary-action",
102     cl::desc("What to do with the summary when running this pass"),
103     cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"),
104                clEnumValN(PassSummaryAction::Import, "import",
105                           "Import typeid resolutions from summary and globals"),
106                clEnumValN(PassSummaryAction::Export, "export",
107                           "Export typeid resolutions to summary and globals")),
108     cl::Hidden);
109 
110 static cl::opt<std::string> ClReadSummary(
111     "lowertypetests-read-summary",
112     cl::desc("Read summary from given YAML file before running pass"),
113     cl::Hidden);
114 
115 static cl::opt<std::string> ClWriteSummary(
116     "lowertypetests-write-summary",
117     cl::desc("Write summary to given YAML file after running pass"),
118     cl::Hidden);
119 
120 bool BitSetInfo::containsGlobalOffset(uint64_t Offset) const {
121   if (Offset < ByteOffset)
122     return false;
123 
124   if ((Offset - ByteOffset) % (uint64_t(1) << AlignLog2) != 0)
125     return false;
126 
127   uint64_t BitOffset = (Offset - ByteOffset) >> AlignLog2;
128   if (BitOffset >= BitSize)
129     return false;
130 
131   return Bits.count(BitOffset);
132 }
133 
134 void BitSetInfo::print(raw_ostream &OS) const {
135   OS << "offset " << ByteOffset << " size " << BitSize << " align "
136      << (1 << AlignLog2);
137 
138   if (isAllOnes()) {
139     OS << " all-ones\n";
140     return;
141   }
142 
143   OS << " { ";
144   for (uint64_t B : Bits)
145     OS << B << ' ';
146   OS << "}\n";
147 }
148 
149 BitSetInfo BitSetBuilder::build() {
150   if (Min > Max)
151     Min = 0;
152 
153   // Normalize each offset against the minimum observed offset, and compute
154   // the bitwise OR of each of the offsets. The number of trailing zeros
155   // in the mask gives us the log2 of the alignment of all offsets, which
156   // allows us to compress the bitset by only storing one bit per aligned
157   // address.
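  // Worked example: for Offsets {4, 16, 36} with Min = 4, the normalized
  // offsets are {0, 12, 32}, so Mask = 12 | 32 = 0b101100 and AlignLog2 = 2;
  // the loop below then stores bits {0, 3, 8} in a bitset of BitSize 9.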
158   uint64_t Mask = 0;
159   for (uint64_t &Offset : Offsets) {
160     Offset -= Min;
161     Mask |= Offset;
162   }
163 
164   BitSetInfo BSI;
165   BSI.ByteOffset = Min;
166 
167   BSI.AlignLog2 = 0;
168   if (Mask != 0)
169     BSI.AlignLog2 = countTrailingZeros(Mask, ZB_Undefined);
170 
171   // Build the compressed bitset while normalizing the offsets against the
172   // computed alignment.
173   BSI.BitSize = ((Max - Min) >> BSI.AlignLog2) + 1;
174   for (uint64_t Offset : Offsets) {
175     Offset >>= BSI.AlignLog2;
176     BSI.Bits.insert(Offset);
177   }
178 
179   return BSI;
180 }
181 
182 void GlobalLayoutBuilder::addFragment(const std::set<uint64_t> &F) {
183   // Create a new fragment to hold the layout for F.
184   Fragments.emplace_back();
185   std::vector<uint64_t> &Fragment = Fragments.back();
186   uint64_t FragmentIndex = Fragments.size() - 1;
187 
188   for (auto ObjIndex : F) {
189     uint64_t OldFragmentIndex = FragmentMap[ObjIndex];
190     if (OldFragmentIndex == 0) {
191       // We haven't seen this object index before, so just add it to the current
192       // fragment.
193       Fragment.push_back(ObjIndex);
194     } else {
195       // This index belongs to an existing fragment. Copy the elements of the
196       // old fragment into this one and clear the old fragment. We don't update
197       // the fragment map just yet, this ensures that any further references to
198       // indices from the old fragment in this fragment do not insert any more
199       // indices.
200       std::vector<uint64_t> &OldFragment = Fragments[OldFragmentIndex];
201       llvm::append_range(Fragment, OldFragment);
202       OldFragment.clear();
203     }
204   }
205 
206   // Update the fragment map to point our object indices to this fragment.
207   for (uint64_t ObjIndex : Fragment)
208     FragmentMap[ObjIndex] = FragmentIndex;
209 }
210 
211 void ByteArrayBuilder::allocate(const std::set<uint64_t> &Bits,
212                                 uint64_t BitSize, uint64_t &AllocByteOffset,
213                                 uint8_t &AllocMask) {
214   // Find the smallest current allocation.
215   unsigned Bit = 0;
216   for (unsigned I = 1; I != BitsPerByte; ++I)
217     if (BitAllocs[I] < BitAllocs[Bit])
218       Bit = I;
219 
220   AllocByteOffset = BitAllocs[Bit];
221 
222   // Add our size to it.
223   unsigned ReqSize = AllocByteOffset + BitSize;
224   BitAllocs[Bit] = ReqSize;
225   if (Bytes.size() < ReqSize)
226     Bytes.resize(ReqSize);
227 
228   // Set our bits.
229   AllocMask = 1 << Bit;
230   for (uint64_t B : Bits)
231     Bytes[AllocByteOffset + B] |= AllocMask;
232 }
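// Example: the builder packs up to eight bitsets into each byte, one per bit
// plane. A bitset with Bits = {0, 2} allocated in plane 3 at byte offset 10
// receives AllocMask = 0x08 and sets bit 3 of Bytes[10] and Bytes[12].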
233 
234 bool lowertypetests::isJumpTableCanonical(Function *F) {
235   if (F->isDeclarationForLinker())
236     return false;
237   auto *CI = mdconst::extract_or_null<ConstantInt>(
238       F->getParent()->getModuleFlag("CFI Canonical Jump Tables"));
239   if (!CI || CI->getZExtValue() != 0)
240     return true;
241   return F->hasFnAttribute("cfi-canonical-jump-table");
242 }
243 
244 namespace {
245 
246 struct ByteArrayInfo {
247   std::set<uint64_t> Bits;
248   uint64_t BitSize;
249   GlobalVariable *ByteArray;
250   GlobalVariable *MaskGlobal;
251   uint8_t *MaskPtr = nullptr;
252 };
253 
254 /// A POD-like structure that we use to store a global reference together with
255 /// its metadata types. In this pass we frequently need to query the set of
256 /// metadata types referenced by a global, which at the IR level is an expensive
257 /// operation involving a map lookup; this data structure helps to reduce the
258 /// number of times we need to do this lookup.
259 class GlobalTypeMember final : TrailingObjects<GlobalTypeMember, MDNode *> {
260   friend TrailingObjects;
261 
262   GlobalObject *GO;
263   size_t NTypes;
264 
265   // For functions: true if the jump table is canonical. This essentially means
266   // whether the canonical address (i.e. the symbol table entry) of the function
267   // is provided by the local jump table. This is normally the same as whether
268   // the function is defined locally, but if canonical jump tables are disabled
269   // by the user then the jump table never provides a canonical definition.
270   bool IsJumpTableCanonical;
271 
272   // For functions: true if this function is either defined or used in a ThinLTO
273   // module and its jump table entry needs to be exported to ThinLTO backends.
274   bool IsExported;
275 
276   size_t numTrailingObjects(OverloadToken<MDNode *>) const { return NTypes; }
277 
278 public:
279   static GlobalTypeMember *create(BumpPtrAllocator &Alloc, GlobalObject *GO,
280                                   bool IsJumpTableCanonical, bool IsExported,
281                                   ArrayRef<MDNode *> Types) {
282     auto *GTM = static_cast<GlobalTypeMember *>(Alloc.Allocate(
283         totalSizeToAlloc<MDNode *>(Types.size()), alignof(GlobalTypeMember)));
284     GTM->GO = GO;
285     GTM->NTypes = Types.size();
286     GTM->IsJumpTableCanonical = IsJumpTableCanonical;
287     GTM->IsExported = IsExported;
288     std::uninitialized_copy(Types.begin(), Types.end(),
289                             GTM->getTrailingObjects<MDNode *>());
290     return GTM;
291   }
292 
293   GlobalObject *getGlobal() const {
294     return GO;
295   }
296 
297   bool isJumpTableCanonical() const {
298     return IsJumpTableCanonical;
299   }
300 
301   bool isExported() const {
302     return IsExported;
303   }
304 
305   ArrayRef<MDNode *> types() const {
306     return makeArrayRef(getTrailingObjects<MDNode *>(), NTypes);
307   }
308 };
309 
310 struct ICallBranchFunnel final
311     : TrailingObjects<ICallBranchFunnel, GlobalTypeMember *> {
312   static ICallBranchFunnel *create(BumpPtrAllocator &Alloc, CallInst *CI,
313                                    ArrayRef<GlobalTypeMember *> Targets,
314                                    unsigned UniqueId) {
315     auto *Call = static_cast<ICallBranchFunnel *>(
316         Alloc.Allocate(totalSizeToAlloc<GlobalTypeMember *>(Targets.size()),
317                        alignof(ICallBranchFunnel)));
318     Call->CI = CI;
319     Call->UniqueId = UniqueId;
320     Call->NTargets = Targets.size();
321     std::uninitialized_copy(Targets.begin(), Targets.end(),
322                             Call->getTrailingObjects<GlobalTypeMember *>());
323     return Call;
324   }
325 
326   CallInst *CI;
327   ArrayRef<GlobalTypeMember *> targets() const {
328     return makeArrayRef(getTrailingObjects<GlobalTypeMember *>(), NTargets);
329   }
330 
331   unsigned UniqueId;
332 
333 private:
334   size_t NTargets;
335 };
336 
337 struct ScopedSaveAliaseesAndUsed {
338   Module &M;
339   SmallVector<GlobalValue *, 4> Used, CompilerUsed;
340   std::vector<std::pair<GlobalIndirectSymbol *, Function *>> FunctionAliases;
341 
342   ScopedSaveAliaseesAndUsed(Module &M) : M(M) {
343     // The users of this class want to replace all function references except
344     // for aliases and llvm.used/llvm.compiler.used with references to a jump
345     // table. We avoid replacing aliases in order to avoid introducing a double
346     // indirection (or an alias pointing to a declaration in ThinLTO mode), and
347     // we avoid replacing llvm.used/llvm.compiler.used because these global
348     // variables describe properties of the global, not the jump table (besides,
349     // offset references to the jump table in llvm.used are invalid).
350     // Unfortunately, LLVM doesn't have a "RAUW except for these (possibly
351     // indirect) users", so what we do is save the list of globals referenced by
352     // llvm.used/llvm.compiler.used and aliases, erase the used lists, let RAUW
353     // replace the aliasees and then set them back to their original values at
354     // the end.
355     if (GlobalVariable *GV = collectUsedGlobalVariables(M, Used, false))
356       GV->eraseFromParent();
357     if (GlobalVariable *GV = collectUsedGlobalVariables(M, CompilerUsed, true))
358       GV->eraseFromParent();
359 
360     for (auto &GIS : concat<GlobalIndirectSymbol>(M.aliases(), M.ifuncs())) {
361       // FIXME: This should look past all aliases not just interposable ones,
362       // see discussion on D65118.
363       if (auto *F =
364               dyn_cast<Function>(GIS.getIndirectSymbol()->stripPointerCasts()))
365         FunctionAliases.push_back({&GIS, F});
366     }
367   }
368 
369   ~ScopedSaveAliaseesAndUsed() {
370     appendToUsed(M, Used);
371     appendToCompilerUsed(M, CompilerUsed);
372 
373     for (auto P : FunctionAliases)
374       P.first->setIndirectSymbol(
375           ConstantExpr::getBitCast(P.second, P.first->getType()));
376   }
377 };
378 
379 class LowerTypeTestsModule {
380   Module &M;
381 
382   ModuleSummaryIndex *ExportSummary;
383   const ModuleSummaryIndex *ImportSummary;
384   // Set when the client has invoked this to simply drop all type test assume
385   // sequences.
386   bool DropTypeTests;
387 
388   Triple::ArchType Arch;
389   Triple::OSType OS;
390   Triple::ObjectFormatType ObjectFormat;
391 
392   IntegerType *Int1Ty = Type::getInt1Ty(M.getContext());
393   IntegerType *Int8Ty = Type::getInt8Ty(M.getContext());
394   PointerType *Int8PtrTy = Type::getInt8PtrTy(M.getContext());
395   ArrayType *Int8Arr0Ty = ArrayType::get(Type::getInt8Ty(M.getContext()), 0);
396   IntegerType *Int32Ty = Type::getInt32Ty(M.getContext());
397   PointerType *Int32PtrTy = PointerType::getUnqual(Int32Ty);
398   IntegerType *Int64Ty = Type::getInt64Ty(M.getContext());
399   IntegerType *IntPtrTy = M.getDataLayout().getIntPtrType(M.getContext(), 0);
400 
401   // Indirect function call index assignment counter for WebAssembly
402   uint64_t IndirectIndex = 1;
403 
404   // Mapping from type identifiers to the call sites that test them, as well as
405   // whether the type identifier needs to be exported to ThinLTO backends as
406   // part of the regular LTO phase of the ThinLTO pipeline (see exportTypeId).
407   struct TypeIdUserInfo {
408     std::vector<CallInst *> CallSites;
409     bool IsExported = false;
410   };
411   DenseMap<Metadata *, TypeIdUserInfo> TypeIdUsers;
412 
413   /// This structure describes how to lower type tests for a particular type
414   /// identifier. It is either built directly from the global analysis (during
415   /// regular LTO or the regular LTO phase of ThinLTO), or indirectly using type
416   /// identifier summaries and external symbol references (in ThinLTO backends).
417   struct TypeIdLowering {
418     TypeTestResolution::Kind TheKind = TypeTestResolution::Unsat;
419 
420     /// All except Unsat: the start address within the combined global.
421     Constant *OffsetedGlobal;
422 
423     /// ByteArray, Inline, AllOnes: log2 of the required global alignment
424     /// relative to the start address.
425     Constant *AlignLog2;
426 
427     /// ByteArray, Inline, AllOnes: one less than the size of the memory region
428     /// covering members of this type identifier as a multiple of 2^AlignLog2.
429     Constant *SizeM1;
430 
431     /// ByteArray: the byte array to test the address against.
432     Constant *TheByteArray;
433 
434     /// ByteArray: the bit mask to apply to bytes loaded from the byte array.
435     Constant *BitMask;
436 
437     /// Inline: the bit mask to test the address against.
438     Constant *InlineBits;
439   };
440 
441   std::vector<ByteArrayInfo> ByteArrayInfos;
442 
443   Function *WeakInitializerFn = nullptr;
444 
445   bool shouldExportConstantsAsAbsoluteSymbols();
446   uint8_t *exportTypeId(StringRef TypeId, const TypeIdLowering &TIL);
447   TypeIdLowering importTypeId(StringRef TypeId);
448   void importTypeTest(CallInst *CI);
449   void importFunction(Function *F, bool isJumpTableCanonical,
450                       std::vector<GlobalAlias *> &AliasesToErase);
451 
452   BitSetInfo
453   buildBitSet(Metadata *TypeId,
454               const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
455   ByteArrayInfo *createByteArray(BitSetInfo &BSI);
456   void allocateByteArrays();
457   Value *createBitSetTest(IRBuilder<> &B, const TypeIdLowering &TIL,
458                           Value *BitOffset);
459   void lowerTypeTestCalls(
460       ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
461       const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
462   Value *lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
463                            const TypeIdLowering &TIL);
464 
465   void buildBitSetsFromGlobalVariables(ArrayRef<Metadata *> TypeIds,
466                                        ArrayRef<GlobalTypeMember *> Globals);
467   unsigned getJumpTableEntrySize();
468   Type *getJumpTableEntryType();
469   void createJumpTableEntry(raw_ostream &AsmOS, raw_ostream &ConstraintOS,
470                             Triple::ArchType JumpTableArch,
471                             SmallVectorImpl<Value *> &AsmArgs, Function *Dest);
472   void verifyTypeMDNode(GlobalObject *GO, MDNode *Type);
473   void buildBitSetsFromFunctions(ArrayRef<Metadata *> TypeIds,
474                                  ArrayRef<GlobalTypeMember *> Functions);
475   void buildBitSetsFromFunctionsNative(ArrayRef<Metadata *> TypeIds,
476                                        ArrayRef<GlobalTypeMember *> Functions);
477   void buildBitSetsFromFunctionsWASM(ArrayRef<Metadata *> TypeIds,
478                                      ArrayRef<GlobalTypeMember *> Functions);
479   void
480   buildBitSetsFromDisjointSet(ArrayRef<Metadata *> TypeIds,
481                               ArrayRef<GlobalTypeMember *> Globals,
482                               ArrayRef<ICallBranchFunnel *> ICallBranchFunnels);
483 
484   void replaceWeakDeclarationWithJumpTablePtr(Function *F, Constant *JT,
485                                               bool IsJumpTableCanonical);
486   void moveInitializerToModuleConstructor(GlobalVariable *GV);
487   void findGlobalVariableUsersOf(Constant *C,
488                                  SmallSetVector<GlobalVariable *, 8> &Out);
489 
490   void createJumpTable(Function *F, ArrayRef<GlobalTypeMember *> Functions);
491 
492   /// replaceCfiUses - Go through the uses list for this definition
493   /// and make each use point to "V" instead of "this" when the use is outside
494 /// the block. This definition's use list is expected to have at least one
495 /// element.
495   /// Unlike replaceAllUsesWith this function skips blockaddr and direct call
496   /// uses.
497   void replaceCfiUses(Function *Old, Value *New, bool IsJumpTableCanonical);
498 
499   /// replaceDirectCalls - Go through the uses list for this definition and
500   /// replace each use, which is a direct function call.
501   void replaceDirectCalls(Value *Old, Value *New);
502 
503 public:
504   LowerTypeTestsModule(Module &M, ModuleSummaryIndex *ExportSummary,
505                        const ModuleSummaryIndex *ImportSummary,
506                        bool DropTypeTests);
507 
508   bool lower();
509 
510   // Lower the module using the action and summary passed as command line
511   // arguments. For testing purposes only.
512   static bool runForTesting(Module &M);
513 };
514 
515 struct LowerTypeTests : public ModulePass {
516   static char ID;
517 
518   bool UseCommandLine = false;
519 
520   ModuleSummaryIndex *ExportSummary;
521   const ModuleSummaryIndex *ImportSummary;
522   bool DropTypeTests;
523 
524   LowerTypeTests() : ModulePass(ID), UseCommandLine(true) {
525     initializeLowerTypeTestsPass(*PassRegistry::getPassRegistry());
526   }
527 
528   LowerTypeTests(ModuleSummaryIndex *ExportSummary,
529                  const ModuleSummaryIndex *ImportSummary, bool DropTypeTests)
530       : ModulePass(ID), ExportSummary(ExportSummary),
531         ImportSummary(ImportSummary), DropTypeTests(DropTypeTests) {
532     initializeLowerTypeTestsPass(*PassRegistry::getPassRegistry());
533   }
534 
535   bool runOnModule(Module &M) override {
536     if (UseCommandLine)
537       return LowerTypeTestsModule::runForTesting(M);
538     return LowerTypeTestsModule(M, ExportSummary, ImportSummary, DropTypeTests)
539         .lower();
540   }
541 };
542 
543 } // end anonymous namespace
544 
545 char LowerTypeTests::ID = 0;
546 
547 INITIALIZE_PASS(LowerTypeTests, "lowertypetests", "Lower type metadata", false,
548                 false)
549 
550 ModulePass *
551 llvm::createLowerTypeTestsPass(ModuleSummaryIndex *ExportSummary,
552                                const ModuleSummaryIndex *ImportSummary,
553                                bool DropTypeTests) {
554   return new LowerTypeTests(ExportSummary, ImportSummary, DropTypeTests);
555 }
556 
557 /// Build a bit set for TypeId using the object layouts in
558 /// GlobalLayout.
559 BitSetInfo LowerTypeTestsModule::buildBitSet(
560     Metadata *TypeId,
561     const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
562   BitSetBuilder BSB;
563 
564   // Compute the byte offset of each address associated with this type
565   // identifier.
566   for (auto &GlobalAndOffset : GlobalLayout) {
567     for (MDNode *Type : GlobalAndOffset.first->types()) {
568       if (Type->getOperand(1) != TypeId)
569         continue;
570       uint64_t Offset =
571           cast<ConstantInt>(
572               cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
573               ->getZExtValue();
574       BSB.addOffset(GlobalAndOffset.second + Offset);
575     }
576   }
577 
578   return BSB.build();
579 }
580 
581 /// Build a test that bit BitOffset mod sizeof(Bits)*8 is set in
582 /// Bits. This pattern matches the bt instruction on x86.
583 static Value *createMaskedBitTest(IRBuilder<> &B, Value *Bits,
584                                   Value *BitOffset) {
585   auto BitsType = cast<IntegerType>(Bits->getType());
586   unsigned BitWidth = BitsType->getBitWidth();
587 
588   BitOffset = B.CreateZExtOrTrunc(BitOffset, BitsType);
589   Value *BitIndex =
590       B.CreateAnd(BitOffset, ConstantInt::get(BitsType, BitWidth - 1));
591   Value *BitMask = B.CreateShl(ConstantInt::get(BitsType, 1), BitIndex);
592   Value *MaskedBits = B.CreateAnd(Bits, BitMask);
593   return B.CreateICmpNE(MaskedBits, ConstantInt::get(BitsType, 0));
594 }
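// The sequence above computes (Bits & (1 << (BitOffset % BitWidth))) != 0;
// forming an explicit mask rather than shifting Bits right is the shape the
// x86 backend pattern-matches to a single bt instruction.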
595 
596 ByteArrayInfo *LowerTypeTestsModule::createByteArray(BitSetInfo &BSI) {
597   // Create globals to stand in for byte arrays and masks. These never actually
598   // get initialized; we RAUW and erase them later in allocateByteArrays() once
599   // we know the offset and mask to use.
600   auto ByteArrayGlobal = new GlobalVariable(
601       M, Int8Ty, /*isConstant=*/true, GlobalValue::PrivateLinkage, nullptr);
602   auto MaskGlobal = new GlobalVariable(M, Int8Ty, /*isConstant=*/true,
603                                        GlobalValue::PrivateLinkage, nullptr);
604 
605   ByteArrayInfos.emplace_back();
606   ByteArrayInfo *BAI = &ByteArrayInfos.back();
607 
608   BAI->Bits = BSI.Bits;
609   BAI->BitSize = BSI.BitSize;
610   BAI->ByteArray = ByteArrayGlobal;
611   BAI->MaskGlobal = MaskGlobal;
612   return BAI;
613 }
614 
615 void LowerTypeTestsModule::allocateByteArrays() {
616   llvm::stable_sort(ByteArrayInfos,
617                     [](const ByteArrayInfo &BAI1, const ByteArrayInfo &BAI2) {
618                       return BAI1.BitSize > BAI2.BitSize;
619                     });
620 
621   std::vector<uint64_t> ByteArrayOffsets(ByteArrayInfos.size());
622 
623   ByteArrayBuilder BAB;
624   for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
625     ByteArrayInfo *BAI = &ByteArrayInfos[I];
626 
627     uint8_t Mask;
628     BAB.allocate(BAI->Bits, BAI->BitSize, ByteArrayOffsets[I], Mask);
629 
630     BAI->MaskGlobal->replaceAllUsesWith(
631         ConstantExpr::getIntToPtr(ConstantInt::get(Int8Ty, Mask), Int8PtrTy));
632     BAI->MaskGlobal->eraseFromParent();
633     if (BAI->MaskPtr)
634       *BAI->MaskPtr = Mask;
635   }
636 
637   Constant *ByteArrayConst = ConstantDataArray::get(M.getContext(), BAB.Bytes);
638   auto ByteArray =
639       new GlobalVariable(M, ByteArrayConst->getType(), /*isConstant=*/true,
640                          GlobalValue::PrivateLinkage, ByteArrayConst);
641 
642   for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
643     ByteArrayInfo *BAI = &ByteArrayInfos[I];
644 
645     Constant *Idxs[] = {ConstantInt::get(IntPtrTy, 0),
646                         ConstantInt::get(IntPtrTy, ByteArrayOffsets[I])};
647     Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(
648         ByteArrayConst->getType(), ByteArray, Idxs);
649 
650     // Create an alias instead of RAUW'ing the gep directly. On x86 this ensures
651     // that the pc-relative displacement is folded into the lea instead of the
652     // test instruction getting another displacement.
653     GlobalAlias *Alias = GlobalAlias::create(
654         Int8Ty, 0, GlobalValue::PrivateLinkage, "bits", GEP, &M);
655     BAI->ByteArray->replaceAllUsesWith(Alias);
656     BAI->ByteArray->eraseFromParent();
657   }
658 
659   ByteArraySizeBits = BAB.BitAllocs[0] + BAB.BitAllocs[1] + BAB.BitAllocs[2] +
660                       BAB.BitAllocs[3] + BAB.BitAllocs[4] + BAB.BitAllocs[5] +
661                       BAB.BitAllocs[6] + BAB.BitAllocs[7];
662   ByteArraySizeBytes = BAB.Bytes.size();
663 }
664 
665 /// Build a test that bit BitOffset is set in the type identifier that was
666 /// lowered to TIL, which must be either an Inline or a ByteArray.
667 Value *LowerTypeTestsModule::createBitSetTest(IRBuilder<> &B,
668                                               const TypeIdLowering &TIL,
669                                               Value *BitOffset) {
670   if (TIL.TheKind == TypeTestResolution::Inline) {
671     // If the bit set is sufficiently small, we can avoid a load by bit testing
672     // a constant.
673     return createMaskedBitTest(B, TIL.InlineBits, BitOffset);
674   } else {
675     Constant *ByteArray = TIL.TheByteArray;
676     if (AvoidReuse && !ImportSummary) {
677       // Each use of the byte array uses a different alias. This makes the
678       // backend less likely to reuse previously computed byte array addresses,
679       // improving the security of the CFI mechanism based on this pass.
680       // This won't work when importing because TheByteArray is external.
681       ByteArray = GlobalAlias::create(Int8Ty, 0, GlobalValue::PrivateLinkage,
682                                       "bits_use", ByteArray, &M);
683     }
684 
685     Value *ByteAddr = B.CreateGEP(Int8Ty, ByteArray, BitOffset);
686     Value *Byte = B.CreateLoad(Int8Ty, ByteAddr);
687 
688     Value *ByteAndMask =
689         B.CreateAnd(Byte, ConstantExpr::getPtrToInt(TIL.BitMask, Int8Ty));
690     return B.CreateICmpNE(ByteAndMask, ConstantInt::get(Int8Ty, 0));
691   }
692 }
693 
694 static bool isKnownTypeIdMember(Metadata *TypeId, const DataLayout &DL,
695                                 Value *V, uint64_t COffset) {
696   if (auto GV = dyn_cast<GlobalObject>(V)) {
697     SmallVector<MDNode *, 2> Types;
698     GV->getMetadata(LLVMContext::MD_type, Types);
699     for (MDNode *Type : Types) {
700       if (Type->getOperand(1) != TypeId)
701         continue;
702       uint64_t Offset =
703           cast<ConstantInt>(
704               cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
705               ->getZExtValue();
706       if (COffset == Offset)
707         return true;
708     }
709     return false;
710   }
711 
712   if (auto GEP = dyn_cast<GEPOperator>(V)) {
713     APInt APOffset(DL.getPointerSizeInBits(0), 0);
714     bool Result = GEP->accumulateConstantOffset(DL, APOffset);
715     if (!Result)
716       return false;
717     COffset += APOffset.getZExtValue();
718     return isKnownTypeIdMember(TypeId, DL, GEP->getPointerOperand(), COffset);
719   }
720 
721   if (auto Op = dyn_cast<Operator>(V)) {
722     if (Op->getOpcode() == Instruction::BitCast)
723       return isKnownTypeIdMember(TypeId, DL, Op->getOperand(0), COffset);
724 
725     if (Op->getOpcode() == Instruction::Select)
726       return isKnownTypeIdMember(TypeId, DL, Op->getOperand(1), COffset) &&
727              isKnownTypeIdMember(TypeId, DL, Op->getOperand(2), COffset);
728   }
729 
730   return false;
731 }
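// Example: if @g carries !type !{i64 0, !"_ZTS1A"}, a type test of @g itself
// (or of a bitcast/GEP of @g that folds to offset 0) against "_ZTS1A" is
// statically true, and lowerTypeTestCall() below folds it to i1 true.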
732 
733 /// Lower a llvm.type.test call to its implementation. Returns the value to
734 /// replace the call with.
735 Value *LowerTypeTestsModule::lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
736                                                const TypeIdLowering &TIL) {
737   // Delay lowering if the resolution is currently unknown.
738   if (TIL.TheKind == TypeTestResolution::Unknown)
739     return nullptr;
740   if (TIL.TheKind == TypeTestResolution::Unsat)
741     return ConstantInt::getFalse(M.getContext());
742 
743   Value *Ptr = CI->getArgOperand(0);
744   const DataLayout &DL = M.getDataLayout();
745   if (isKnownTypeIdMember(TypeId, DL, Ptr, 0))
746     return ConstantInt::getTrue(M.getContext());
747 
748   BasicBlock *InitialBB = CI->getParent();
749 
750   IRBuilder<> B(CI);
751 
752   Value *PtrAsInt = B.CreatePtrToInt(Ptr, IntPtrTy);
753 
754   Constant *OffsetedGlobalAsInt =
755       ConstantExpr::getPtrToInt(TIL.OffsetedGlobal, IntPtrTy);
756   if (TIL.TheKind == TypeTestResolution::Single)
757     return B.CreateICmpEQ(PtrAsInt, OffsetedGlobalAsInt);
758 
759   Value *PtrOffset = B.CreateSub(PtrAsInt, OffsetedGlobalAsInt);
760 
761   // We need to check that the offset both falls within our range and is
762   // suitably aligned. We can check both properties at the same time by
763   // performing a right rotate by log2(alignment) followed by an integer
764   // comparison against the bitset size. The rotate will move the lower
765   // order bits that need to be zero into the higher order bits of the
766   // result, causing the comparison to fail if they are nonzero. The rotate
767   // also conveniently gives us a bit offset to use during the load from
768   // the bitset.
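  // For example, with 64-bit pointers, AlignLog2 == 3 and SizeM1 == 3:
  // PtrOffset 16 rotates right to 2, which is <= 3 and thus in range, while
  // PtrOffset 20 has a nonzero low bit that the rotate moves into the high
  // bits, so the unsigned comparison against SizeM1 fails.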
769   Value *OffsetSHR =
770       B.CreateLShr(PtrOffset, ConstantExpr::getZExt(TIL.AlignLog2, IntPtrTy));
771   Value *OffsetSHL = B.CreateShl(
772       PtrOffset, ConstantExpr::getZExt(
773                      ConstantExpr::getSub(
774                          ConstantInt::get(Int8Ty, DL.getPointerSizeInBits(0)),
775                          TIL.AlignLog2),
776                      IntPtrTy));
777   Value *BitOffset = B.CreateOr(OffsetSHR, OffsetSHL);
778 
779   Value *OffsetInRange = B.CreateICmpULE(BitOffset, TIL.SizeM1);
780 
781   // If the bit set is all ones, testing against it is unnecessary.
782   if (TIL.TheKind == TypeTestResolution::AllOnes)
783     return OffsetInRange;
784 
785   // See if the intrinsic is used in the following common pattern:
786   //   br(llvm.type.test(...), thenbb, elsebb)
787   // where nothing happens between the type test and the br.
788   // If so, create slightly simpler IR.
789   if (CI->hasOneUse())
790     if (auto *Br = dyn_cast<BranchInst>(*CI->user_begin()))
791       if (CI->getNextNode() == Br) {
792         BasicBlock *Then = InitialBB->splitBasicBlock(CI->getIterator());
793         BasicBlock *Else = Br->getSuccessor(1);
794         BranchInst *NewBr = BranchInst::Create(Then, Else, OffsetInRange);
795         NewBr->setMetadata(LLVMContext::MD_prof,
796                            Br->getMetadata(LLVMContext::MD_prof));
797         ReplaceInstWithInst(InitialBB->getTerminator(), NewBr);
798 
799         // Update phis in Else resulting from InitialBB being split
800         for (auto &Phi : Else->phis())
801           Phi.addIncoming(Phi.getIncomingValueForBlock(Then), InitialBB);
802 
803         IRBuilder<> ThenB(CI);
804         return createBitSetTest(ThenB, TIL, BitOffset);
805       }
806 
807   IRBuilder<> ThenB(SplitBlockAndInsertIfThen(OffsetInRange, CI, false));
808 
809   // Now that we know that the offset is in range and aligned, load the
810   // appropriate bit from the bitset.
811   Value *Bit = createBitSetTest(ThenB, TIL, BitOffset);
812 
813   // The value we want is 0 if we came directly from the initial block
814   // (having failed the range or alignment checks), or the loaded bit if
815   // we came from the block in which we loaded it.
816   B.SetInsertPoint(CI);
817   PHINode *P = B.CreatePHI(Int1Ty, 2);
818   P->addIncoming(ConstantInt::get(Int1Ty, 0), InitialBB);
819   P->addIncoming(Bit, ThenB.GetInsertBlock());
820   return P;
821 }
822 
823 /// Given a disjoint set of type identifiers and globals, lay out the globals,
824 /// build the bit sets and lower the llvm.type.test calls.
825 void LowerTypeTestsModule::buildBitSetsFromGlobalVariables(
826     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Globals) {
827   // Build a new global with the combined contents of the referenced globals.
828   // This global is a struct whose even-indexed elements contain the original
829   // contents of the referenced globals and whose odd-indexed elements contain
830   // any padding required to align the next element to the next power of 2 plus
831   // any additional padding required to meet its alignment requirements.
832   std::vector<Constant *> GlobalInits;
833   const DataLayout &DL = M.getDataLayout();
834   DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
835   Align MaxAlign;
836   uint64_t CurOffset = 0;
837   uint64_t DesiredPadding = 0;
838   for (GlobalTypeMember *G : Globals) {
839     auto *GV = cast<GlobalVariable>(G->getGlobal());
840     Align Alignment =
841         DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
842     MaxAlign = std::max(MaxAlign, Alignment);
843     uint64_t GVOffset = alignTo(CurOffset + DesiredPadding, Alignment);
844     GlobalLayout[G] = GVOffset;
845     if (GVOffset != 0) {
846       uint64_t Padding = GVOffset - CurOffset;
847       GlobalInits.push_back(
848           ConstantAggregateZero::get(ArrayType::get(Int8Ty, Padding)));
849     }
850 
851     GlobalInits.push_back(GV->getInitializer());
852     uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
853     CurOffset = GVOffset + InitSize;
854 
855     // Compute the amount of padding that we'd like for the next element.
856     DesiredPadding = NextPowerOf2(InitSize - 1) - InitSize;
857 
858     // Experiments of different caps with Chromium on both x64 and ARM64
859     // have shown that the 32-byte cap generates the smallest binary on
860     // both platforms while different caps yield similar performance.
861     // (see https://lists.llvm.org/pipermail/llvm-dev/2018-July/124694.html)
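    // Padding example: a 40-byte global wants NextPowerOf2(39) - 40 == 24
    // bytes of padding; a 65-byte global would want 63 bytes, which exceeds
    // the cap and is reduced below to alignTo(65, 32) - 65 == 31 bytes.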
862     if (DesiredPadding > 32)
863       DesiredPadding = alignTo(InitSize, 32) - InitSize;
864   }
865 
866   Constant *NewInit = ConstantStruct::getAnon(M.getContext(), GlobalInits);
867   auto *CombinedGlobal =
868       new GlobalVariable(M, NewInit->getType(), /*isConstant=*/true,
869                          GlobalValue::PrivateLinkage, NewInit);
870   CombinedGlobal->setAlignment(MaxAlign);
871 
872   StructType *NewTy = cast<StructType>(NewInit->getType());
873   lowerTypeTestCalls(TypeIds, CombinedGlobal, GlobalLayout);
874 
875   // Build aliases pointing to offsets into the combined global for each
876   // global from which we built the combined global, and replace references
877   // to the original globals with references to the aliases.
878   for (unsigned I = 0; I != Globals.size(); ++I) {
879     GlobalVariable *GV = cast<GlobalVariable>(Globals[I]->getGlobal());
880 
881     // Multiply by 2 to account for padding elements.
882     Constant *CombinedGlobalIdxs[] = {ConstantInt::get(Int32Ty, 0),
883                                       ConstantInt::get(Int32Ty, I * 2)};
884     Constant *CombinedGlobalElemPtr = ConstantExpr::getGetElementPtr(
885         NewInit->getType(), CombinedGlobal, CombinedGlobalIdxs);
886     assert(GV->getType()->getAddressSpace() == 0);
887     GlobalAlias *GAlias =
888         GlobalAlias::create(NewTy->getElementType(I * 2), 0, GV->getLinkage(),
889                             "", CombinedGlobalElemPtr, &M);
890     GAlias->setVisibility(GV->getVisibility());
891     GAlias->takeName(GV);
892     GV->replaceAllUsesWith(GAlias);
893     GV->eraseFromParent();
894   }
895 }
896 
897 bool LowerTypeTestsModule::shouldExportConstantsAsAbsoluteSymbols() {
898   return (Arch == Triple::x86 || Arch == Triple::x86_64) &&
899          ObjectFormat == Triple::ELF;
900 }
901 
902 /// Export the given type identifier so that ThinLTO backends may import it.
903 /// Type identifiers are exported by adding coarse-grained information about how
904 /// to test the type identifier to the summary, and creating symbols in the
905 /// object file (aliases and absolute symbols) containing fine-grained
906 /// information about the type identifier.
907 ///
908 /// Returns a pointer to the location in which to store the bitmask, if
909 /// applicable.
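///
/// For example, a ByteArray resolution for type id "_ZTS1A" produces hidden
/// aliases __typeid__ZTS1A_global_addr and __typeid__ZTS1A_byte_array, while
/// align, size_m1 and bit_mask are stored either in the summary or, on x86
/// ELF targets, as absolute symbols (see
/// shouldExportConstantsAsAbsoluteSymbols()).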
910 uint8_t *LowerTypeTestsModule::exportTypeId(StringRef TypeId,
911                                             const TypeIdLowering &TIL) {
912   TypeTestResolution &TTRes =
913       ExportSummary->getOrInsertTypeIdSummary(TypeId).TTRes;
914   TTRes.TheKind = TIL.TheKind;
915 
916   auto ExportGlobal = [&](StringRef Name, Constant *C) {
917     GlobalAlias *GA =
918         GlobalAlias::create(Int8Ty, 0, GlobalValue::ExternalLinkage,
919                             "__typeid_" + TypeId + "_" + Name, C, &M);
920     GA->setVisibility(GlobalValue::HiddenVisibility);
921   };
922 
923   auto ExportConstant = [&](StringRef Name, uint64_t &Storage, Constant *C) {
924     if (shouldExportConstantsAsAbsoluteSymbols())
925       ExportGlobal(Name, ConstantExpr::getIntToPtr(C, Int8PtrTy));
926     else
927       Storage = cast<ConstantInt>(C)->getZExtValue();
928   };
929 
930   if (TIL.TheKind != TypeTestResolution::Unsat)
931     ExportGlobal("global_addr", TIL.OffsetedGlobal);
932 
933   if (TIL.TheKind == TypeTestResolution::ByteArray ||
934       TIL.TheKind == TypeTestResolution::Inline ||
935       TIL.TheKind == TypeTestResolution::AllOnes) {
936     ExportConstant("align", TTRes.AlignLog2, TIL.AlignLog2);
937     ExportConstant("size_m1", TTRes.SizeM1, TIL.SizeM1);
938 
939     uint64_t BitSize = cast<ConstantInt>(TIL.SizeM1)->getZExtValue() + 1;
940     if (TIL.TheKind == TypeTestResolution::Inline)
941       TTRes.SizeM1BitWidth = (BitSize <= 32) ? 5 : 6;
942     else
943       TTRes.SizeM1BitWidth = (BitSize <= 128) ? 7 : 32;
944   }
945 
946   if (TIL.TheKind == TypeTestResolution::ByteArray) {
947     ExportGlobal("byte_array", TIL.TheByteArray);
948     if (shouldExportConstantsAsAbsoluteSymbols())
949       ExportGlobal("bit_mask", TIL.BitMask);
950     else
951       return &TTRes.BitMask;
952   }
953 
954   if (TIL.TheKind == TypeTestResolution::Inline)
955     ExportConstant("inline_bits", TTRes.InlineBits, TIL.InlineBits);
956 
957   return nullptr;
958 }
959 
960 LowerTypeTestsModule::TypeIdLowering
961 LowerTypeTestsModule::importTypeId(StringRef TypeId) {
962   const TypeIdSummary *TidSummary = ImportSummary->getTypeIdSummary(TypeId);
963   if (!TidSummary)
964     return {}; // Unsat: no globals match this type id.
965   const TypeTestResolution &TTRes = TidSummary->TTRes;
966 
967   TypeIdLowering TIL;
968   TIL.TheKind = TTRes.TheKind;
969 
970   auto ImportGlobal = [&](StringRef Name) {
971     // Give the global a type of length 0 so that alias analysis does not
972     // assume it cannot alias any other global.
973     Constant *C = M.getOrInsertGlobal(("__typeid_" + TypeId + "_" + Name).str(),
974                                       Int8Arr0Ty);
975     if (auto *GV = dyn_cast<GlobalVariable>(C))
976       GV->setVisibility(GlobalValue::HiddenVisibility);
977     C = ConstantExpr::getBitCast(C, Int8PtrTy);
978     return C;
979   };
980 
981   auto ImportConstant = [&](StringRef Name, uint64_t Const, unsigned AbsWidth,
982                             Type *Ty) {
983     if (!shouldExportConstantsAsAbsoluteSymbols()) {
984       Constant *C =
985           ConstantInt::get(isa<IntegerType>(Ty) ? Ty : Int64Ty, Const);
986       if (!isa<IntegerType>(Ty))
987         C = ConstantExpr::getIntToPtr(C, Ty);
988       return C;
989     }
990 
991     Constant *C = ImportGlobal(Name);
992     auto *GV = cast<GlobalVariable>(C->stripPointerCasts());
993     if (isa<IntegerType>(Ty))
994       C = ConstantExpr::getPtrToInt(C, Ty);
995     if (GV->getMetadata(LLVMContext::MD_absolute_symbol))
996       return C;
997 
998     auto SetAbsRange = [&](uint64_t Min, uint64_t Max) {
999       auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Min));
1000       auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Max));
1001       GV->setMetadata(LLVMContext::MD_absolute_symbol,
1002                       MDNode::get(M.getContext(), {MinC, MaxC}));
1003     };
1004     if (AbsWidth == IntPtrTy->getBitWidth())
1005       SetAbsRange(~0ull, ~0ull); // Full set.
1006     else
1007       SetAbsRange(0, 1ull << AbsWidth);
1008     return C;
1009   };
1010 
1011   if (TIL.TheKind != TypeTestResolution::Unsat)
1012     TIL.OffsetedGlobal = ImportGlobal("global_addr");
1013 
1014   if (TIL.TheKind == TypeTestResolution::ByteArray ||
1015       TIL.TheKind == TypeTestResolution::Inline ||
1016       TIL.TheKind == TypeTestResolution::AllOnes) {
1017     TIL.AlignLog2 = ImportConstant("align", TTRes.AlignLog2, 8, Int8Ty);
1018     TIL.SizeM1 =
1019         ImportConstant("size_m1", TTRes.SizeM1, TTRes.SizeM1BitWidth, IntPtrTy);
1020   }
1021 
1022   if (TIL.TheKind == TypeTestResolution::ByteArray) {
1023     TIL.TheByteArray = ImportGlobal("byte_array");
1024     TIL.BitMask = ImportConstant("bit_mask", TTRes.BitMask, 8, Int8PtrTy);
1025   }
1026 
1027   if (TIL.TheKind == TypeTestResolution::Inline)
1028     TIL.InlineBits = ImportConstant(
1029         "inline_bits", TTRes.InlineBits, 1 << TTRes.SizeM1BitWidth,
1030         TTRes.SizeM1BitWidth <= 5 ? Int32Ty : Int64Ty);
1031 
1032   return TIL;
1033 }
1034 
1035 void LowerTypeTestsModule::importTypeTest(CallInst *CI) {
1036   auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
1037   if (!TypeIdMDVal)
1038     report_fatal_error("Second argument of llvm.type.test must be metadata");
1039 
1040   auto TypeIdStr = dyn_cast<MDString>(TypeIdMDVal->getMetadata());
1041   // If this is a local unpromoted type, which doesn't have a metadata string,
1042   // treat as Unknown and delay lowering, so that we can still utilize it for
1043   // later optimizations.
1044   if (!TypeIdStr)
1045     return;
1046 
1047   TypeIdLowering TIL = importTypeId(TypeIdStr->getString());
1048   Value *Lowered = lowerTypeTestCall(TypeIdStr, CI, TIL);
1049   if (Lowered) {
1050     CI->replaceAllUsesWith(Lowered);
1051     CI->eraseFromParent();
1052   }
1053 }
1054 
1055 // ThinLTO backend: the function F has a jump table entry; update this module
1056 // accordingly. isJumpTableCanonical describes the type of the jump table entry.
1057 void LowerTypeTestsModule::importFunction(
1058     Function *F, bool isJumpTableCanonical,
1059     std::vector<GlobalAlias *> &AliasesToErase) {
1060   assert(F->getType()->getAddressSpace() == 0);
1061 
1062   GlobalValue::VisibilityTypes Visibility = F->getVisibility();
1063   std::string Name = std::string(F->getName());
1064 
1065   if (F->isDeclarationForLinker() && isJumpTableCanonical) {
1066     // Non-dso_local functions may be overridden at run time,
1067     // so don't short-circuit them.
1068     if (F->isDSOLocal()) {
1069       Function *RealF = Function::Create(F->getFunctionType(),
1070                                          GlobalValue::ExternalLinkage,
1071                                          F->getAddressSpace(),
1072                                          Name + ".cfi", &M);
1073       RealF->setVisibility(GlobalVariable::HiddenVisibility);
1074       replaceDirectCalls(F, RealF);
1075     }
1076     return;
1077   }
1078 
1079   Function *FDecl;
1080   if (!isJumpTableCanonical) {
1081     // Either a declaration of an external function or a reference to a locally
1082     // defined jump table.
1083     FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
1084                              F->getAddressSpace(), Name + ".cfi_jt", &M);
1085     FDecl->setVisibility(GlobalValue::HiddenVisibility);
1086   } else {
1087     F->setName(Name + ".cfi");
1088     F->setLinkage(GlobalValue::ExternalLinkage);
1089     FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
1090                              F->getAddressSpace(), Name, &M);
1091     FDecl->setVisibility(Visibility);
1092     Visibility = GlobalValue::HiddenVisibility;
1093 
1094     // Delete aliases pointing to this function, they'll be re-created in the
1095     // merged output. Don't do it yet though because ScopedSaveAliaseesAndUsed
1096     // will want to reset the aliasees first.
1097     for (auto &U : F->uses()) {
1098       if (auto *A = dyn_cast<GlobalAlias>(U.getUser())) {
1099         Function *AliasDecl = Function::Create(
1100             F->getFunctionType(), GlobalValue::ExternalLinkage,
1101             F->getAddressSpace(), "", &M);
1102         AliasDecl->takeName(A);
1103         A->replaceAllUsesWith(AliasDecl);
1104         AliasesToErase.push_back(A);
1105       }
1106     }
1107   }
1108 
1109   if (F->hasExternalWeakLinkage())
1110     replaceWeakDeclarationWithJumpTablePtr(F, FDecl, isJumpTableCanonical);
1111   else
1112     replaceCfiUses(F, FDecl, isJumpTableCanonical);
1113 
1114   // Set visibility late because it's used in replaceCfiUses() to determine
1115   // whether uses need to be replaced.
1116   F->setVisibility(Visibility);
1117 }
1118 
1119 void LowerTypeTestsModule::lowerTypeTestCalls(
1120     ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
1121     const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
1122   CombinedGlobalAddr = ConstantExpr::getBitCast(CombinedGlobalAddr, Int8PtrTy);
1123 
1124   // For each type identifier in this disjoint set...
1125   for (Metadata *TypeId : TypeIds) {
1126     // Build the bitset.
1127     BitSetInfo BSI = buildBitSet(TypeId, GlobalLayout);
1128     LLVM_DEBUG({
1129       if (auto MDS = dyn_cast<MDString>(TypeId))
1130         dbgs() << MDS->getString() << ": ";
1131       else
1132         dbgs() << "<unnamed>: ";
1133       BSI.print(dbgs());
1134     });
1135 
1136     ByteArrayInfo *BAI = nullptr;
1137     TypeIdLowering TIL;
1138     TIL.OffsetedGlobal = ConstantExpr::getGetElementPtr(
1139         Int8Ty, CombinedGlobalAddr, ConstantInt::get(IntPtrTy, BSI.ByteOffset));
1140     TIL.AlignLog2 = ConstantInt::get(Int8Ty, BSI.AlignLog2);
1141     TIL.SizeM1 = ConstantInt::get(IntPtrTy, BSI.BitSize - 1);
1142     if (BSI.isAllOnes()) {
1143       TIL.TheKind = (BSI.BitSize == 1) ? TypeTestResolution::Single
1144                                        : TypeTestResolution::AllOnes;
1145     } else if (BSI.BitSize <= 64) {
1146       TIL.TheKind = TypeTestResolution::Inline;
1147       uint64_t InlineBits = 0;
1148       for (auto Bit : BSI.Bits)
1149         InlineBits |= uint64_t(1) << Bit;
1150       if (InlineBits == 0)
1151         TIL.TheKind = TypeTestResolution::Unsat;
1152       else
1153         TIL.InlineBits = ConstantInt::get(
1154             (BSI.BitSize <= 32) ? Int32Ty : Int64Ty, InlineBits);
1155     } else {
1156       TIL.TheKind = TypeTestResolution::ByteArray;
1157       ++NumByteArraysCreated;
1158       BAI = createByteArray(BSI);
1159       TIL.TheByteArray = BAI->ByteArray;
1160       TIL.BitMask = BAI->MaskGlobal;
1161     }
1162 
1163     TypeIdUserInfo &TIUI = TypeIdUsers[TypeId];
1164 
1165     if (TIUI.IsExported) {
1166       uint8_t *MaskPtr = exportTypeId(cast<MDString>(TypeId)->getString(), TIL);
1167       if (BAI)
1168         BAI->MaskPtr = MaskPtr;
1169     }
1170 
1171     // Lower each call to llvm.type.test for this type identifier.
1172     for (CallInst *CI : TIUI.CallSites) {
1173       ++NumTypeTestCallsLowered;
1174       Value *Lowered = lowerTypeTestCall(TypeId, CI, TIL);
1175       if (Lowered) {
1176         CI->replaceAllUsesWith(Lowered);
1177         CI->eraseFromParent();
1178       }
1179     }
1180   }
1181 }
1182 
1183 void LowerTypeTestsModule::verifyTypeMDNode(GlobalObject *GO, MDNode *Type) {
1184   if (Type->getNumOperands() != 2)
1185     report_fatal_error("All operands of type metadata must have 2 elements");
1186 
1187   if (GO->isThreadLocal())
1188     report_fatal_error("Bit set element may not be thread-local");
1189   if (isa<GlobalVariable>(GO) && GO->hasSection())
1190     report_fatal_error(
1191         "A member of a type identifier may not have an explicit section");
1192 
1193   // FIXME: We previously checked that global var member of a type identifier
1194   // must be a definition, but the IR linker may leave type metadata on
1195   // declarations. We should restore this check after fixing PR31759.
1196 
1197   auto OffsetConstMD = dyn_cast<ConstantAsMetadata>(Type->getOperand(0));
1198   if (!OffsetConstMD)
1199     report_fatal_error("Type offset must be a constant");
1200   auto OffsetInt = dyn_cast<ConstantInt>(OffsetConstMD->getValue());
1201   if (!OffsetInt)
1202     report_fatal_error("Type offset must be an integer constant");
1203 }
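// A well-formed annotation looks like:
//   @vt = constant ..., !type !0
//   !0 = !{i64 16, !"_ZTS1A"}
// where operand 0 is a byte offset within the global and operand 1 is the
// type identifier associated with that address.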
1204 
1205 static const unsigned kX86JumpTableEntrySize = 8;
1206 static const unsigned kARMJumpTableEntrySize = 4;
1207 static const unsigned kARMBTIJumpTableEntrySize = 8;
1208 
1209 unsigned LowerTypeTestsModule::getJumpTableEntrySize() {
1210   switch (Arch) {
1211     case Triple::x86:
1212     case Triple::x86_64:
1213       return kX86JumpTableEntrySize;
1214     case Triple::arm:
1215     case Triple::thumb:
1216       return kARMJumpTableEntrySize;
1217     case Triple::aarch64:
1218       if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
1219             M.getModuleFlag("branch-target-enforcement")))
1220         if (BTE->getZExtValue())
1221           return kARMBTIJumpTableEntrySize;
1222       return kARMJumpTableEntrySize;
1223     default:
1224       report_fatal_error("Unsupported architecture for jump tables");
1225   }
1226 }
1227 
1228 // Create a jump table entry for the target. This consists of an instruction
1229 // sequence containing a relative branch to Dest. Appends inline asm text,
1230 // constraints and arguments to AsmOS, ConstraintOS and AsmArgs.
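// For example, each 8-byte x86-64 entry is:
//   jmp ${N:c}@plt
//   int3
//   int3
//   int3
// so the address of an entry is a valid function pointer that tail-jumps to
// the real function body.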
1231 void LowerTypeTestsModule::createJumpTableEntry(
1232     raw_ostream &AsmOS, raw_ostream &ConstraintOS,
1233     Triple::ArchType JumpTableArch, SmallVectorImpl<Value *> &AsmArgs,
1234     Function *Dest) {
1235   unsigned ArgIndex = AsmArgs.size();
1236 
1237   if (JumpTableArch == Triple::x86 || JumpTableArch == Triple::x86_64) {
1238     AsmOS << "jmp ${" << ArgIndex << ":c}@plt\n";
1239     AsmOS << "int3\nint3\nint3\n";
1240   } else if (JumpTableArch == Triple::arm) {
1241     AsmOS << "b $" << ArgIndex << "\n";
1242   } else if (JumpTableArch == Triple::aarch64) {
1243     if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
1244           Dest->getParent()->getModuleFlag("branch-target-enforcement")))
1245       if (BTE->getZExtValue())
1246         AsmOS << "bti c\n";
1247     AsmOS << "b $" << ArgIndex << "\n";
1248   } else if (JumpTableArch == Triple::thumb) {
1249     AsmOS << "b.w $" << ArgIndex << "\n";
1250   } else {
1251     report_fatal_error("Unsupported architecture for jump tables");
1252   }
1253 
1254   ConstraintOS << (ArgIndex > 0 ? ",s" : "s");
1255   AsmArgs.push_back(Dest);
1256 }
1257 
1258 Type *LowerTypeTestsModule::getJumpTableEntryType() {
1259   return ArrayType::get(Int8Ty, getJumpTableEntrySize());
1260 }
1261 
1262 /// Given a disjoint set of type identifiers and functions, build the bit sets
1263 /// and lower the llvm.type.test calls, architecture dependently.
1264 void LowerTypeTestsModule::buildBitSetsFromFunctions(
1265     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1266   if (Arch == Triple::x86 || Arch == Triple::x86_64 || Arch == Triple::arm ||
1267       Arch == Triple::thumb || Arch == Triple::aarch64)
1268     buildBitSetsFromFunctionsNative(TypeIds, Functions);
1269   else if (Arch == Triple::wasm32 || Arch == Triple::wasm64)
1270     buildBitSetsFromFunctionsWASM(TypeIds, Functions);
1271   else
1272     report_fatal_error("Unsupported architecture for jump tables");
1273 }
1274 
1275 void LowerTypeTestsModule::moveInitializerToModuleConstructor(
1276     GlobalVariable *GV) {
1277   if (WeakInitializerFn == nullptr) {
1278     WeakInitializerFn = Function::Create(
1279         FunctionType::get(Type::getVoidTy(M.getContext()),
1280                           /* IsVarArg */ false),
1281         GlobalValue::InternalLinkage,
1282         M.getDataLayout().getProgramAddressSpace(),
1283         "__cfi_global_var_init", &M);
1284     BasicBlock *BB =
1285         BasicBlock::Create(M.getContext(), "entry", WeakInitializerFn);
1286     ReturnInst::Create(M.getContext(), BB);
1287     WeakInitializerFn->setSection(
1288         ObjectFormat == Triple::MachO
1289             ? "__TEXT,__StaticInit,regular,pure_instructions"
1290             : ".text.startup");
1291     // This code is equivalent to relocation application, and should run at the
1292     // earliest possible time (i.e. with the highest priority).
1293     appendToGlobalCtors(M, WeakInitializerFn, /* Priority */ 0);
1294   }
1295 
1296   IRBuilder<> IRB(WeakInitializerFn->getEntryBlock().getTerminator());
1297   GV->setConstant(false);
1298   IRB.CreateAlignedStore(GV->getInitializer(), GV, GV->getAlign());
1299   GV->setInitializer(Constant::getNullValue(GV->getValueType()));
1300 }
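// A minimal sketch of the effect on IR (names illustrative): a global such as
//   @gv = constant void ()* @f
// becomes
//   @gv = global void ()* null
// and __cfi_global_var_init gains a "store void ()* @f, void ()** @gv", so the
// initializer is applied at program startup rather than via relocations.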
1301 
1302 void LowerTypeTestsModule::findGlobalVariableUsersOf(
1303     Constant *C, SmallSetVector<GlobalVariable *, 8> &Out) {
1304   for (auto *U : C->users()) {
1305     if (auto *GV = dyn_cast<GlobalVariable>(U))
1306       Out.insert(GV);
1307     else if (auto *C2 = dyn_cast<Constant>(U))
1308       findGlobalVariableUsersOf(C2, Out);
1309   }
1310 }
1311 
1312 // Replace all uses of F with (F ? JT : 0).
1313 void LowerTypeTestsModule::replaceWeakDeclarationWithJumpTablePtr(
1314     Function *F, Constant *JT, bool IsJumpTableCanonical) {
1315   // The target expression cannot appear in a constant initializer on most
1316   // (all?) targets. Switch to a runtime initializer.
1317   SmallSetVector<GlobalVariable *, 8> GlobalVarUsers;
1318   findGlobalVariableUsersOf(F, GlobalVarUsers);
1319   for (auto GV : GlobalVarUsers)
1320     moveInitializerToModuleConstructor(GV);
1321 
1322   // Cannot RAUW F with an expression that uses F. Replace with a temporary
1323   // placeholder first.
1324   Function *PlaceholderFn =
1325       Function::Create(cast<FunctionType>(F->getValueType()),
1326                        GlobalValue::ExternalWeakLinkage,
1327                        F->getAddressSpace(), "", &M);
1328   replaceCfiUses(F, PlaceholderFn, IsJumpTableCanonical);
1329 
1330   Constant *Target = ConstantExpr::getSelect(
1331       ConstantExpr::getICmp(CmpInst::ICMP_NE, F,
1332                             Constant::getNullValue(F->getType())),
1333       JT, Constant::getNullValue(F->getType()));
1334   PlaceholderFn->replaceAllUsesWith(Target);
1335   PlaceholderFn->eraseFromParent();
1336 }
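// Illustrative result: a former constant use of @f is rewritten to
//   select (icmp ne void ()* @f, null), JT, null
// which evaluates to null while the extern_weak declaration is unresolved, and
// to the jump table pointer once a definition exists.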
1337 
1338 static bool isThumbFunction(Function *F, Triple::ArchType ModuleArch) {
1339   Attribute TFAttr = F->getFnAttribute("target-features");
1340   if (TFAttr.isValid()) {
1341     SmallVector<StringRef, 6> Features;
1342     TFAttr.getValueAsString().split(Features, ',');
1343     for (StringRef Feature : Features) {
1344       if (Feature == "-thumb-mode")
1345         return false;
1346       else if (Feature == "+thumb-mode")
1347         return true;
1348     }
1349   }
1350 
1351   return ModuleArch == Triple::thumb;
1352 }
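// For example, a function carrying the attribute
//   "target-features"="+thumb-mode,+neon"
// is treated as Thumb regardless of the module architecture ("+neon" is just
// an illustrative extra feature); "-thumb-mode" likewise forces ARM.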
1353 
1354 // Each jump table must be either ARM or Thumb as a whole for the bit-test math
1355 // to work. Pick one that matches the majority of members to minimize interop
1356 // veneers inserted by the linker.
1357 static Triple::ArchType
1358 selectJumpTableArmEncoding(ArrayRef<GlobalTypeMember *> Functions,
1359                            Triple::ArchType ModuleArch) {
1360   if (ModuleArch != Triple::arm && ModuleArch != Triple::thumb)
1361     return ModuleArch;
1362 
1363   unsigned ArmCount = 0, ThumbCount = 0;
1364   for (const auto GTM : Functions) {
1365     if (!GTM->isJumpTableCanonical()) {
1366       // PLT stubs are always ARM.
1367       // FIXME: This is the wrong heuristic for non-canonical jump tables.
1368       ++ArmCount;
1369       continue;
1370     }
1371 
1372     Function *F = cast<Function>(GTM->getGlobal());
1373     ++(isThumbFunction(F, ModuleArch) ? ThumbCount : ArmCount);
1374   }
1375 
1376   return ArmCount > ThumbCount ? Triple::arm : Triple::thumb;
1377 }
1378 
1379 void LowerTypeTestsModule::createJumpTable(
1380     Function *F, ArrayRef<GlobalTypeMember *> Functions) {
1381   std::string AsmStr, ConstraintStr;
1382   raw_string_ostream AsmOS(AsmStr), ConstraintOS(ConstraintStr);
1383   SmallVector<Value *, 16> AsmArgs;
1384   AsmArgs.reserve(Functions.size() * 2);
1385 
1386   Triple::ArchType JumpTableArch = selectJumpTableArmEncoding(Functions, Arch);
1387 
1388   for (unsigned I = 0; I != Functions.size(); ++I)
1389     createJumpTableEntry(AsmOS, ConstraintOS, JumpTableArch, AsmArgs,
1390                          cast<Function>(Functions[I]->getGlobal()));
1391 
1392   // Align the whole table by entry size.
1393   F->setAlignment(Align(getJumpTableEntrySize()));
1394   // Skip prologue.
1395   // Disabled on win32 due to https://llvm.org/bugs/show_bug.cgi?id=28641#c3.
1396   // Luckily, this function does not get any prologue even without the
1397   // attribute.
1398   if (OS != Triple::Win32)
1399     F->addFnAttr(Attribute::Naked);
1400   if (JumpTableArch == Triple::arm)
1401     F->addFnAttr("target-features", "-thumb-mode");
1402   if (JumpTableArch == Triple::thumb) {
1403     F->addFnAttr("target-features", "+thumb-mode");
1404     // Thumb jump table assembly needs Thumb2. The following attribute is added
1405     // by Clang for -march=armv7.
1406     F->addFnAttr("target-cpu", "cortex-a8");
1407   }
1408   if (JumpTableArch == Triple::aarch64) {
1409     F->addFnAttr("branch-target-enforcement", "false");
1410     F->addFnAttr("sign-return-address", "none");
1411   }
1412   // Make sure we don't emit .eh_frame for this function.
1413   F->addFnAttr(Attribute::NoUnwind);
1414 
1415   BasicBlock *BB = BasicBlock::Create(M.getContext(), "entry", F);
1416   IRBuilder<> IRB(BB);
1417 
1418   SmallVector<Type *, 16> ArgTypes;
1419   ArgTypes.reserve(AsmArgs.size());
1420   for (const auto &Arg : AsmArgs)
1421     ArgTypes.push_back(Arg->getType());
1422   InlineAsm *JumpTableAsm =
1423       InlineAsm::get(FunctionType::get(IRB.getVoidTy(), ArgTypes, false),
1424                      AsmOS.str(), ConstraintOS.str(),
1425                      /*hasSideEffects=*/true);
1426 
1427   IRB.CreateCall(JumpTableAsm, AsmArgs);
1428   IRB.CreateUnreachable();
1429 }
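// The filled-in jump table function conceptually looks like this (illustrative
// two-entry x86-64 sketch; attribute group #0 stands in for the attributes set
// above):
//   define private void @.cfi.jumptable() #0 align 8 {
//     call void asm sideeffect "jmp ${0:c}@plt\0Aint3\0A...", "s,s"
//         (void ()* @f.cfi, void ()* @g.cfi)
//     unreachable
//   }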
1430 
1431 /// Given a disjoint set of type identifiers and functions, build a jump table
1432 /// for the functions, build the bit sets and lower the llvm.type.test calls.
1433 void LowerTypeTestsModule::buildBitSetsFromFunctionsNative(
1434     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1435   // Unlike the global bitset builder, the function bitset builder cannot
1436   // re-arrange functions in a particular order and base its calculations on the
1437   // layout of the functions' entry points, as we have no idea how large a
1438   // particular function will end up being (the size could even depend on what
1439   // this pass does!) Instead, we build a jump table, which is a block of code
1440   // consisting of one branch instruction for each of the functions in the bit
1441   // set that branches to the target function, and redirect any taken function
1442   // addresses to the corresponding jump table entry. In the object file's
1443   // symbol table, the symbols for the target functions also refer to the jump
1444   // table entries, so that addresses taken outside the module will pass any
1445   // verification done inside the module.
1446   //
1447   // In more concrete terms, suppose we have three functions f, g, h which are
1448   // of the same type, and a function foo that returns their addresses:
1449   //
1450   // f:
1451   // mov 0, %eax
1452   // ret
1453   //
1454   // g:
1455   // mov 1, %eax
1456   // ret
1457   //
1458   // h:
1459   // mov 2, %eax
1460   // ret
1461   //
1462   // foo:
1463   // mov f, %eax
1464   // mov g, %edx
1465   // mov h, %ecx
1466   // ret
1467   //
1468   // We output the jump table as module-level inline asm string. The end result
1469   // will (conceptually) look like this:
1470   //
1471   // f = .cfi.jumptable
1472 // g = .cfi.jumptable + 8
1473 // h = .cfi.jumptable + 16
1474   // .cfi.jumptable:
1475   // jmp f.cfi  ; 5 bytes
1476   // int3       ; 1 byte
1477   // int3       ; 1 byte
1478   // int3       ; 1 byte
1479   // jmp g.cfi  ; 5 bytes
1480   // int3       ; 1 byte
1481   // int3       ; 1 byte
1482   // int3       ; 1 byte
1483   // jmp h.cfi  ; 5 bytes
1484   // int3       ; 1 byte
1485   // int3       ; 1 byte
1486   // int3       ; 1 byte
1487   //
1488   // f.cfi:
1489   // mov 0, %eax
1490   // ret
1491   //
1492   // g.cfi:
1493   // mov 1, %eax
1494   // ret
1495   //
1496   // h.cfi:
1497   // mov 2, %eax
1498   // ret
1499   //
1500   // foo:
1501   // mov f, %eax
1502   // mov g, %edx
1503   // mov h, %ecx
1504   // ret
1505   //
1506   // Because the addresses of f, g, h are evenly spaced at a power of 2, in the
1507   // normal case the check can be carried out using the same kind of simple
1508   // arithmetic that we normally use for globals.
1509 
1510   // FIXME: find a better way to represent the jumptable in the IR.
1511   assert(!Functions.empty());
1512 
1513   // Build a simple layout based on the regular layout of jump tables.
1514   DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1515   unsigned EntrySize = getJumpTableEntrySize();
1516   for (unsigned I = 0; I != Functions.size(); ++I)
1517     GlobalLayout[Functions[I]] = I * EntrySize;
1518 
1519   Function *JumpTableFn =
1520       Function::Create(FunctionType::get(Type::getVoidTy(M.getContext()),
1521                                          /* IsVarArg */ false),
1522                        GlobalValue::PrivateLinkage,
1523                        M.getDataLayout().getProgramAddressSpace(),
1524                        ".cfi.jumptable", &M);
1525   ArrayType *JumpTableType =
1526       ArrayType::get(getJumpTableEntryType(), Functions.size());
1527   auto JumpTable =
1528       ConstantExpr::getPointerCast(JumpTableFn, JumpTableType->getPointerTo(0));
1529 
1530   lowerTypeTestCalls(TypeIds, JumpTable, GlobalLayout);
1531 
1532   {
1533     ScopedSaveAliaseesAndUsed S(M);
1534 
1535     // Build aliases pointing to offsets into the jump table, and replace
1536     // references to the original functions with references to the aliases.
1537     for (unsigned I = 0; I != Functions.size(); ++I) {
1538       Function *F = cast<Function>(Functions[I]->getGlobal());
1539       bool IsJumpTableCanonical = Functions[I]->isJumpTableCanonical();
1540 
1541       Constant *CombinedGlobalElemPtr = ConstantExpr::getBitCast(
1542           ConstantExpr::getInBoundsGetElementPtr(
1543               JumpTableType, JumpTable,
1544               ArrayRef<Constant *>{ConstantInt::get(IntPtrTy, 0),
1545                                    ConstantInt::get(IntPtrTy, I)}),
1546           F->getType());
1547       if (Functions[I]->isExported()) {
1548         if (IsJumpTableCanonical) {
1549           ExportSummary->cfiFunctionDefs().insert(std::string(F->getName()));
1550         } else {
1551           GlobalAlias *JtAlias = GlobalAlias::create(
1552               F->getValueType(), 0, GlobalValue::ExternalLinkage,
1553               F->getName() + ".cfi_jt", CombinedGlobalElemPtr, &M);
1554           JtAlias->setVisibility(GlobalValue::HiddenVisibility);
1555           ExportSummary->cfiFunctionDecls().insert(std::string(F->getName()));
1556         }
1557       }
1558       if (!IsJumpTableCanonical) {
1559         if (F->hasExternalWeakLinkage())
1560           replaceWeakDeclarationWithJumpTablePtr(F, CombinedGlobalElemPtr,
1561                                                  IsJumpTableCanonical);
1562         else
1563           replaceCfiUses(F, CombinedGlobalElemPtr, IsJumpTableCanonical);
1564       } else {
1565         assert(F->getType()->getAddressSpace() == 0);
1566 
1567         GlobalAlias *FAlias =
1568             GlobalAlias::create(F->getValueType(), 0, F->getLinkage(), "",
1569                                 CombinedGlobalElemPtr, &M);
1570         FAlias->setVisibility(F->getVisibility());
1571         FAlias->takeName(F);
1572         if (FAlias->hasName())
1573           F->setName(FAlias->getName() + ".cfi");
1574         replaceCfiUses(F, FAlias, IsJumpTableCanonical);
1575         if (!F->hasLocalLinkage())
1576           F->setVisibility(GlobalVariable::HiddenVisibility);
1577       }
1578     }
1579   }
1580 
1581   createJumpTable(JumpTableFn, Functions);
1582 }
1583 
1584 /// Assign a dummy layout using an incrementing counter, tag each function
1585 /// with its index represented as metadata, and lower each type test to an
1586 /// integer range comparison. During generation of the indirect function call
1587 /// table in the backend, it will assign the given indexes.
1588 /// Note: Dynamic linking is not supported, as the WebAssembly ABI has not yet
1589 /// been finalized.
1590 void LowerTypeTestsModule::buildBitSetsFromFunctionsWASM(
1591     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1592   assert(!Functions.empty());
1593 
1594   // Build consecutive monotonic integer ranges for each call target set.
1595   DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1596 
1597   for (GlobalTypeMember *GTM : Functions) {
1598     Function *F = cast<Function>(GTM->getGlobal());
1599 
1600     // Skip functions that are not address taken, to avoid bloating the table.
1601     if (!F->hasAddressTaken())
1602       continue;
1603 
1604     // Store metadata with the index for each function.
1605     MDNode *MD = MDNode::get(F->getContext(),
1606                              ArrayRef<Metadata *>(ConstantAsMetadata::get(
1607                                  ConstantInt::get(Int64Ty, IndirectIndex))));
1608     F->setMetadata("wasm.index", MD);
1609 
1610     // Assign the counter value.
1611     GlobalLayout[GTM] = IndirectIndex++;
1612   }
1613 
1614   // The indirect function table index space starts at zero, so pass a NULL
1615   // pointer as the subtracted "jump table" offset.
1616   lowerTypeTestCalls(TypeIds, ConstantPointerNull::get(Int32PtrTy),
1617                      GlobalLayout);
1618 }
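// Illustrative output: each address-taken function is tagged
//   !wasm.index !N   where !N = !{i64 <slot>}
// and the backend's indirect function table generation later assigns the
// actual table indexes to match.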
1619 
1620 void LowerTypeTestsModule::buildBitSetsFromDisjointSet(
1621     ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Globals,
1622     ArrayRef<ICallBranchFunnel *> ICallBranchFunnels) {
1623   DenseMap<Metadata *, uint64_t> TypeIdIndices;
1624   for (unsigned I = 0; I != TypeIds.size(); ++I)
1625     TypeIdIndices[TypeIds[I]] = I;
1626 
1627   // For each type identifier, build a set of indices that refer to members of
1628   // the type identifier.
1629   std::vector<std::set<uint64_t>> TypeMembers(TypeIds.size());
1630   unsigned GlobalIndex = 0;
1631   DenseMap<GlobalTypeMember *, uint64_t> GlobalIndices;
1632   for (GlobalTypeMember *GTM : Globals) {
1633     for (MDNode *Type : GTM->types()) {
1634       // Type = { offset, type identifier }
1635       auto I = TypeIdIndices.find(Type->getOperand(1));
1636       if (I != TypeIdIndices.end())
1637         TypeMembers[I->second].insert(GlobalIndex);
1638     }
1639     GlobalIndices[GTM] = GlobalIndex;
1640     GlobalIndex++;
1641   }
1642 
1643   for (ICallBranchFunnel *JT : ICallBranchFunnels) {
1644     TypeMembers.emplace_back();
1645     std::set<uint64_t> &TMSet = TypeMembers.back();
1646     for (GlobalTypeMember *T : JT->targets())
1647       TMSet.insert(GlobalIndices[T]);
1648   }
1649 
1650   // Order the sets of indices by size. The GlobalLayoutBuilder works best
1651   // when given small index sets first.
1652   llvm::stable_sort(TypeMembers, [](const std::set<uint64_t> &O1,
1653                                     const std::set<uint64_t> &O2) {
1654     return O1.size() < O2.size();
1655   });
1656 
1657   // Create a GlobalLayoutBuilder and provide it with index sets as layout
1658   // fragments. The GlobalLayoutBuilder tries to lay out members of fragments as
1659   // close together as possible.
1660   GlobalLayoutBuilder GLB(Globals.size());
1661   for (auto &&MemSet : TypeMembers)
1662     GLB.addFragment(MemSet);
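  // Worked example (illustrative): with fragments {0}, {0,1} and {1,2} added
  // in that order, the builder can produce the order 0, 1, 2, keeping each
  // fragment's members adjacent in the final layout.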
1663 
1664   // Build a vector of globals with the computed layout.
1665   bool IsGlobalSet =
1666       Globals.empty() || isa<GlobalVariable>(Globals[0]->getGlobal());
1667   std::vector<GlobalTypeMember *> OrderedGTMs(Globals.size());
1668   auto OGTMI = OrderedGTMs.begin();
1669   for (auto &&F : GLB.Fragments) {
1670     for (auto &&Offset : F) {
1671       if (IsGlobalSet != isa<GlobalVariable>(Globals[Offset]->getGlobal()))
1672         report_fatal_error("Type identifier may not contain both global "
1673                            "variables and functions");
1674       *OGTMI++ = Globals[Offset];
1675     }
1676   }
1677 
1678   // Build the bitsets from this disjoint set.
1679   if (IsGlobalSet)
1680     buildBitSetsFromGlobalVariables(TypeIds, OrderedGTMs);
1681   else
1682     buildBitSetsFromFunctions(TypeIds, OrderedGTMs);
1683 }
1684 
1685 /// Lower all type tests in this module.
1686 LowerTypeTestsModule::LowerTypeTestsModule(
1687     Module &M, ModuleSummaryIndex *ExportSummary,
1688     const ModuleSummaryIndex *ImportSummary, bool DropTypeTests)
1689     : M(M), ExportSummary(ExportSummary), ImportSummary(ImportSummary),
1690       DropTypeTests(DropTypeTests) {
1691   assert(!(ExportSummary && ImportSummary));
1692   Triple TargetTriple(M.getTargetTriple());
1693   Arch = TargetTriple.getArch();
1694   OS = TargetTriple.getOS();
1695   ObjectFormat = TargetTriple.getObjectFormat();
1696 }
1697 
1698 bool LowerTypeTestsModule::runForTesting(Module &M) {
1699   ModuleSummaryIndex Summary(/*HaveGVs=*/false);
1700 
1701   // Handle the command-line summary arguments. This code is for testing
1702   // purposes only, so we handle errors directly.
1703   if (!ClReadSummary.empty()) {
1704     ExitOnError ExitOnErr("-lowertypetests-read-summary: " + ClReadSummary +
1705                           ": ");
1706     auto ReadSummaryFile =
1707         ExitOnErr(errorOrToExpected(MemoryBuffer::getFile(ClReadSummary)));
1708 
1709     yaml::Input In(ReadSummaryFile->getBuffer());
1710     In >> Summary;
1711     ExitOnErr(errorCodeToError(In.error()));
1712   }
1713 
1714   bool Changed =
1715       LowerTypeTestsModule(
1716           M, ClSummaryAction == PassSummaryAction::Export ? &Summary : nullptr,
1717           ClSummaryAction == PassSummaryAction::Import ? &Summary : nullptr,
1718           /*DropTypeTests*/ false)
1719           .lower();
1720 
1721   if (!ClWriteSummary.empty()) {
1722     ExitOnError ExitOnErr("-lowertypetests-write-summary: " + ClWriteSummary +
1723                           ": ");
1724     std::error_code EC;
1725     raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_TextWithCRLF);
1726     ExitOnErr(errorCodeToError(EC));
1727 
1728     yaml::Output Out(OS);
1729     Out << Summary;
1730   }
1731 
1732   return Changed;
1733 }
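// Typical testing invocation (the summary-action flag name is assumed here;
// the read/write flag names appear in the error strings above):
//   opt -lowertypetests -lowertypetests-summary-action=export \
//       -lowertypetests-write-summary=summary.yaml in.ll -S -o out.ll
// -lowertypetests-read-summary feeds a YAML summary back in the same way.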
1734 
1735 static bool isDirectCall(Use &U) {
1736   auto *Usr = dyn_cast<CallInst>(U.getUser());
1737   if (Usr) {
1738     auto *CB = dyn_cast<CallBase>(Usr);
1739     if (CB && CB->isCallee(&U))
1740       return true;
1741   }
1742   return false;
1743 }
1744 
1745 void LowerTypeTestsModule::replaceCfiUses(Function *Old, Value *New,
1746                                           bool IsJumpTableCanonical) {
1747   SmallSetVector<Constant *, 4> Constants;
1748   auto UI = Old->use_begin(), E = Old->use_end();
1749   for (; UI != E;) {
1750     Use &U = *UI;
1751     ++UI;
1752 
1753     // Skip block addresses.
1754     if (isa<BlockAddress>(U.getUser()))
1755       continue;
1756 
1757     // Skip direct calls to externally defined or non-dso_local functions.
1758     if (isDirectCall(U) && (Old->isDSOLocal() || !IsJumpTableCanonical))
1759       continue;
1760 
1761     // Constants must be handled specially: we cannot call replaceUsesOfWith
1762     // on a constant because constants are uniqued.
1763     if (auto *C = dyn_cast<Constant>(U.getUser())) {
1764       if (!isa<GlobalValue>(C)) {
1765         // Save unique users to avoid processing operand replacement
1766         // more than once.
1767         Constants.insert(C);
1768         continue;
1769       }
1770     }
1771 
1772     U.set(New);
1773   }
1774 
1775   // Process operand replacement of saved constants.
1776   for (auto *C : Constants)
1777     C->handleOperandChange(Old, New);
1778 }
1779 
1780 void LowerTypeTestsModule::replaceDirectCalls(Value *Old, Value *New) {
1781   Old->replaceUsesWithIf(New, [](Use &U) { return isDirectCall(U); });
1782 }
1783 
1784 bool LowerTypeTestsModule::lower() {
1785   Function *TypeTestFunc =
1786       M.getFunction(Intrinsic::getName(Intrinsic::type_test));
1787 
1788   if (DropTypeTests && TypeTestFunc) {
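    // The pattern being dropped looks like (illustrative):
    //   %p = call i1 @llvm.type.test(i8* %ptr, metadata !"_ZTS1A")
    //   call void @llvm.assume(i1 %p)
    // Both the assumes and the type test calls themselves are erased below.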
1789     for (auto UI = TypeTestFunc->use_begin(), UE = TypeTestFunc->use_end();
1790          UI != UE;) {
1791       auto *CI = cast<CallInst>((*UI++).getUser());
1792       // Find and erase llvm.assume intrinsics for this llvm.type.test call.
1793       for (auto CIU = CI->use_begin(), CIUE = CI->use_end(); CIU != CIUE;)
1794         if (auto *Assume = dyn_cast<AssumeInst>((*CIU++).getUser()))
1795           Assume->eraseFromParent();
1796       CI->eraseFromParent();
1797     }
1798 
1799     // We have deleted the type intrinsics, so we no longer have enough
1800     // information to reason about the liveness of virtual function pointers
1801     // in GlobalDCE.
1802     for (GlobalVariable &GV : M.globals())
1803       GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
1804 
1805     return true;
1806   }
1807 
1808   // If only some of the modules were split, we cannot correctly perform
1809   // this transformation. We already checked for the presence of type tests
1810   // with partially split modules during the thin link, and would have emitted
1811   // an error if any were found, so here we can simply return.
1812   if ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) ||
1813       (ImportSummary && ImportSummary->partiallySplitLTOUnits()))
1814     return false;
1815 
1816   Function *ICallBranchFunnelFunc =
1817       M.getFunction(Intrinsic::getName(Intrinsic::icall_branch_funnel));
1818   if ((!TypeTestFunc || TypeTestFunc->use_empty()) &&
1819       (!ICallBranchFunnelFunc || ICallBranchFunnelFunc->use_empty()) &&
1820       !ExportSummary && !ImportSummary)
1821     return false;
1822 
1823   if (ImportSummary) {
1824     if (TypeTestFunc) {
1825       for (auto UI = TypeTestFunc->use_begin(), UE = TypeTestFunc->use_end();
1826            UI != UE;) {
1827         auto *CI = cast<CallInst>((*UI++).getUser());
1828         importTypeTest(CI);
1829       }
1830     }
1831 
1832     if (ICallBranchFunnelFunc && !ICallBranchFunnelFunc->use_empty())
1833       report_fatal_error(
1834           "unexpected call to llvm.icall.branch.funnel during import phase");
1835 
1836     SmallVector<Function *, 8> Defs;
1837     SmallVector<Function *, 8> Decls;
1838     for (auto &F : M) {
1839       // CFI functions are either external, or promoted. A local function may
1840       // have the same name, but it's not the one we are looking for.
1841       if (F.hasLocalLinkage())
1842         continue;
1843       if (ImportSummary->cfiFunctionDefs().count(std::string(F.getName())))
1844         Defs.push_back(&F);
1845       else if (ImportSummary->cfiFunctionDecls().count(
1846                    std::string(F.getName())))
1847         Decls.push_back(&F);
1848     }
1849 
1850     std::vector<GlobalAlias *> AliasesToErase;
1851     {
1852       ScopedSaveAliaseesAndUsed S(M);
1853       for (auto F : Defs)
1854         importFunction(F, /*isJumpTableCanonical*/ true, AliasesToErase);
1855       for (auto F : Decls)
1856         importFunction(F, /*isJumpTableCanonical*/ false, AliasesToErase);
1857     }
1858     for (GlobalAlias *GA : AliasesToErase)
1859       GA->eraseFromParent();
1860 
1861     return true;
1862   }
1863 
1864   // Equivalence class set containing type identifiers and the globals that
1865   // reference them. This is used to partition the set of type identifiers in
1866   // the module into disjoint sets.
1867   using GlobalClassesTy = EquivalenceClasses<
1868       PointerUnion<GlobalTypeMember *, Metadata *, ICallBranchFunnel *>>;
1869   GlobalClassesTy GlobalClasses;
1870 
1871   // Verify the type metadata and build a few data structures to let us
1872   // efficiently enumerate the type identifiers associated with a global:
1873   // a list of GlobalTypeMembers (a GlobalObject stored alongside a vector
1874   // of associated type metadata) and a mapping from type identifiers to their
1875   // list of GlobalTypeMembers and last observed index in the list of globals.
1876   // The indices will be used later to deterministically order the list of type
1877   // identifiers.
1878   BumpPtrAllocator Alloc;
1879   struct TIInfo {
1880     unsigned UniqueId;
1881     std::vector<GlobalTypeMember *> RefGlobals;
1882   };
1883   DenseMap<Metadata *, TIInfo> TypeIdInfo;
1884   unsigned CurUniqueId = 0;
1885   SmallVector<MDNode *, 2> Types;
1886 
1887   // Cross-DSO CFI emits jumptable entries for exported functions as well as
1888   // address-taken functions, in case they are address taken in other modules.
1889   const bool CrossDsoCfi = M.getModuleFlag("Cross-DSO CFI") != nullptr;
1890 
1891   struct ExportedFunctionInfo {
1892     CfiFunctionLinkage Linkage;
1893     MDNode *FuncMD; // {name, linkage, type[, type...]}
1894   };
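  // An illustrative cfi.functions entry (the exact integer type encoding the
  // linkage may vary):
  //   !cfi.functions = !{!0}
  //   !0 = !{!"f", i8 0, !1}       ; name, CFL_Definition, type metadata
  //   !1 = !{i64 0, !"_ZTSFvvE"}   ; {offset, type identifier}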
1895   DenseMap<StringRef, ExportedFunctionInfo> ExportedFunctions;
1896   if (ExportSummary) {
1897     // A set of all functions that are address taken by a live global object.
1898     DenseSet<GlobalValue::GUID> AddressTaken;
1899     for (auto &I : *ExportSummary)
1900       for (auto &GVS : I.second.SummaryList)
1901         if (GVS->isLive())
1902           for (auto &Ref : GVS->refs())
1903             AddressTaken.insert(Ref.getGUID());
1904 
1905     NamedMDNode *CfiFunctionsMD = M.getNamedMetadata("cfi.functions");
1906     if (CfiFunctionsMD) {
1907       for (auto FuncMD : CfiFunctionsMD->operands()) {
1908         assert(FuncMD->getNumOperands() >= 2);
1909         StringRef FunctionName =
1910             cast<MDString>(FuncMD->getOperand(0))->getString();
1911         CfiFunctionLinkage Linkage = static_cast<CfiFunctionLinkage>(
1912             cast<ConstantAsMetadata>(FuncMD->getOperand(1))
1913                 ->getValue()
1914                 ->getUniqueInteger()
1915                 .getZExtValue());
1916         const GlobalValue::GUID GUID = GlobalValue::getGUID(
1917                 GlobalValue::dropLLVMManglingEscape(FunctionName));
1918         // Do not emit jumptable entries for functions that are not live and
1919         // have no live references (and are not exported with cross-DSO CFI).
1920         if (!ExportSummary->isGUIDLive(GUID))
1921           continue;
1922         if (!AddressTaken.count(GUID)) {
1923           if (!CrossDsoCfi || Linkage != CFL_Definition)
1924             continue;
1925 
1926           bool Exported = false;
1927           if (auto VI = ExportSummary->getValueInfo(GUID))
1928             for (auto &GVS : VI.getSummaryList())
1929               if (GVS->isLive() && !GlobalValue::isLocalLinkage(GVS->linkage()))
1930                 Exported = true;
1931 
1932           if (!Exported)
1933             continue;
1934         }
1935         auto P = ExportedFunctions.insert({FunctionName, {Linkage, FuncMD}});
1936         if (!P.second && P.first->second.Linkage != CFL_Definition)
1937           P.first->second = {Linkage, FuncMD};
1938       }
1939 
1940       for (const auto &P : ExportedFunctions) {
1941         StringRef FunctionName = P.first;
1942         CfiFunctionLinkage Linkage = P.second.Linkage;
1943         MDNode *FuncMD = P.second.FuncMD;
1944         Function *F = M.getFunction(FunctionName);
1945         if (F && F->hasLocalLinkage()) {
1946           // Locally defined function that happens to have the same name as a
1947           // function defined in a ThinLTO module. Rename it to move it out of
1948           // the way of the external reference that we're about to create.
1949           // Note that setName will find a unique name for the function, so even
1950           // if there is an existing function with the suffix there won't be a
1951           // name collision.
1952           F->setName(F->getName() + ".1");
1953           F = nullptr;
1954         }
1955 
1956         if (!F)
1957           F = Function::Create(
1958               FunctionType::get(Type::getVoidTy(M.getContext()), false),
1959               GlobalVariable::ExternalLinkage,
1960               M.getDataLayout().getProgramAddressSpace(), FunctionName, &M);
1961 
1962         // If the function is available_externally, remove its definition so
1963         // that it is handled the same way as a declaration. Later we will try
1964         // to create an alias using this function's linkage, which will fail if
1965         // the linkage is available_externally. This will also result in us
1966         // following the code path below to replace the type metadata.
1967         if (F->hasAvailableExternallyLinkage()) {
1968           F->setLinkage(GlobalValue::ExternalLinkage);
1969           F->deleteBody();
1970           F->setComdat(nullptr);
1971           F->clearMetadata();
1972         }
1973 
1974         // Update the linkage for extern_weak declarations when a definition
1975         // exists.
1976         if (Linkage == CFL_Definition && F->hasExternalWeakLinkage())
1977           F->setLinkage(GlobalValue::ExternalLinkage);
1978 
1979         // If the function in the full LTO module is a declaration, replace its
1980         // type metadata with the type metadata we found in cfi.functions. That
1981         // metadata is presumed to be more accurate than the metadata attached
1982         // to the declaration.
1983         if (F->isDeclaration()) {
1984           if (Linkage == CFL_WeakDeclaration)
1985             F->setLinkage(GlobalValue::ExternalWeakLinkage);
1986 
1987           F->eraseMetadata(LLVMContext::MD_type);
1988           for (unsigned I = 2; I < FuncMD->getNumOperands(); ++I)
1989             F->addMetadata(LLVMContext::MD_type,
1990                            *cast<MDNode>(FuncMD->getOperand(I).get()));
1991         }
1992       }
1993     }
1994   }
1995 
1996   DenseMap<GlobalObject *, GlobalTypeMember *> GlobalTypeMembers;
1997   for (GlobalObject &GO : M.global_objects()) {
1998     if (isa<GlobalVariable>(GO) && GO.isDeclarationForLinker())
1999       continue;
2000 
2001     Types.clear();
2002     GO.getMetadata(LLVMContext::MD_type, Types);
2003 
2004     bool IsJumpTableCanonical = false;
2005     bool IsExported = false;
2006     if (Function *F = dyn_cast<Function>(&GO)) {
2007       IsJumpTableCanonical = isJumpTableCanonical(F);
2008       if (ExportedFunctions.count(F->getName())) {
2009         IsJumpTableCanonical |=
2010             ExportedFunctions[F->getName()].Linkage == CFL_Definition;
2011         IsExported = true;
2012       // TODO: The logic here checks only that the function is address taken,
2013       // not that the address takers are live. This can be updated to check
2014       // their liveness and emit fewer jumptable entries once monolithic LTO
2015       // builds also emit summaries.
2016       } else if (!F->hasAddressTaken()) {
2017         if (!CrossDsoCfi || !IsJumpTableCanonical || F->hasLocalLinkage())
2018           continue;
2019       }
2020     }
2021 
2022     auto *GTM = GlobalTypeMember::create(Alloc, &GO, IsJumpTableCanonical,
2023                                          IsExported, Types);
2024     GlobalTypeMembers[&GO] = GTM;
2025     for (MDNode *Type : Types) {
2026       verifyTypeMDNode(&GO, Type);
2027       auto &Info = TypeIdInfo[Type->getOperand(1)];
2028       Info.UniqueId = ++CurUniqueId;
2029       Info.RefGlobals.push_back(GTM);
2030     }
2031   }
2032 
2033   auto AddTypeIdUse = [&](Metadata *TypeId) -> TypeIdUserInfo & {
2034     // Add the call site to the list of call sites for this type identifier. We
2035     // also use TypeIdUsers to keep track of whether we have seen this type
2036     // identifier before. If we have, we don't need to re-add the referenced
2037     // globals to the equivalence class.
2038     auto Ins = TypeIdUsers.insert({TypeId, {}});
2039     if (Ins.second) {
2040       // Add the type identifier to the equivalence class.
2041       GlobalClassesTy::iterator GCI = GlobalClasses.insert(TypeId);
2042       GlobalClassesTy::member_iterator CurSet = GlobalClasses.findLeader(GCI);
2043 
2044       // Add the referenced globals to the type identifier's equivalence class.
2045       for (GlobalTypeMember *GTM : TypeIdInfo[TypeId].RefGlobals)
2046         CurSet = GlobalClasses.unionSets(
2047             CurSet, GlobalClasses.findLeader(GlobalClasses.insert(GTM)));
2048     }
2049 
2050     return Ins.first->second;
2051   };
2052 
2053   if (TypeTestFunc) {
2054     for (const Use &U : TypeTestFunc->uses()) {
2055       auto CI = cast<CallInst>(U.getUser());
2056       // If this type test is only used by llvm.assume instructions, it
2057       // was used for whole program devirtualization, and is being kept
2058       // for use by other optimization passes. We do not need or want to
2059       // lower it here. We also don't want to rewrite any associated globals
2060       // unnecessarily. These will be removed by a subsequent LTT invocation
2061       // with the DropTypeTests flag set.
2062       bool OnlyAssumeUses = !CI->use_empty();
2063       for (const Use &CIU : CI->uses()) {
2064         if (isa<AssumeInst>(CIU.getUser()))
2065           continue;
2066         OnlyAssumeUses = false;
2067         break;
2068       }
2069       if (OnlyAssumeUses)
2070         continue;
2071 
2072       auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
2073       if (!TypeIdMDVal)
2074         report_fatal_error("Second argument of llvm.type.test must be metadata");
2075       auto TypeId = TypeIdMDVal->getMetadata();
2076       AddTypeIdUse(TypeId).CallSites.push_back(CI);
2077     }
2078   }
2079 
2080   if (ICallBranchFunnelFunc) {
2081     for (const Use &U : ICallBranchFunnelFunc->uses()) {
2082       if (Arch != Triple::x86_64)
2083         report_fatal_error(
2084             "llvm.icall.branch.funnel not supported on this target");
2085 
2086       auto CI = cast<CallInst>(U.getUser());
2087 
2088       std::vector<GlobalTypeMember *> Targets;
2089       if (CI->getNumArgOperands() % 2 != 1)
2090         report_fatal_error("number of arguments should be odd");
2091 
2092       GlobalClassesTy::member_iterator CurSet;
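      // As the loop below shows, operand 0 is the indirectly-called function
      // pointer, and the operands at odd positions 1, 3, 5, ... name the
      // candidate targets (a global plus an optional constant offset), hence
      // the odd argument count.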
2093       for (unsigned I = 1; I != CI->getNumArgOperands(); I += 2) {
2094         int64_t Offset;
2095         auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
2096             CI->getOperand(I), Offset, M.getDataLayout()));
2097         if (!Base)
2098           report_fatal_error(
2099               "Expected branch funnel operand to be global value");
2100 
2101         GlobalTypeMember *GTM = GlobalTypeMembers[Base];
2102         Targets.push_back(GTM);
2103         GlobalClassesTy::member_iterator NewSet =
2104             GlobalClasses.findLeader(GlobalClasses.insert(GTM));
2105         if (I == 1)
2106           CurSet = NewSet;
2107         else
2108           CurSet = GlobalClasses.unionSets(CurSet, NewSet);
2109       }
2110 
2111       GlobalClasses.unionSets(
2112           CurSet, GlobalClasses.findLeader(
2113                       GlobalClasses.insert(ICallBranchFunnel::create(
2114                           Alloc, CI, Targets, ++CurUniqueId))));
2115     }
2116   }
2117 
2118   if (ExportSummary) {
2119     DenseMap<GlobalValue::GUID, TinyPtrVector<Metadata *>> MetadataByGUID;
2120     for (auto &P : TypeIdInfo) {
2121       if (auto *TypeId = dyn_cast<MDString>(P.first))
2122         MetadataByGUID[GlobalValue::getGUID(TypeId->getString())].push_back(
2123             TypeId);
2124     }
2125 
2126     for (auto &P : *ExportSummary) {
2127       for (auto &S : P.second.SummaryList) {
2128         if (!ExportSummary->isGlobalValueLive(S.get()))
2129           continue;
2130         if (auto *FS = dyn_cast<FunctionSummary>(S->getBaseObject()))
2131           for (GlobalValue::GUID G : FS->type_tests())
2132             for (Metadata *MD : MetadataByGUID[G])
2133               AddTypeIdUse(MD).IsExported = true;
2134       }
2135     }
2136   }
2137 
2138   if (GlobalClasses.empty())
2139     return false;
2140 
2141   // Build a list of disjoint sets ordered by their maximum global index for
2142   // determinism.
2143   std::vector<std::pair<GlobalClassesTy::iterator, unsigned>> Sets;
2144   for (GlobalClassesTy::iterator I = GlobalClasses.begin(),
2145                                  E = GlobalClasses.end();
2146        I != E; ++I) {
2147     if (!I->isLeader())
2148       continue;
2149     ++NumTypeIdDisjointSets;
2150 
2151     unsigned MaxUniqueId = 0;
2152     for (GlobalClassesTy::member_iterator MI = GlobalClasses.member_begin(I);
2153          MI != GlobalClasses.member_end(); ++MI) {
2154       if (auto *MD = MI->dyn_cast<Metadata *>())
2155         MaxUniqueId = std::max(MaxUniqueId, TypeIdInfo[MD].UniqueId);
2156       else if (auto *BF = MI->dyn_cast<ICallBranchFunnel *>())
2157         MaxUniqueId = std::max(MaxUniqueId, BF->UniqueId);
2158     }
2159     Sets.emplace_back(I, MaxUniqueId);
2160   }
2161   llvm::sort(Sets,
2162              [](const std::pair<GlobalClassesTy::iterator, unsigned> &S1,
2163                 const std::pair<GlobalClassesTy::iterator, unsigned> &S2) {
2164                return S1.second < S2.second;
2165              });
2166 
2167   // For each disjoint set we found...
2168   for (const auto &S : Sets) {
2169     // Build the list of type identifiers in this disjoint set.
2170     std::vector<Metadata *> TypeIds;
2171     std::vector<GlobalTypeMember *> Globals;
2172     std::vector<ICallBranchFunnel *> ICallBranchFunnels;
2173     for (GlobalClassesTy::member_iterator MI =
2174              GlobalClasses.member_begin(S.first);
2175          MI != GlobalClasses.member_end(); ++MI) {
2176       if (MI->is<Metadata *>())
2177         TypeIds.push_back(MI->get<Metadata *>());
2178       else if (MI->is<GlobalTypeMember *>())
2179         Globals.push_back(MI->get<GlobalTypeMember *>());
2180       else
2181         ICallBranchFunnels.push_back(MI->get<ICallBranchFunnel *>());
2182     }
2183 
2184     // Order type identifiers by unique ID for determinism. This ordering is
2185     // stable as there is a one-to-one mapping between metadata and unique IDs.
2186     llvm::sort(TypeIds, [&](Metadata *M1, Metadata *M2) {
2187       return TypeIdInfo[M1].UniqueId < TypeIdInfo[M2].UniqueId;
2188     });
2189 
2190     // Same for the branch funnels.
2191     llvm::sort(ICallBranchFunnels,
2192                [&](ICallBranchFunnel *F1, ICallBranchFunnel *F2) {
2193                  return F1->UniqueId < F2->UniqueId;
2194                });
2195 
2196     // Build bitsets for this disjoint set.
2197     buildBitSetsFromDisjointSet(TypeIds, Globals, ICallBranchFunnels);
2198   }
2199 
2200   allocateByteArrays();
2201 
2202   // Parse alias data to replace stand-in function declarations for aliases
2203   // with an alias to the intended target.
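  // Each operand of the "aliases" metadata is {alias name, aliasee name,
  // visibility, weak}, e.g. (illustrative):
  //   !0 = !{!"vfunc", !"vfunc.target", i8 0, i8 0}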
2204   if (ExportSummary) {
2205     if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
2206       for (auto AliasMD : AliasesMD->operands()) {
2207         assert(AliasMD->getNumOperands() >= 4);
2208         StringRef AliasName =
2209             cast<MDString>(AliasMD->getOperand(0))->getString();
2210         StringRef Aliasee = cast<MDString>(AliasMD->getOperand(1))->getString();
2211 
2212         if (!ExportedFunctions.count(Aliasee) ||
2213             ExportedFunctions[Aliasee].Linkage != CFL_Definition ||
2214             !M.getNamedAlias(Aliasee))
2215           continue;
2216 
2217         GlobalValue::VisibilityTypes Visibility =
2218             static_cast<GlobalValue::VisibilityTypes>(
2219                 cast<ConstantAsMetadata>(AliasMD->getOperand(2))
2220                     ->getValue()
2221                     ->getUniqueInteger()
2222                     .getZExtValue());
2223         bool Weak =
2224             static_cast<bool>(cast<ConstantAsMetadata>(AliasMD->getOperand(3))
2225                                   ->getValue()
2226                                   ->getUniqueInteger()
2227                                   .getZExtValue());
2228 
2229         auto *Alias = GlobalAlias::create("", M.getNamedAlias(Aliasee));
2230         Alias->setVisibility(Visibility);
2231         if (Weak)
2232           Alias->setLinkage(GlobalValue::WeakAnyLinkage);
2233 
2234         if (auto *F = M.getFunction(AliasName)) {
2235           Alias->takeName(F);
2236           F->replaceAllUsesWith(Alias);
2237           F->eraseFromParent();
2238         } else {
2239           Alias->setName(AliasName);
2240         }
2241       }
2242     }
2243   }
2244 
2245   // Emit .symver directives for exported functions, if they exist.
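  // For example (names illustrative), a "symvers" entry {!"foo", !"foo@@V1"}
  // turns into the module-level inline asm ".symver foo, foo@@V1".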
2246   if (ExportSummary) {
2247     if (NamedMDNode *SymversMD = M.getNamedMetadata("symvers")) {
2248       for (auto Symver : SymversMD->operands()) {
2249         assert(Symver->getNumOperands() >= 2);
2250         StringRef SymbolName =
2251             cast<MDString>(Symver->getOperand(0))->getString();
2252         StringRef Alias = cast<MDString>(Symver->getOperand(1))->getString();
2253 
2254         if (!ExportedFunctions.count(SymbolName))
2255           continue;
2256 
2257         M.appendModuleInlineAsm(
2258             (llvm::Twine(".symver ") + SymbolName + ", " + Alias).str());
2259       }
2260     }
2261   }
2262 
2263   return true;
2264 }
2265 
2266 PreservedAnalyses LowerTypeTestsPass::run(Module &M,
2267                                           ModuleAnalysisManager &AM) {
2268   bool Changed;
2269   if (UseCommandLine)
2270     Changed = LowerTypeTestsModule::runForTesting(M);
2271   else
2272     Changed =
2273         LowerTypeTestsModule(M, ExportSummary, ImportSummary, DropTypeTests)
2274             .lower();
2275   if (!Changed)
2276     return PreservedAnalyses::all();
2277   return PreservedAnalyses::none();
2278 }
2279