xref: /llvm-project/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp (revision 1b95e76d8f10725f73c706881ccc49669e151f38)
1 //===- MemorySanitizer.cpp - detector of uninitialized reads --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file is a part of MemorySanitizer, a detector of uninitialized
11 /// reads.
12 ///
13 /// The algorithm of the tool is similar to Memcheck
14 /// (https://static.usenix.org/event/usenix05/tech/general/full_papers/seward/seward_html/usenix2005.html)
15 /// We associate a few shadow bits with every byte of the application memory,
16 /// poison the shadow of the malloc-ed or alloca-ed memory, load the shadow,
17 /// bits on every memory read, propagate the shadow bits through some of the
18 /// arithmetic instruction (including MOV), store the shadow bits on every
19 /// memory write, report a bug on some other instructions (e.g. JMP) if the
20 /// associated shadow is poisoned.
21 ///
22 /// But there are differences too. The first and the major one:
23 /// compiler instrumentation instead of binary instrumentation. This
24 /// gives us much better register allocation, possible compiler
25 /// optimizations and a fast start-up. But this brings the major issue
26 /// as well: msan needs to see all program events, including system
27 /// calls and reads/writes in system libraries, so we either need to
28 /// compile *everything* with msan or use a binary translation
29 /// component (e.g. DynamoRIO) to instrument pre-built libraries.
30 /// Another difference from Memcheck is that we use 8 shadow bits per
31 /// byte of application memory and use a direct shadow mapping. This
32 /// greatly simplifies the instrumentation code and avoids races on
33 /// shadow updates (Memcheck is single-threaded so races are not a
34 /// concern there. Memcheck uses 2 shadow bits per byte with a slow
35 /// path storage that uses 8 bits per byte).
36 ///
37 /// The default value of shadow is 0, which means "clean" (not poisoned).
38 ///
39 /// Every module initializer should call __msan_init to ensure that the
40 /// shadow memory is ready. On error, __msan_warning is called. Since
41 /// parameters and return values may be passed via registers, we have a
42 /// specialized thread-local shadow for return values
43 /// (__msan_retval_tls) and parameters (__msan_param_tls).
44 ///
45 ///                           Origin tracking.
46 ///
47 /// MemorySanitizer can track origins (allocation points) of all uninitialized
48 /// values. This behavior is controlled with a flag (msan-track-origins) and is
49 /// disabled by default.
50 ///
51 /// Origins are 4-byte values created and interpreted by the runtime library.
52 /// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
53 /// of application memory. Propagation of origins is basically a bunch of
54 /// "select" instructions that pick the origin of a dirty argument, if an
55 /// instruction has one.
56 ///
57 /// Every 4 aligned, consecutive bytes of application memory have one origin
58 /// value associated with them. If these bytes contain uninitialized data
59 /// coming from 2 different allocations, the last store wins. Because of this,
60 /// MemorySanitizer reports can show unrelated origins, but this is unlikely in
61 /// practice.
62 ///
63 /// Origins are meaningless for fully initialized values, so MemorySanitizer
64 /// avoids storing origin to memory when a fully initialized value is stored.
65 /// This way it avoids needless overwriting origin of the 4-byte region on
66 /// a short (i.e. 1 byte) clean store, and it is also good for performance.
67 ///
68 ///                            Atomic handling.
69 ///
70 /// Ideally, every atomic store of application value should update the
71 /// corresponding shadow location in an atomic way. Unfortunately, atomic store
72 /// of two disjoint locations can not be done without severe slowdown.
73 ///
74 /// Therefore, we implement an approximation that may err on the safe side.
75 /// In this implementation, every atomically accessed location in the program
76 /// may only change from (partially) uninitialized to fully initialized, but
77 /// not the other way around. We load the shadow _after_ the application load,
78 /// and we store the shadow _before_ the app store. Also, we always store clean
79 /// shadow (if the application store is atomic). This way, if the store-load
80 /// pair constitutes a happens-before arc, shadow store and load are correctly
81 /// ordered such that the load will get either the value that was stored, or
82 /// some later value (which is always clean).
83 ///
84 /// This does not work very well with Compare-And-Swap (CAS) and
85 /// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
86 /// must store the new shadow before the app operation, and load the shadow
87 /// after the app operation. Computers don't work this way. Current
88 /// implementation ignores the load aspect of CAS/RMW, always returning a clean
89 /// value. It implements the store part as a simple atomic store by storing a
90 /// clean shadow.
91 ///
92 ///                      Instrumenting inline assembly.
93 ///
94 /// For inline assembly code LLVM has little idea about which memory locations
95 /// become initialized depending on the arguments. It can be possible to figure
96 /// out which arguments are meant to point to inputs and outputs, but the
97 /// actual semantics can be only visible at runtime. In the Linux kernel it's
98 /// also possible that the arguments only indicate the offset for a base taken
99 /// from a segment register, so it's dangerous to treat any asm() arguments as
100 /// pointers. We take a conservative approach generating calls to
101 ///   __msan_instrument_asm_store(ptr, size)
102 /// , which defer the memory unpoisoning to the runtime library.
103 /// The latter can perform more complex address checks to figure out whether
104 /// it's safe to touch the shadow memory.
105 /// Like with atomic operations, we call __msan_instrument_asm_store() before
106 /// the assembly call, so that changes to the shadow memory will be seen by
107 /// other threads together with main memory initialization.
108 ///
109 ///                  KernelMemorySanitizer (KMSAN) implementation.
110 ///
111 /// The major differences between KMSAN and MSan instrumentation are:
112 ///  - KMSAN always tracks the origins and implies msan-keep-going=true;
113 ///  - KMSAN allocates shadow and origin memory for each page separately, so
114 ///    there are no explicit accesses to shadow and origin in the
115 ///    instrumentation.
116 ///    Shadow and origin values for a particular X-byte memory location
117 ///    (X=1,2,4,8) are accessed through pointers obtained via the
118 ///      __msan_metadata_ptr_for_load_X(ptr)
119 ///      __msan_metadata_ptr_for_store_X(ptr)
120 ///    functions. The corresponding functions check that the X-byte accesses
121 ///    are possible and returns the pointers to shadow and origin memory.
122 ///    Arbitrary sized accesses are handled with:
123 ///      __msan_metadata_ptr_for_load_n(ptr, size)
124 ///      __msan_metadata_ptr_for_store_n(ptr, size);
125 ///    Note that the sanitizer code has to deal with how shadow/origin pairs
126 ///    returned by the these functions are represented in different ABIs. In
127 ///    the X86_64 ABI they are returned in RDX:RAX, in PowerPC64 they are
128 ///    returned in r3 and r4, and in the SystemZ ABI they are written to memory
129 ///    pointed to by a hidden parameter.
130 ///  - TLS variables are stored in a single per-task struct. A call to a
131 ///    function __msan_get_context_state() returning a pointer to that struct
132 ///    is inserted into every instrumented function before the entry block;
133 ///  - __msan_warning() takes a 32-bit origin parameter;
134 ///  - local variables are poisoned with __msan_poison_alloca() upon function
135 ///    entry and unpoisoned with __msan_unpoison_alloca() before leaving the
136 ///    function;
137 ///  - the pass doesn't declare any global variables or add global constructors
138 ///    to the translation unit.
139 ///
140 /// Also, KMSAN currently ignores uninitialized memory passed into inline asm
141 /// calls, making sure we're on the safe side wrt. possible false positives.
142 ///
143 ///  KernelMemorySanitizer only supports X86_64, SystemZ and PowerPC64 at the
144 ///  moment.
145 ///
146 //
147 // FIXME: This sanitizer does not yet handle scalable vectors
148 //
149 //===----------------------------------------------------------------------===//
150 
151 #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
152 #include "llvm/ADT/APInt.h"
153 #include "llvm/ADT/ArrayRef.h"
154 #include "llvm/ADT/DenseMap.h"
155 #include "llvm/ADT/DepthFirstIterator.h"
156 #include "llvm/ADT/SetVector.h"
157 #include "llvm/ADT/SmallPtrSet.h"
158 #include "llvm/ADT/SmallVector.h"
159 #include "llvm/ADT/StringExtras.h"
160 #include "llvm/ADT/StringRef.h"
161 #include "llvm/Analysis/GlobalsModRef.h"
162 #include "llvm/Analysis/TargetLibraryInfo.h"
163 #include "llvm/Analysis/ValueTracking.h"
164 #include "llvm/IR/Argument.h"
165 #include "llvm/IR/AttributeMask.h"
166 #include "llvm/IR/Attributes.h"
167 #include "llvm/IR/BasicBlock.h"
168 #include "llvm/IR/CallingConv.h"
169 #include "llvm/IR/Constant.h"
170 #include "llvm/IR/Constants.h"
171 #include "llvm/IR/DataLayout.h"
172 #include "llvm/IR/DerivedTypes.h"
173 #include "llvm/IR/Function.h"
174 #include "llvm/IR/GlobalValue.h"
175 #include "llvm/IR/GlobalVariable.h"
176 #include "llvm/IR/IRBuilder.h"
177 #include "llvm/IR/InlineAsm.h"
178 #include "llvm/IR/InstVisitor.h"
179 #include "llvm/IR/InstrTypes.h"
180 #include "llvm/IR/Instruction.h"
181 #include "llvm/IR/Instructions.h"
182 #include "llvm/IR/IntrinsicInst.h"
183 #include "llvm/IR/Intrinsics.h"
184 #include "llvm/IR/IntrinsicsAArch64.h"
185 #include "llvm/IR/IntrinsicsX86.h"
186 #include "llvm/IR/MDBuilder.h"
187 #include "llvm/IR/Module.h"
188 #include "llvm/IR/Type.h"
189 #include "llvm/IR/Value.h"
190 #include "llvm/IR/ValueMap.h"
191 #include "llvm/Support/Alignment.h"
192 #include "llvm/Support/AtomicOrdering.h"
193 #include "llvm/Support/Casting.h"
194 #include "llvm/Support/CommandLine.h"
195 #include "llvm/Support/Debug.h"
196 #include "llvm/Support/DebugCounter.h"
197 #include "llvm/Support/ErrorHandling.h"
198 #include "llvm/Support/MathExtras.h"
199 #include "llvm/Support/raw_ostream.h"
200 #include "llvm/TargetParser/Triple.h"
201 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
202 #include "llvm/Transforms/Utils/Instrumentation.h"
203 #include "llvm/Transforms/Utils/Local.h"
204 #include "llvm/Transforms/Utils/ModuleUtils.h"
205 #include <algorithm>
206 #include <cassert>
207 #include <cstddef>
208 #include <cstdint>
209 #include <memory>
210 #include <string>
211 #include <tuple>
212 
213 using namespace llvm;
214 
215 #define DEBUG_TYPE "msan"
216 
217 DEBUG_COUNTER(DebugInsertCheck, "msan-insert-check",
218               "Controls which checks to insert");
219 
220 DEBUG_COUNTER(DebugInstrumentInstruction, "msan-instrument-instruction",
221               "Controls which instruction to instrument");
222 
223 static const unsigned kOriginSize = 4;
224 static const Align kMinOriginAlignment = Align(4);
225 static const Align kShadowTLSAlignment = Align(8);
226 
227 // These constants must be kept in sync with the ones in msan.h.
228 static const unsigned kParamTLSSize = 800;
229 static const unsigned kRetvalTLSSize = 800;
230 
231 // Accesses sizes are powers of two: 1, 2, 4, 8.
232 static const size_t kNumberOfAccessSizes = 4;
233 
234 /// Track origins of uninitialized values.
235 ///
236 /// Adds a section to MemorySanitizer report that points to the allocation
237 /// (stack or heap) the uninitialized bits came from originally.
238 static cl::opt<int> ClTrackOrigins(
239     "msan-track-origins",
240     cl::desc("Track origins (allocation sites) of poisoned memory"), cl::Hidden,
241     cl::init(0));
242 
243 static cl::opt<bool> ClKeepGoing("msan-keep-going",
244                                  cl::desc("keep going after reporting a UMR"),
245                                  cl::Hidden, cl::init(false));
246 
247 static cl::opt<bool>
248     ClPoisonStack("msan-poison-stack",
249                   cl::desc("poison uninitialized stack variables"), cl::Hidden,
250                   cl::init(true));
251 
252 static cl::opt<bool> ClPoisonStackWithCall(
253     "msan-poison-stack-with-call",
254     cl::desc("poison uninitialized stack variables with a call"), cl::Hidden,
255     cl::init(false));
256 
257 static cl::opt<int> ClPoisonStackPattern(
258     "msan-poison-stack-pattern",
259     cl::desc("poison uninitialized stack variables with the given pattern"),
260     cl::Hidden, cl::init(0xff));
261 
262 static cl::opt<bool>
263     ClPrintStackNames("msan-print-stack-names",
264                       cl::desc("Print name of local stack variable"),
265                       cl::Hidden, cl::init(true));
266 
267 static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
268                                    cl::desc("poison undef temps"), cl::Hidden,
269                                    cl::init(true));
270 
271 static cl::opt<bool>
272     ClHandleICmp("msan-handle-icmp",
273                  cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
274                  cl::Hidden, cl::init(true));
275 
276 static cl::opt<bool>
277     ClHandleICmpExact("msan-handle-icmp-exact",
278                       cl::desc("exact handling of relational integer ICmp"),
279                       cl::Hidden, cl::init(true));
280 
281 static cl::opt<bool> ClHandleLifetimeIntrinsics(
282     "msan-handle-lifetime-intrinsics",
283     cl::desc(
284         "when possible, poison scoped variables at the beginning of the scope "
285         "(slower, but more precise)"),
286     cl::Hidden, cl::init(true));
287 
288 // When compiling the Linux kernel, we sometimes see false positives related to
289 // MSan being unable to understand that inline assembly calls may initialize
290 // local variables.
291 // This flag makes the compiler conservatively unpoison every memory location
292 // passed into an assembly call. Note that this may cause false positives.
293 // Because it's impossible to figure out the array sizes, we can only unpoison
294 // the first sizeof(type) bytes for each type* pointer.
295 static cl::opt<bool> ClHandleAsmConservative(
296     "msan-handle-asm-conservative",
297     cl::desc("conservative handling of inline assembly"), cl::Hidden,
298     cl::init(true));
299 
300 // This flag controls whether we check the shadow of the address
301 // operand of load or store. Such bugs are very rare, since load from
302 // a garbage address typically results in SEGV, but still happen
303 // (e.g. only lower bits of address are garbage, or the access happens
304 // early at program startup where malloc-ed memory is more likely to
305 // be zeroed. As of 2012-08-28 this flag adds 20% slowdown.
306 static cl::opt<bool> ClCheckAccessAddress(
307     "msan-check-access-address",
308     cl::desc("report accesses through a pointer which has poisoned shadow"),
309     cl::Hidden, cl::init(true));
310 
311 static cl::opt<bool> ClEagerChecks(
312     "msan-eager-checks",
313     cl::desc("check arguments and return values at function call boundaries"),
314     cl::Hidden, cl::init(false));
315 
316 static cl::opt<bool> ClDumpStrictInstructions(
317     "msan-dump-strict-instructions",
318     cl::desc("print out instructions with default strict semantics"),
319     cl::Hidden, cl::init(false));
320 
321 static cl::opt<int> ClInstrumentationWithCallThreshold(
322     "msan-instrumentation-with-call-threshold",
323     cl::desc(
324         "If the function being instrumented requires more than "
325         "this number of checks and origin stores, use callbacks instead of "
326         "inline checks (-1 means never use callbacks)."),
327     cl::Hidden, cl::init(3500));
328 
329 static cl::opt<bool>
330     ClEnableKmsan("msan-kernel",
331                   cl::desc("Enable KernelMemorySanitizer instrumentation"),
332                   cl::Hidden, cl::init(false));
333 
334 static cl::opt<bool>
335     ClDisableChecks("msan-disable-checks",
336                     cl::desc("Apply no_sanitize to the whole file"), cl::Hidden,
337                     cl::init(false));
338 
339 static cl::opt<bool>
340     ClCheckConstantShadow("msan-check-constant-shadow",
341                           cl::desc("Insert checks for constant shadow values"),
342                           cl::Hidden, cl::init(true));
343 
344 // This is off by default because of a bug in gold:
345 // https://sourceware.org/bugzilla/show_bug.cgi?id=19002
346 static cl::opt<bool>
347     ClWithComdat("msan-with-comdat",
348                  cl::desc("Place MSan constructors in comdat sections"),
349                  cl::Hidden, cl::init(false));
350 
351 // These options allow to specify custom memory map parameters
352 // See MemoryMapParams for details.
353 static cl::opt<uint64_t> ClAndMask("msan-and-mask",
354                                    cl::desc("Define custom MSan AndMask"),
355                                    cl::Hidden, cl::init(0));
356 
357 static cl::opt<uint64_t> ClXorMask("msan-xor-mask",
358                                    cl::desc("Define custom MSan XorMask"),
359                                    cl::Hidden, cl::init(0));
360 
361 static cl::opt<uint64_t> ClShadowBase("msan-shadow-base",
362                                       cl::desc("Define custom MSan ShadowBase"),
363                                       cl::Hidden, cl::init(0));
364 
365 static cl::opt<uint64_t> ClOriginBase("msan-origin-base",
366                                       cl::desc("Define custom MSan OriginBase"),
367                                       cl::Hidden, cl::init(0));
368 
369 static cl::opt<int>
370     ClDisambiguateWarning("msan-disambiguate-warning-threshold",
371                           cl::desc("Define threshold for number of checks per "
372                                    "debug location to force origin update."),
373                           cl::Hidden, cl::init(3));
374 
375 const char kMsanModuleCtorName[] = "msan.module_ctor";
376 const char kMsanInitName[] = "__msan_init";
377 
378 namespace {
379 
380 // Memory map parameters used in application-to-shadow address calculation.
381 // Offset = (Addr & ~AndMask) ^ XorMask
382 // Shadow = ShadowBase + Offset
383 // Origin = OriginBase + Offset
384 struct MemoryMapParams {
385   uint64_t AndMask;
386   uint64_t XorMask;
387   uint64_t ShadowBase;
388   uint64_t OriginBase;
389 };
390 
391 struct PlatformMemoryMapParams {
392   const MemoryMapParams *bits32;
393   const MemoryMapParams *bits64;
394 };
395 
396 } // end anonymous namespace
397 
398 // i386 Linux
399 static const MemoryMapParams Linux_I386_MemoryMapParams = {
400     0x000080000000, // AndMask
401     0,              // XorMask (not used)
402     0,              // ShadowBase (not used)
403     0x000040000000, // OriginBase
404 };
405 
406 // x86_64 Linux
407 static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
408     0,              // AndMask (not used)
409     0x500000000000, // XorMask
410     0,              // ShadowBase (not used)
411     0x100000000000, // OriginBase
412 };
413 
414 // mips32 Linux
415 // FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
416 // after picking good constants
417 
418 // mips64 Linux
419 static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
420     0,              // AndMask (not used)
421     0x008000000000, // XorMask
422     0,              // ShadowBase (not used)
423     0x002000000000, // OriginBase
424 };
425 
426 // ppc32 Linux
427 // FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
428 // after picking good constants
429 
430 // ppc64 Linux
431 static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
432     0xE00000000000, // AndMask
433     0x100000000000, // XorMask
434     0x080000000000, // ShadowBase
435     0x1C0000000000, // OriginBase
436 };
437 
438 // s390x Linux
439 static const MemoryMapParams Linux_S390X_MemoryMapParams = {
440     0xC00000000000, // AndMask
441     0,              // XorMask (not used)
442     0x080000000000, // ShadowBase
443     0x1C0000000000, // OriginBase
444 };
445 
446 // arm32 Linux
447 // FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
448 // after picking good constants
449 
450 // aarch64 Linux
451 static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
452     0,               // AndMask (not used)
453     0x0B00000000000, // XorMask
454     0,               // ShadowBase (not used)
455     0x0200000000000, // OriginBase
456 };
457 
458 // loongarch64 Linux
459 static const MemoryMapParams Linux_LoongArch64_MemoryMapParams = {
460     0,              // AndMask (not used)
461     0x500000000000, // XorMask
462     0,              // ShadowBase (not used)
463     0x100000000000, // OriginBase
464 };
465 
466 // riscv32 Linux
467 // FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
468 // after picking good constants
469 
470 // aarch64 FreeBSD
471 static const MemoryMapParams FreeBSD_AArch64_MemoryMapParams = {
472     0x1800000000000, // AndMask
473     0x0400000000000, // XorMask
474     0x0200000000000, // ShadowBase
475     0x0700000000000, // OriginBase
476 };
477 
478 // i386 FreeBSD
479 static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
480     0x000180000000, // AndMask
481     0x000040000000, // XorMask
482     0x000020000000, // ShadowBase
483     0x000700000000, // OriginBase
484 };
485 
486 // x86_64 FreeBSD
487 static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
488     0xc00000000000, // AndMask
489     0x200000000000, // XorMask
490     0x100000000000, // ShadowBase
491     0x380000000000, // OriginBase
492 };
493 
494 // x86_64 NetBSD
495 static const MemoryMapParams NetBSD_X86_64_MemoryMapParams = {
496     0,              // AndMask
497     0x500000000000, // XorMask
498     0,              // ShadowBase
499     0x100000000000, // OriginBase
500 };
501 
502 static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
503     &Linux_I386_MemoryMapParams,
504     &Linux_X86_64_MemoryMapParams,
505 };
506 
507 static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
508     nullptr,
509     &Linux_MIPS64_MemoryMapParams,
510 };
511 
512 static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
513     nullptr,
514     &Linux_PowerPC64_MemoryMapParams,
515 };
516 
517 static const PlatformMemoryMapParams Linux_S390_MemoryMapParams = {
518     nullptr,
519     &Linux_S390X_MemoryMapParams,
520 };
521 
522 static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
523     nullptr,
524     &Linux_AArch64_MemoryMapParams,
525 };
526 
527 static const PlatformMemoryMapParams Linux_LoongArch_MemoryMapParams = {
528     nullptr,
529     &Linux_LoongArch64_MemoryMapParams,
530 };
531 
532 static const PlatformMemoryMapParams FreeBSD_ARM_MemoryMapParams = {
533     nullptr,
534     &FreeBSD_AArch64_MemoryMapParams,
535 };
536 
537 static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
538     &FreeBSD_I386_MemoryMapParams,
539     &FreeBSD_X86_64_MemoryMapParams,
540 };
541 
542 static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams = {
543     nullptr,
544     &NetBSD_X86_64_MemoryMapParams,
545 };
546 
547 namespace {
548 
549 /// Instrument functions of a module to detect uninitialized reads.
550 ///
551 /// Instantiating MemorySanitizer inserts the msan runtime library API function
552 /// declarations into the module if they don't exist already. Instantiating
553 /// ensures the __msan_init function is in the list of global constructors for
554 /// the module.
555 class MemorySanitizer {
556 public:
557   MemorySanitizer(Module &M, MemorySanitizerOptions Options)
558       : CompileKernel(Options.Kernel), TrackOrigins(Options.TrackOrigins),
559         Recover(Options.Recover), EagerChecks(Options.EagerChecks) {
560     initializeModule(M);
561   }
562 
563   // MSan cannot be moved or copied because of MapParams.
564   MemorySanitizer(MemorySanitizer &&) = delete;
565   MemorySanitizer &operator=(MemorySanitizer &&) = delete;
566   MemorySanitizer(const MemorySanitizer &) = delete;
567   MemorySanitizer &operator=(const MemorySanitizer &) = delete;
568 
569   bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);
570 
571 private:
572   friend struct MemorySanitizerVisitor;
573   friend struct VarArgHelperBase;
574   friend struct VarArgAMD64Helper;
575   friend struct VarArgAArch64Helper;
576   friend struct VarArgPowerPCHelper;
577   friend struct VarArgSystemZHelper;
578   friend struct VarArgI386Helper;
579   friend struct VarArgGenericHelper;
580 
581   void initializeModule(Module &M);
582   void initializeCallbacks(Module &M, const TargetLibraryInfo &TLI);
583   void createKernelApi(Module &M, const TargetLibraryInfo &TLI);
584   void createUserspaceApi(Module &M, const TargetLibraryInfo &TLI);
585 
586   template <typename... ArgsTy>
587   FunctionCallee getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
588                                                  ArgsTy... Args);
589 
590   /// True if we're compiling the Linux kernel.
591   bool CompileKernel;
592   /// Track origins (allocation points) of uninitialized values.
593   int TrackOrigins;
594   bool Recover;
595   bool EagerChecks;
596 
597   Triple TargetTriple;
598   LLVMContext *C;
599   Type *IntptrTy; ///< Integer type with the size of a ptr in default AS.
600   Type *OriginTy;
601   PointerType *PtrTy; ///< Integer type with the size of a ptr in default AS.
602 
603   // XxxTLS variables represent the per-thread state in MSan and per-task state
604   // in KMSAN.
605   // For the userspace these point to thread-local globals. In the kernel land
606   // they point to the members of a per-task struct obtained via a call to
607   // __msan_get_context_state().
608 
609   /// Thread-local shadow storage for function parameters.
610   Value *ParamTLS;
611 
612   /// Thread-local origin storage for function parameters.
613   Value *ParamOriginTLS;
614 
615   /// Thread-local shadow storage for function return value.
616   Value *RetvalTLS;
617 
618   /// Thread-local origin storage for function return value.
619   Value *RetvalOriginTLS;
620 
621   /// Thread-local shadow storage for in-register va_arg function.
622   Value *VAArgTLS;
623 
624   /// Thread-local shadow storage for in-register va_arg function.
625   Value *VAArgOriginTLS;
626 
627   /// Thread-local shadow storage for va_arg overflow area.
628   Value *VAArgOverflowSizeTLS;
629 
630   /// Are the instrumentation callbacks set up?
631   bool CallbacksInitialized = false;
632 
633   /// The run-time callback to print a warning.
634   FunctionCallee WarningFn;
635 
636   // These arrays are indexed by log2(AccessSize).
637   FunctionCallee MaybeWarningFn[kNumberOfAccessSizes];
638   FunctionCallee MaybeStoreOriginFn[kNumberOfAccessSizes];
639 
640   /// Run-time helper that generates a new origin value for a stack
641   /// allocation.
642   FunctionCallee MsanSetAllocaOriginWithDescriptionFn;
643   // No description version
644   FunctionCallee MsanSetAllocaOriginNoDescriptionFn;
645 
646   /// Run-time helper that poisons stack on function entry.
647   FunctionCallee MsanPoisonStackFn;
648 
649   /// Run-time helper that records a store (or any event) of an
650   /// uninitialized value and returns an updated origin id encoding this info.
651   FunctionCallee MsanChainOriginFn;
652 
653   /// Run-time helper that paints an origin over a region.
654   FunctionCallee MsanSetOriginFn;
655 
656   /// MSan runtime replacements for memmove, memcpy and memset.
657   FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
658 
659   /// KMSAN callback for task-local function argument shadow.
660   StructType *MsanContextStateTy;
661   FunctionCallee MsanGetContextStateFn;
662 
663   /// Functions for poisoning/unpoisoning local variables
664   FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
665 
666   /// Pair of shadow/origin pointers.
667   Type *MsanMetadata;
668 
669   /// Each of the MsanMetadataPtrXxx functions returns a MsanMetadata.
670   FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
671   FunctionCallee MsanMetadataPtrForLoad_1_8[4];
672   FunctionCallee MsanMetadataPtrForStore_1_8[4];
673   FunctionCallee MsanInstrumentAsmStoreFn;
674 
675   /// Storage for return values of the MsanMetadataPtrXxx functions.
676   Value *MsanMetadataAlloca;
677 
678   /// Helper to choose between different MsanMetadataPtrXxx().
679   FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);
680 
681   /// Memory map parameters used in application-to-shadow calculation.
682   const MemoryMapParams *MapParams;
683 
684   /// Custom memory map parameters used when -msan-shadow-base or
685   // -msan-origin-base is provided.
686   MemoryMapParams CustomMapParams;
687 
688   MDNode *ColdCallWeights;
689 
690   /// Branch weights for origin store.
691   MDNode *OriginStoreWeights;
692 };
693 
694 void insertModuleCtor(Module &M) {
695   getOrCreateSanitizerCtorAndInitFunctions(
696       M, kMsanModuleCtorName, kMsanInitName,
697       /*InitArgTypes=*/{},
698       /*InitArgs=*/{},
699       // This callback is invoked when the functions are created the first
700       // time. Hook them into the global ctors list in that case:
701       [&](Function *Ctor, FunctionCallee) {
702         if (!ClWithComdat) {
703           appendToGlobalCtors(M, Ctor, 0);
704           return;
705         }
706         Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
707         Ctor->setComdat(MsanCtorComdat);
708         appendToGlobalCtors(M, Ctor, 0, Ctor);
709       });
710 }
711 
712 template <class T> T getOptOrDefault(const cl::opt<T> &Opt, T Default) {
713   return (Opt.getNumOccurrences() > 0) ? Opt : Default;
714 }
715 
716 } // end anonymous namespace
717 
718 MemorySanitizerOptions::MemorySanitizerOptions(int TO, bool R, bool K,
719                                                bool EagerChecks)
720     : Kernel(getOptOrDefault(ClEnableKmsan, K)),
721       TrackOrigins(getOptOrDefault(ClTrackOrigins, Kernel ? 2 : TO)),
722       Recover(getOptOrDefault(ClKeepGoing, Kernel || R)),
723       EagerChecks(getOptOrDefault(ClEagerChecks, EagerChecks)) {}
724 
725 PreservedAnalyses MemorySanitizerPass::run(Module &M,
726                                            ModuleAnalysisManager &AM) {
727   // Return early if nosanitize_memory module flag is present for the module.
728   if (checkIfAlreadyInstrumented(M, "nosanitize_memory"))
729     return PreservedAnalyses::all();
730   bool Modified = false;
731   if (!Options.Kernel) {
732     insertModuleCtor(M);
733     Modified = true;
734   }
735 
736   auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
737   for (Function &F : M) {
738     if (F.empty())
739       continue;
740     MemorySanitizer Msan(*F.getParent(), Options);
741     Modified |=
742         Msan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F));
743   }
744 
745   if (!Modified)
746     return PreservedAnalyses::all();
747 
748   PreservedAnalyses PA = PreservedAnalyses::none();
749   // GlobalsAA is considered stateless and does not get invalidated unless
750   // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
751   // make changes that require GlobalsAA to be invalidated.
752   PA.abandon<GlobalsAA>();
753   return PA;
754 }
755 
756 void MemorySanitizerPass::printPipeline(
757     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
758   static_cast<PassInfoMixin<MemorySanitizerPass> *>(this)->printPipeline(
759       OS, MapClassName2PassName);
760   OS << '<';
761   if (Options.Recover)
762     OS << "recover;";
763   if (Options.Kernel)
764     OS << "kernel;";
765   if (Options.EagerChecks)
766     OS << "eager-checks;";
767   OS << "track-origins=" << Options.TrackOrigins;
768   OS << '>';
769 }
770 
771 /// Create a non-const global initialized with the given string.
772 ///
773 /// Creates a writable global for Str so that we can pass it to the
774 /// run-time lib. Runtime uses first 4 bytes of the string to store the
775 /// frame ID, so the string needs to be mutable.
776 static GlobalVariable *createPrivateConstGlobalForString(Module &M,
777                                                          StringRef Str) {
778   Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
779   return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/true,
780                             GlobalValue::PrivateLinkage, StrConst, "");
781 }
782 
783 template <typename... ArgsTy>
784 FunctionCallee
785 MemorySanitizer::getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
786                                                  ArgsTy... Args) {
787   if (TargetTriple.getArch() == Triple::systemz) {
788     // SystemZ ABI: shadow/origin pair is returned via a hidden parameter.
789     return M.getOrInsertFunction(Name, Type::getVoidTy(*C), PtrTy,
790                                  std::forward<ArgsTy>(Args)...);
791   }
792 
793   return M.getOrInsertFunction(Name, MsanMetadata,
794                                std::forward<ArgsTy>(Args)...);
795 }
796 
797 /// Create KMSAN API callbacks.
798 void MemorySanitizer::createKernelApi(Module &M, const TargetLibraryInfo &TLI) {
799   IRBuilder<> IRB(*C);
800 
801   // These will be initialized in insertKmsanPrologue().
802   RetvalTLS = nullptr;
803   RetvalOriginTLS = nullptr;
804   ParamTLS = nullptr;
805   ParamOriginTLS = nullptr;
806   VAArgTLS = nullptr;
807   VAArgOriginTLS = nullptr;
808   VAArgOverflowSizeTLS = nullptr;
809 
810   WarningFn = M.getOrInsertFunction("__msan_warning",
811                                     TLI.getAttrList(C, {0}, /*Signed=*/false),
812                                     IRB.getVoidTy(), IRB.getInt32Ty());
813 
814   // Requests the per-task context state (kmsan_context_state*) from the
815   // runtime library.
816   MsanContextStateTy = StructType::get(
817       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
818       ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8),
819       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
820       ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), /* va_arg_origin */
821       IRB.getInt64Ty(), ArrayType::get(OriginTy, kParamTLSSize / 4), OriginTy,
822       OriginTy);
823   MsanGetContextStateFn =
824       M.getOrInsertFunction("__msan_get_context_state", PtrTy);
825 
826   MsanMetadata = StructType::get(PtrTy, PtrTy);
827 
828   for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
829     std::string name_load =
830         "__msan_metadata_ptr_for_load_" + std::to_string(size);
831     std::string name_store =
832         "__msan_metadata_ptr_for_store_" + std::to_string(size);
833     MsanMetadataPtrForLoad_1_8[ind] =
834         getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
835     MsanMetadataPtrForStore_1_8[ind] =
836         getOrInsertMsanMetadataFunction(M, name_store, PtrTy);
837   }
838 
839   MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
840       M, "__msan_metadata_ptr_for_load_n", PtrTy, IRB.getInt64Ty());
841   MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
842       M, "__msan_metadata_ptr_for_store_n", PtrTy, IRB.getInt64Ty());
843 
844   // Functions for poisoning and unpoisoning memory.
845   MsanPoisonAllocaFn = M.getOrInsertFunction(
846       "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
847   MsanUnpoisonAllocaFn = M.getOrInsertFunction(
848       "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
849 }
850 
851 static Constant *getOrInsertGlobal(Module &M, StringRef Name, Type *Ty) {
852   return M.getOrInsertGlobal(Name, Ty, [&] {
853     return new GlobalVariable(M, Ty, false, GlobalVariable::ExternalLinkage,
854                               nullptr, Name, nullptr,
855                               GlobalVariable::InitialExecTLSModel);
856   });
857 }
858 
859 /// Insert declarations for userspace-specific functions and globals.
860 void MemorySanitizer::createUserspaceApi(Module &M,
861                                          const TargetLibraryInfo &TLI) {
862   IRBuilder<> IRB(*C);
863 
864   // Create the callback.
865   // FIXME: this function should have "Cold" calling conv,
866   // which is not yet implemented.
867   if (TrackOrigins) {
868     StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
869                                       : "__msan_warning_with_origin_noreturn";
870     WarningFn = M.getOrInsertFunction(WarningFnName,
871                                       TLI.getAttrList(C, {0}, /*Signed=*/false),
872                                       IRB.getVoidTy(), IRB.getInt32Ty());
873   } else {
874     StringRef WarningFnName =
875         Recover ? "__msan_warning" : "__msan_warning_noreturn";
876     WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
877   }
878 
879   // Create the global TLS variables.
880   RetvalTLS =
881       getOrInsertGlobal(M, "__msan_retval_tls",
882                         ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8));
883 
884   RetvalOriginTLS = getOrInsertGlobal(M, "__msan_retval_origin_tls", OriginTy);
885 
886   ParamTLS =
887       getOrInsertGlobal(M, "__msan_param_tls",
888                         ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
889 
890   ParamOriginTLS =
891       getOrInsertGlobal(M, "__msan_param_origin_tls",
892                         ArrayType::get(OriginTy, kParamTLSSize / 4));
893 
894   VAArgTLS =
895       getOrInsertGlobal(M, "__msan_va_arg_tls",
896                         ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));
897 
898   VAArgOriginTLS =
899       getOrInsertGlobal(M, "__msan_va_arg_origin_tls",
900                         ArrayType::get(OriginTy, kParamTLSSize / 4));
901 
902   VAArgOverflowSizeTLS =
903       getOrInsertGlobal(M, "__msan_va_arg_overflow_size_tls", IRB.getInt64Ty());
904 
905   for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
906        AccessSizeIndex++) {
907     unsigned AccessSize = 1 << AccessSizeIndex;
908     std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
909     MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
910         FunctionName, TLI.getAttrList(C, {0, 1}, /*Signed=*/false),
911         IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
912 
913     FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
914     MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
915         FunctionName, TLI.getAttrList(C, {0, 2}, /*Signed=*/false),
916         IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
917         IRB.getInt32Ty());
918   }
919 
920   MsanSetAllocaOriginWithDescriptionFn =
921       M.getOrInsertFunction("__msan_set_alloca_origin_with_descr",
922                             IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
923   MsanSetAllocaOriginNoDescriptionFn =
924       M.getOrInsertFunction("__msan_set_alloca_origin_no_descr",
925                             IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
926   MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack",
927                                             IRB.getVoidTy(), PtrTy, IntptrTy);
928 }
929 
930 /// Insert extern declaration of runtime-provided functions and globals.
931 void MemorySanitizer::initializeCallbacks(Module &M,
932                                           const TargetLibraryInfo &TLI) {
933   // Only do this once.
934   if (CallbacksInitialized)
935     return;
936 
937   IRBuilder<> IRB(*C);
938   // Initialize callbacks that are common for kernel and userspace
939   // instrumentation.
940   MsanChainOriginFn = M.getOrInsertFunction(
941       "__msan_chain_origin",
942       TLI.getAttrList(C, {0}, /*Signed=*/false, /*Ret=*/true), IRB.getInt32Ty(),
943       IRB.getInt32Ty());
944   MsanSetOriginFn = M.getOrInsertFunction(
945       "__msan_set_origin", TLI.getAttrList(C, {2}, /*Signed=*/false),
946       IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
947   MemmoveFn =
948       M.getOrInsertFunction("__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
949   MemcpyFn =
950       M.getOrInsertFunction("__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
951   MemsetFn = M.getOrInsertFunction("__msan_memset",
952                                    TLI.getAttrList(C, {1}, /*Signed=*/true),
953                                    PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
954 
955   MsanInstrumentAsmStoreFn = M.getOrInsertFunction(
956       "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);
957 
958   if (CompileKernel) {
959     createKernelApi(M, TLI);
960   } else {
961     createUserspaceApi(M, TLI);
962   }
963   CallbacksInitialized = true;
964 }
965 
966 FunctionCallee MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore,
967                                                              int size) {
968   FunctionCallee *Fns =
969       isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
970   switch (size) {
971   case 1:
972     return Fns[0];
973   case 2:
974     return Fns[1];
975   case 4:
976     return Fns[2];
977   case 8:
978     return Fns[3];
979   default:
980     return nullptr;
981   }
982 }
983 
984 /// Module-level initialization.
985 ///
986 /// inserts a call to __msan_init to the module's constructor list.
987 void MemorySanitizer::initializeModule(Module &M) {
988   auto &DL = M.getDataLayout();
989 
990   TargetTriple = Triple(M.getTargetTriple());
991 
992   bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
993   bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
994   // Check the overrides first
995   if (ShadowPassed || OriginPassed) {
996     CustomMapParams.AndMask = ClAndMask;
997     CustomMapParams.XorMask = ClXorMask;
998     CustomMapParams.ShadowBase = ClShadowBase;
999     CustomMapParams.OriginBase = ClOriginBase;
1000     MapParams = &CustomMapParams;
1001   } else {
1002     switch (TargetTriple.getOS()) {
1003     case Triple::FreeBSD:
1004       switch (TargetTriple.getArch()) {
1005       case Triple::aarch64:
1006         MapParams = FreeBSD_ARM_MemoryMapParams.bits64;
1007         break;
1008       case Triple::x86_64:
1009         MapParams = FreeBSD_X86_MemoryMapParams.bits64;
1010         break;
1011       case Triple::x86:
1012         MapParams = FreeBSD_X86_MemoryMapParams.bits32;
1013         break;
1014       default:
1015         report_fatal_error("unsupported architecture");
1016       }
1017       break;
1018     case Triple::NetBSD:
1019       switch (TargetTriple.getArch()) {
1020       case Triple::x86_64:
1021         MapParams = NetBSD_X86_MemoryMapParams.bits64;
1022         break;
1023       default:
1024         report_fatal_error("unsupported architecture");
1025       }
1026       break;
1027     case Triple::Linux:
1028       switch (TargetTriple.getArch()) {
1029       case Triple::x86_64:
1030         MapParams = Linux_X86_MemoryMapParams.bits64;
1031         break;
1032       case Triple::x86:
1033         MapParams = Linux_X86_MemoryMapParams.bits32;
1034         break;
1035       case Triple::mips64:
1036       case Triple::mips64el:
1037         MapParams = Linux_MIPS_MemoryMapParams.bits64;
1038         break;
1039       case Triple::ppc64:
1040       case Triple::ppc64le:
1041         MapParams = Linux_PowerPC_MemoryMapParams.bits64;
1042         break;
1043       case Triple::systemz:
1044         MapParams = Linux_S390_MemoryMapParams.bits64;
1045         break;
1046       case Triple::aarch64:
1047       case Triple::aarch64_be:
1048         MapParams = Linux_ARM_MemoryMapParams.bits64;
1049         break;
1050       case Triple::loongarch64:
1051         MapParams = Linux_LoongArch_MemoryMapParams.bits64;
1052         break;
1053       default:
1054         report_fatal_error("unsupported architecture");
1055       }
1056       break;
1057     default:
1058       report_fatal_error("unsupported operating system");
1059     }
1060   }
1061 
1062   C = &(M.getContext());
1063   IRBuilder<> IRB(*C);
1064   IntptrTy = IRB.getIntPtrTy(DL);
1065   OriginTy = IRB.getInt32Ty();
1066   PtrTy = IRB.getPtrTy();
1067 
1068   ColdCallWeights = MDBuilder(*C).createUnlikelyBranchWeights();
1069   OriginStoreWeights = MDBuilder(*C).createUnlikelyBranchWeights();
1070 
1071   if (!CompileKernel) {
1072     if (TrackOrigins)
1073       M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
1074         return new GlobalVariable(
1075             M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
1076             IRB.getInt32(TrackOrigins), "__msan_track_origins");
1077       });
1078 
1079     if (Recover)
1080       M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
1081         return new GlobalVariable(M, IRB.getInt32Ty(), true,
1082                                   GlobalValue::WeakODRLinkage,
1083                                   IRB.getInt32(Recover), "__msan_keep_going");
1084       });
1085   }
1086 }
1087 
1088 namespace {
1089 
1090 /// A helper class that handles instrumentation of VarArg
1091 /// functions on a particular platform.
1092 ///
1093 /// Implementations are expected to insert the instrumentation
1094 /// necessary to propagate argument shadow through VarArg function
1095 /// calls. Visit* methods are called during an InstVisitor pass over
1096 /// the function, and should avoid creating new basic blocks. A new
1097 /// instance of this class is created for each instrumented function.
1098 struct VarArgHelper {
1099   virtual ~VarArgHelper() = default;
1100 
1101   /// Visit a CallBase.
1102   virtual void visitCallBase(CallBase &CB, IRBuilder<> &IRB) = 0;
1103 
1104   /// Visit a va_start call.
1105   virtual void visitVAStartInst(VAStartInst &I) = 0;
1106 
1107   /// Visit a va_copy call.
1108   virtual void visitVACopyInst(VACopyInst &I) = 0;
1109 
1110   /// Finalize function instrumentation.
1111   ///
1112   /// This method is called after visiting all interesting (see above)
1113   /// instructions in a function.
1114   virtual void finalizeInstrumentation() = 0;
1115 };
1116 
1117 struct MemorySanitizerVisitor;
1118 
1119 } // end anonymous namespace
1120 
1121 static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
1122                                         MemorySanitizerVisitor &Visitor);
1123 
1124 static unsigned TypeSizeToSizeIndex(TypeSize TS) {
1125   if (TS.isScalable())
1126     // Scalable types unconditionally take slowpaths.
1127     return kNumberOfAccessSizes;
1128   unsigned TypeSizeFixed = TS.getFixedValue();
1129   if (TypeSizeFixed <= 8)
1130     return 0;
1131   return Log2_32_Ceil((TypeSizeFixed + 7) / 8);
1132 }
1133 
1134 namespace {
1135 
1136 /// Helper class to attach debug information of the given instruction onto new
1137 /// instructions inserted after.
1138 class NextNodeIRBuilder : public IRBuilder<> {
1139 public:
1140   explicit NextNodeIRBuilder(Instruction *IP) : IRBuilder<>(IP->getNextNode()) {
1141     SetCurrentDebugLocation(IP->getDebugLoc());
1142   }
1143 };
1144 
1145 /// This class does all the work for a given function. Store and Load
1146 /// instructions store and load corresponding shadow and origin
1147 /// values. Most instructions propagate shadow from arguments to their
1148 /// return values. Certain instructions (most importantly, BranchInst)
1149 /// test their argument shadow and print reports (with a runtime call) if it's
1150 /// non-zero.
1151 struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
1152   Function &F;
1153   MemorySanitizer &MS;
1154   SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
1155   ValueMap<Value *, Value *> ShadowMap, OriginMap;
1156   std::unique_ptr<VarArgHelper> VAHelper;
1157   const TargetLibraryInfo *TLI;
1158   Instruction *FnPrologueEnd;
1159   SmallVector<Instruction *, 16> Instructions;
1160 
1161   // The following flags disable parts of MSan instrumentation based on
1162   // exclusion list contents and command-line options.
1163   bool InsertChecks;
1164   bool PropagateShadow;
1165   bool PoisonStack;
1166   bool PoisonUndef;
1167 
1168   struct ShadowOriginAndInsertPoint {
1169     Value *Shadow;
1170     Value *Origin;
1171     Instruction *OrigIns;
1172 
1173     ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
1174         : Shadow(S), Origin(O), OrigIns(I) {}
1175   };
1176   SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
1177   DenseMap<const DILocation *, int> LazyWarningDebugLocationCount;
1178   bool InstrumentLifetimeStart = ClHandleLifetimeIntrinsics;
1179   SmallSetVector<AllocaInst *, 16> AllocaSet;
1180   SmallVector<std::pair<IntrinsicInst *, AllocaInst *>, 16> LifetimeStartList;
1181   SmallVector<StoreInst *, 16> StoreList;
1182   int64_t SplittableBlocksCount = 0;
1183 
1184   MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
1185                          const TargetLibraryInfo &TLI)
1186       : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)), TLI(&TLI) {
1187     bool SanitizeFunction =
1188         F.hasFnAttribute(Attribute::SanitizeMemory) && !ClDisableChecks;
1189     InsertChecks = SanitizeFunction;
1190     PropagateShadow = SanitizeFunction;
1191     PoisonStack = SanitizeFunction && ClPoisonStack;
1192     PoisonUndef = SanitizeFunction && ClPoisonUndef;
1193 
1194     // In the presence of unreachable blocks, we may see Phi nodes with
1195     // incoming nodes from such blocks. Since InstVisitor skips unreachable
1196     // blocks, such nodes will not have any shadow value associated with them.
1197     // It's easier to remove unreachable blocks than deal with missing shadow.
1198     removeUnreachableBlocks(F);
1199 
1200     MS.initializeCallbacks(*F.getParent(), TLI);
1201     FnPrologueEnd = IRBuilder<>(F.getEntryBlock().getFirstNonPHI())
1202                         .CreateIntrinsic(Intrinsic::donothing, {}, {});
1203 
1204     if (MS.CompileKernel) {
1205       IRBuilder<> IRB(FnPrologueEnd);
1206       insertKmsanPrologue(IRB);
1207     }
1208 
1209     LLVM_DEBUG(if (!InsertChecks) dbgs()
1210                << "MemorySanitizer is not inserting checks into '"
1211                << F.getName() << "'\n");
1212   }
1213 
1214   bool instrumentWithCalls(Value *V) {
1215     // Constants likely will be eliminated by follow-up passes.
1216     if (isa<Constant>(V))
1217       return false;
1218 
1219     ++SplittableBlocksCount;
1220     return ClInstrumentationWithCallThreshold >= 0 &&
1221            SplittableBlocksCount > ClInstrumentationWithCallThreshold;
1222   }
1223 
1224   bool isInPrologue(Instruction &I) {
1225     return I.getParent() == FnPrologueEnd->getParent() &&
1226            (&I == FnPrologueEnd || I.comesBefore(FnPrologueEnd));
1227   }
1228 
1229   // Creates a new origin and records the stack trace. In general we can call
1230   // this function for any origin manipulation we like. However it will cost
1231   // runtime resources. So use this wisely only if it can provide additional
1232   // information helpful to a user.
1233   Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
1234     if (MS.TrackOrigins <= 1)
1235       return V;
1236     return IRB.CreateCall(MS.MsanChainOriginFn, V);
1237   }
1238 
1239   Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
1240     const DataLayout &DL = F.getDataLayout();
1241     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
1242     if (IntptrSize == kOriginSize)
1243       return Origin;
1244     assert(IntptrSize == kOriginSize * 2);
1245     Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
1246     return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
1247   }
1248 
1249   /// Fill memory range with the given origin value.
1250   void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
1251                    TypeSize TS, Align Alignment) {
1252     const DataLayout &DL = F.getDataLayout();
1253     const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
1254     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
1255     assert(IntptrAlignment >= kMinOriginAlignment);
1256     assert(IntptrSize >= kOriginSize);
1257 
1258     // Note: The loop based formation works for fixed length vectors too,
1259     // however we prefer to unroll and specialize alignment below.
1260     if (TS.isScalable()) {
1261       Value *Size = IRB.CreateTypeSize(MS.IntptrTy, TS);
1262       Value *RoundUp =
1263           IRB.CreateAdd(Size, ConstantInt::get(MS.IntptrTy, kOriginSize - 1));
1264       Value *End =
1265           IRB.CreateUDiv(RoundUp, ConstantInt::get(MS.IntptrTy, kOriginSize));
1266       auto [InsertPt, Index] =
1267           SplitBlockAndInsertSimpleForLoop(End, &*IRB.GetInsertPoint());
1268       IRB.SetInsertPoint(InsertPt);
1269 
1270       Value *GEP = IRB.CreateGEP(MS.OriginTy, OriginPtr, Index);
1271       IRB.CreateAlignedStore(Origin, GEP, kMinOriginAlignment);
1272       return;
1273     }
1274 
1275     unsigned Size = TS.getFixedValue();
1276 
1277     unsigned Ofs = 0;
1278     Align CurrentAlignment = Alignment;
1279     if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
1280       Value *IntptrOrigin = originToIntptr(IRB, Origin);
1281       Value *IntptrOriginPtr = IRB.CreatePointerCast(OriginPtr, MS.PtrTy);
1282       for (unsigned i = 0; i < Size / IntptrSize; ++i) {
1283         Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
1284                        : IntptrOriginPtr;
1285         IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
1286         Ofs += IntptrSize / kOriginSize;
1287         CurrentAlignment = IntptrAlignment;
1288       }
1289     }
1290 
1291     for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
1292       Value *GEP =
1293           i ? IRB.CreateConstGEP1_32(MS.OriginTy, OriginPtr, i) : OriginPtr;
1294       IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
1295       CurrentAlignment = kMinOriginAlignment;
1296     }
1297   }
1298 
1299   void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
1300                    Value *OriginPtr, Align Alignment) {
1301     const DataLayout &DL = F.getDataLayout();
1302     const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1303     TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
1304     // ZExt cannot convert between vector and scalar
1305     Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
1306     if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1307       if (!ClCheckConstantShadow || ConstantShadow->isZeroValue()) {
1308         // Origin is not needed: value is initialized or const shadow is
1309         // ignored.
1310         return;
1311       }
1312       if (llvm::isKnownNonZero(ConvertedShadow, DL)) {
1313         // Copy origin as the value is definitely uninitialized.
1314         paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1315                     OriginAlignment);
1316         return;
1317       }
1318       // Fallback to runtime check, which still can be optimized out later.
1319     }
1320 
1321     TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
1322     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
1323     if (instrumentWithCalls(ConvertedShadow) &&
1324         SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
1325       FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
1326       Value *ConvertedShadow2 =
1327           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
1328       CallBase *CB = IRB.CreateCall(Fn, {ConvertedShadow2, Addr, Origin});
1329       CB->addParamAttr(0, Attribute::ZExt);
1330       CB->addParamAttr(2, Attribute::ZExt);
1331     } else {
1332       Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
1333       Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1334           Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
1335       IRBuilder<> IRBNew(CheckTerm);
1336       paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
1337                   OriginAlignment);
1338     }
1339   }
1340 
1341   void materializeStores() {
1342     for (StoreInst *SI : StoreList) {
1343       IRBuilder<> IRB(SI);
1344       Value *Val = SI->getValueOperand();
1345       Value *Addr = SI->getPointerOperand();
1346       Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
1347       Value *ShadowPtr, *OriginPtr;
1348       Type *ShadowTy = Shadow->getType();
1349       const Align Alignment = SI->getAlign();
1350       const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1351       std::tie(ShadowPtr, OriginPtr) =
1352           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true);
1353 
1354       StoreInst *NewSI = IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment);
1355       LLVM_DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
1356       (void)NewSI;
1357 
1358       if (SI->isAtomic())
1359         SI->setOrdering(addReleaseOrdering(SI->getOrdering()));
1360 
1361       if (MS.TrackOrigins && !SI->isAtomic())
1362         storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
1363                     OriginAlignment);
1364     }
1365   }
1366 
1367   // Returns true if Debug Location corresponds to multiple warnings.
1368   bool shouldDisambiguateWarningLocation(const DebugLoc &DebugLoc) {
1369     if (MS.TrackOrigins < 2)
1370       return false;
1371 
1372     if (LazyWarningDebugLocationCount.empty())
1373       for (const auto &I : InstrumentationList)
1374         ++LazyWarningDebugLocationCount[I.OrigIns->getDebugLoc()];
1375 
1376     return LazyWarningDebugLocationCount[DebugLoc] >= ClDisambiguateWarning;
1377   }
1378 
1379   /// Helper function to insert a warning at IRB's current insert point.
1380   void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {
1381     if (!Origin)
1382       Origin = (Value *)IRB.getInt32(0);
1383     assert(Origin->getType()->isIntegerTy());
1384 
1385     if (shouldDisambiguateWarningLocation(IRB.getCurrentDebugLocation())) {
1386       // Try to create additional origin with debug info of the last origin
1387       // instruction. It may provide additional information to the user.
1388       if (Instruction *OI = dyn_cast_or_null<Instruction>(Origin)) {
1389         assert(MS.TrackOrigins);
1390         auto NewDebugLoc = OI->getDebugLoc();
1391         // Origin update with missing or the same debug location provides no
1392         // additional value.
1393         if (NewDebugLoc && NewDebugLoc != IRB.getCurrentDebugLocation()) {
1394           // Insert update just before the check, so we call runtime only just
1395           // before the report.
1396           IRBuilder<> IRBOrigin(&*IRB.GetInsertPoint());
1397           IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
1398           Origin = updateOrigin(Origin, IRBOrigin);
1399         }
1400       }
1401     }
1402 
1403     if (MS.CompileKernel || MS.TrackOrigins)
1404       IRB.CreateCall(MS.WarningFn, Origin)->setCannotMerge();
1405     else
1406       IRB.CreateCall(MS.WarningFn)->setCannotMerge();
1407     // FIXME: Insert UnreachableInst if !MS.Recover?
1408     // This may invalidate some of the following checks and needs to be done
1409     // at the very end.
1410   }
1411 
1412   void materializeOneCheck(IRBuilder<> &IRB, Value *ConvertedShadow,
1413                            Value *Origin) {
1414     const DataLayout &DL = F.getDataLayout();
1415     TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
1416     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
1417     if (instrumentWithCalls(ConvertedShadow) &&
1418         SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
1419       FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
1420       // ZExt cannot convert between vector and scalar
1421       ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
1422       Value *ConvertedShadow2 =
1423           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
1424       CallBase *CB = IRB.CreateCall(
1425           Fn, {ConvertedShadow2,
1426                MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
1427       CB->addParamAttr(0, Attribute::ZExt);
1428       CB->addParamAttr(1, Attribute::ZExt);
1429     } else {
1430       Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
1431       Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1432           Cmp, &*IRB.GetInsertPoint(),
1433           /* Unreachable */ !MS.Recover, MS.ColdCallWeights);
1434 
1435       IRB.SetInsertPoint(CheckTerm);
1436       insertWarningFn(IRB, Origin);
1437       LLVM_DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
1438     }
1439   }
1440 
1441   void materializeInstructionChecks(
1442       ArrayRef<ShadowOriginAndInsertPoint> InstructionChecks) {
1443     const DataLayout &DL = F.getDataLayout();
1444     // Disable combining in some cases. TrackOrigins checks each shadow to pick
1445     // correct origin.
1446     bool Combine = !MS.TrackOrigins;
1447     Instruction *Instruction = InstructionChecks.front().OrigIns;
1448     Value *Shadow = nullptr;
1449     for (const auto &ShadowData : InstructionChecks) {
1450       assert(ShadowData.OrigIns == Instruction);
1451       IRBuilder<> IRB(Instruction);
1452 
1453       Value *ConvertedShadow = ShadowData.Shadow;
1454 
1455       if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1456         if (!ClCheckConstantShadow || ConstantShadow->isZeroValue()) {
1457           // Skip, value is initialized or const shadow is ignored.
1458           continue;
1459         }
1460         if (llvm::isKnownNonZero(ConvertedShadow, DL)) {
1461           // Report as the value is definitely uninitialized.
1462           insertWarningFn(IRB, ShadowData.Origin);
1463           if (!MS.Recover)
1464             return; // Always fail and stop here, not need to check the rest.
1465           // Skip entire instruction,
1466           continue;
1467         }
1468         // Fallback to runtime check, which still can be optimized out later.
1469       }
1470 
1471       if (!Combine) {
1472         materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
1473         continue;
1474       }
1475 
1476       if (!Shadow) {
1477         Shadow = ConvertedShadow;
1478         continue;
1479       }
1480 
1481       Shadow = convertToBool(Shadow, IRB, "_mscmp");
1482       ConvertedShadow = convertToBool(ConvertedShadow, IRB, "_mscmp");
1483       Shadow = IRB.CreateOr(Shadow, ConvertedShadow, "_msor");
1484     }
1485 
1486     if (Shadow) {
1487       assert(Combine);
1488       IRBuilder<> IRB(Instruction);
1489       materializeOneCheck(IRB, Shadow, nullptr);
1490     }
1491   }
1492 
1493   void materializeChecks() {
1494 #ifndef NDEBUG
1495     // For assert below.
1496     SmallPtrSet<Instruction *, 16> Done;
1497 #endif
1498 
1499     for (auto I = InstrumentationList.begin();
1500          I != InstrumentationList.end();) {
1501       auto OrigIns = I->OrigIns;
1502       // Checks are grouped by the original instruction. We call all
1503       // `insertShadowCheck` for an instruction at once.
1504       assert(Done.insert(OrigIns).second);
1505       auto J = std::find_if(I + 1, InstrumentationList.end(),
1506                             [OrigIns](const ShadowOriginAndInsertPoint &R) {
1507                               return OrigIns != R.OrigIns;
1508                             });
1509       // Process all checks of instruction at once.
1510       materializeInstructionChecks(ArrayRef<ShadowOriginAndInsertPoint>(I, J));
1511       I = J;
1512     }
1513 
1514     LLVM_DEBUG(dbgs() << "DONE:\n" << F);
1515   }
1516 
1517   // Returns the last instruction in the new prologue
1518   void insertKmsanPrologue(IRBuilder<> &IRB) {
1519     Value *ContextState = IRB.CreateCall(MS.MsanGetContextStateFn, {});
1520     Constant *Zero = IRB.getInt32(0);
1521     MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1522                                 {Zero, IRB.getInt32(0)}, "param_shadow");
1523     MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1524                                  {Zero, IRB.getInt32(1)}, "retval_shadow");
1525     MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1526                                 {Zero, IRB.getInt32(2)}, "va_arg_shadow");
1527     MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1528                                       {Zero, IRB.getInt32(3)}, "va_arg_origin");
1529     MS.VAArgOverflowSizeTLS =
1530         IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1531                       {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
1532     MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1533                                       {Zero, IRB.getInt32(5)}, "param_origin");
1534     MS.RetvalOriginTLS =
1535         IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
1536                       {Zero, IRB.getInt32(6)}, "retval_origin");
1537     if (MS.TargetTriple.getArch() == Triple::systemz)
1538       MS.MsanMetadataAlloca = IRB.CreateAlloca(MS.MsanMetadata, 0u);
1539   }
1540 
1541   /// Add MemorySanitizer instrumentation to a function.
1542   bool runOnFunction() {
1543     // Iterate all BBs in depth-first order and create shadow instructions
1544     // for all instructions (where applicable).
1545     // For PHI nodes we create dummy shadow PHIs which will be finalized later.
1546     for (BasicBlock *BB : depth_first(FnPrologueEnd->getParent()))
1547       visit(*BB);
1548 
1549     // `visit` above only collects instructions. Process them after iterating
1550     // CFG to avoid requirement on CFG transformations.
1551     for (Instruction *I : Instructions)
1552       InstVisitor<MemorySanitizerVisitor>::visit(*I);
1553 
1554     // Finalize PHI nodes.
1555     for (PHINode *PN : ShadowPHINodes) {
1556       PHINode *PNS = cast<PHINode>(getShadow(PN));
1557       PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
1558       size_t NumValues = PN->getNumIncomingValues();
1559       for (size_t v = 0; v < NumValues; v++) {
1560         PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1561         if (PNO)
1562           PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1563       }
1564     }
1565 
1566     VAHelper->finalizeInstrumentation();
1567 
1568     // Poison llvm.lifetime.start intrinsics, if we haven't fallen back to
1569     // instrumenting only allocas.
1570     if (InstrumentLifetimeStart) {
1571       for (auto Item : LifetimeStartList) {
1572         instrumentAlloca(*Item.second, Item.first);
1573         AllocaSet.remove(Item.second);
1574       }
1575     }
1576     // Poison the allocas for which we didn't instrument the corresponding
1577     // lifetime intrinsics.
1578     for (AllocaInst *AI : AllocaSet)
1579       instrumentAlloca(*AI);
1580 
1581     // Insert shadow value checks.
1582     materializeChecks();
1583 
1584     // Delayed instrumentation of StoreInst.
1585     // This may not add new address checks.
1586     materializeStores();
1587 
1588     return true;
1589   }
1590 
1591   /// Compute the shadow type that corresponds to a given Value.
1592   Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); }
1593 
1594   /// Compute the shadow type that corresponds to a given Type.
1595   Type *getShadowTy(Type *OrigTy) {
1596     if (!OrigTy->isSized()) {
1597       return nullptr;
1598     }
1599     // For integer type, shadow is the same as the original type.
1600     // This may return weird-sized types like i1.
1601     if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
1602       return IT;
1603     const DataLayout &DL = F.getDataLayout();
1604     if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
1605       uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
1606       return VectorType::get(IntegerType::get(*MS.C, EltSize),
1607                              VT->getElementCount());
1608     }
1609     if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
1610       return ArrayType::get(getShadowTy(AT->getElementType()),
1611                             AT->getNumElements());
1612     }
1613     if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
1614       SmallVector<Type *, 4> Elements;
1615       for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1616         Elements.push_back(getShadowTy(ST->getElementType(i)));
1617       StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
1618       LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
1619       return Res;
1620     }
1621     uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
1622     return IntegerType::get(*MS.C, TypeSize);
1623   }
1624 
1625   /// Extract combined shadow of struct elements as a bool
1626   Value *collapseStructShadow(StructType *Struct, Value *Shadow,
1627                               IRBuilder<> &IRB) {
1628     Value *FalseVal = IRB.getIntN(/* width */ 1, /* value */ 0);
1629     Value *Aggregator = FalseVal;
1630 
1631     for (unsigned Idx = 0; Idx < Struct->getNumElements(); Idx++) {
1632       // Combine by ORing together each element's bool shadow
1633       Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
1634       Value *ShadowBool = convertToBool(ShadowItem, IRB);
1635 
1636       if (Aggregator != FalseVal)
1637         Aggregator = IRB.CreateOr(Aggregator, ShadowBool);
1638       else
1639         Aggregator = ShadowBool;
1640     }
1641 
1642     return Aggregator;
1643   }
1644 
1645   // Extract combined shadow of array elements
1646   Value *collapseArrayShadow(ArrayType *Array, Value *Shadow,
1647                              IRBuilder<> &IRB) {
1648     if (!Array->getNumElements())
1649       return IRB.getIntN(/* width */ 1, /* value */ 0);
1650 
1651     Value *FirstItem = IRB.CreateExtractValue(Shadow, 0);
1652     Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
1653 
1654     for (unsigned Idx = 1; Idx < Array->getNumElements(); Idx++) {
1655       Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
1656       Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
1657       Aggregator = IRB.CreateOr(Aggregator, ShadowInner);
1658     }
1659     return Aggregator;
1660   }
1661 
1662   /// Convert a shadow value to it's flattened variant. The resulting
1663   /// shadow may not necessarily have the same bit width as the input
1664   /// value, but it will always be comparable to zero.
1665   Value *convertShadowToScalar(Value *V, IRBuilder<> &IRB) {
1666     if (StructType *Struct = dyn_cast<StructType>(V->getType()))
1667       return collapseStructShadow(Struct, V, IRB);
1668     if (ArrayType *Array = dyn_cast<ArrayType>(V->getType()))
1669       return collapseArrayShadow(Array, V, IRB);
1670     if (isa<VectorType>(V->getType())) {
1671       if (isa<ScalableVectorType>(V->getType()))
1672         return convertShadowToScalar(IRB.CreateOrReduce(V), IRB);
1673       unsigned BitWidth =
1674           V->getType()->getPrimitiveSizeInBits().getFixedValue();
1675       return IRB.CreateBitCast(V, IntegerType::get(*MS.C, BitWidth));
1676     }
1677     return V;
1678   }
1679 
1680   // Convert a scalar value to an i1 by comparing with 0
1681   Value *convertToBool(Value *V, IRBuilder<> &IRB, const Twine &name = "") {
1682     Type *VTy = V->getType();
1683     if (!VTy->isIntegerTy())
1684       return convertToBool(convertShadowToScalar(V, IRB), IRB, name);
1685     if (VTy->getIntegerBitWidth() == 1)
1686       // Just converting a bool to a bool, so do nothing.
1687       return V;
1688     return IRB.CreateICmpNE(V, ConstantInt::get(VTy, 0), name);
1689   }
1690 
1691   Type *ptrToIntPtrType(Type *PtrTy) const {
1692     if (VectorType *VectTy = dyn_cast<VectorType>(PtrTy)) {
1693       return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
1694                              VectTy->getElementCount());
1695     }
1696     assert(PtrTy->isIntOrPtrTy());
1697     return MS.IntptrTy;
1698   }
1699 
1700   Type *getPtrToShadowPtrType(Type *IntPtrTy, Type *ShadowTy) const {
1701     if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
1702       return VectorType::get(
1703           getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
1704           VectTy->getElementCount());
1705     }
1706     assert(IntPtrTy == MS.IntptrTy);
1707     return MS.PtrTy;
1708   }
1709 
1710   Constant *constToIntPtr(Type *IntPtrTy, uint64_t C) const {
1711     if (VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
1712       return ConstantVector::getSplat(
1713           VectTy->getElementCount(),
1714           constToIntPtr(VectTy->getElementType(), C));
1715     }
1716     assert(IntPtrTy == MS.IntptrTy);
1717     return ConstantInt::get(MS.IntptrTy, C);
1718   }
1719 
1720   /// Compute the integer shadow offset that corresponds to a given
1721   /// application address.
1722   ///
1723   /// Offset = (Addr & ~AndMask) ^ XorMask
1724   /// Addr can be a ptr or <N x ptr>. In both cases ShadowTy the shadow type of
1725   /// a single pointee.
1726   /// Returns <shadow_ptr, origin_ptr> or <<N x shadow_ptr>, <N x origin_ptr>>.
1727   Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
1728     Type *IntptrTy = ptrToIntPtrType(Addr->getType());
1729     Value *OffsetLong = IRB.CreatePointerCast(Addr, IntptrTy);
1730 
1731     if (uint64_t AndMask = MS.MapParams->AndMask)
1732       OffsetLong = IRB.CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));
1733 
1734     if (uint64_t XorMask = MS.MapParams->XorMask)
1735       OffsetLong = IRB.CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
1736     return OffsetLong;
1737   }
1738 
1739   /// Compute the shadow and origin addresses corresponding to a given
1740   /// application address.
1741   ///
1742   /// Shadow = ShadowBase + Offset
1743   /// Origin = (OriginBase + Offset) & ~3ULL
1744   /// Addr can be a ptr or <N x ptr>. In both cases ShadowTy the shadow type of
1745   /// a single pointee.
1746   /// Returns <shadow_ptr, origin_ptr> or <<N x shadow_ptr>, <N x origin_ptr>>.
1747   std::pair<Value *, Value *>
1748   getShadowOriginPtrUserspace(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
1749                               MaybeAlign Alignment) {
1750     VectorType *VectTy = dyn_cast<VectorType>(Addr->getType());
1751     if (!VectTy) {
1752       assert(Addr->getType()->isPointerTy());
1753     } else {
1754       assert(VectTy->getElementType()->isPointerTy());
1755     }
1756     Type *IntptrTy = ptrToIntPtrType(Addr->getType());
1757     Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
1758     Value *ShadowLong = ShadowOffset;
1759     if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
1760       ShadowLong =
1761           IRB.CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
1762     }
1763     Value *ShadowPtr = IRB.CreateIntToPtr(
1764         ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));
1765 
1766     Value *OriginPtr = nullptr;
1767     if (MS.TrackOrigins) {
1768       Value *OriginLong = ShadowOffset;
1769       uint64_t OriginBase = MS.MapParams->OriginBase;
1770       if (OriginBase != 0)
1771         OriginLong =
1772             IRB.CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
1773       if (!Alignment || *Alignment < kMinOriginAlignment) {
1774         uint64_t Mask = kMinOriginAlignment.value() - 1;
1775         OriginLong = IRB.CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
1776       }
1777       OriginPtr = IRB.CreateIntToPtr(
1778           OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
1779     }
1780     return std::make_pair(ShadowPtr, OriginPtr);
1781   }
1782 
1783   template <typename... ArgsTy>
1784   Value *createMetadataCall(IRBuilder<> &IRB, FunctionCallee Callee,
1785                             ArgsTy... Args) {
1786     if (MS.TargetTriple.getArch() == Triple::systemz) {
1787       IRB.CreateCall(Callee,
1788                      {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
1789       return IRB.CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
1790     }
1791 
1792     return IRB.CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
1793   }
1794 
1795   std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(Value *Addr,
1796                                                             IRBuilder<> &IRB,
1797                                                             Type *ShadowTy,
1798                                                             bool isStore) {
1799     Value *ShadowOriginPtrs;
1800     const DataLayout &DL = F.getDataLayout();
1801     TypeSize Size = DL.getTypeStoreSize(ShadowTy);
1802 
1803     FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
1804     Value *AddrCast = IRB.CreatePointerCast(Addr, MS.PtrTy);
1805     if (Getter) {
1806       ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
1807     } else {
1808       Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
1809       ShadowOriginPtrs = createMetadataCall(
1810           IRB,
1811           isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
1812           AddrCast, SizeVal);
1813     }
1814     Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0);
1815     ShadowPtr = IRB.CreatePointerCast(ShadowPtr, MS.PtrTy);
1816     Value *OriginPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 1);
1817 
1818     return std::make_pair(ShadowPtr, OriginPtr);
1819   }
1820 
1821   /// Addr can be a ptr or <N x ptr>. In both cases ShadowTy the shadow type of
1822   /// a single pointee.
1823   /// Returns <shadow_ptr, origin_ptr> or <<N x shadow_ptr>, <N x origin_ptr>>.
1824   std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
1825                                                        IRBuilder<> &IRB,
1826                                                        Type *ShadowTy,
1827                                                        bool isStore) {
1828     VectorType *VectTy = dyn_cast<VectorType>(Addr->getType());
1829     if (!VectTy) {
1830       assert(Addr->getType()->isPointerTy());
1831       return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy, isStore);
1832     }
1833 
1834     // TODO: Support callbacs with vectors of addresses.
1835     unsigned NumElements = cast<FixedVectorType>(VectTy)->getNumElements();
1836     Value *ShadowPtrs = ConstantInt::getNullValue(
1837         FixedVectorType::get(IRB.getPtrTy(), NumElements));
1838     Value *OriginPtrs = nullptr;
1839     if (MS.TrackOrigins)
1840       OriginPtrs = ConstantInt::getNullValue(
1841           FixedVectorType::get(IRB.getPtrTy(), NumElements));
1842     for (unsigned i = 0; i < NumElements; ++i) {
1843       Value *OneAddr =
1844           IRB.CreateExtractElement(Addr, ConstantInt::get(IRB.getInt32Ty(), i));
1845       auto [ShadowPtr, OriginPtr] =
1846           getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy, isStore);
1847 
1848       ShadowPtrs = IRB.CreateInsertElement(
1849           ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.getInt32Ty(), i));
1850       if (MS.TrackOrigins)
1851         OriginPtrs = IRB.CreateInsertElement(
1852             OriginPtrs, OriginPtr, ConstantInt::get(IRB.getInt32Ty(), i));
1853     }
1854     return {ShadowPtrs, OriginPtrs};
1855   }
1856 
1857   std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
1858                                                  Type *ShadowTy,
1859                                                  MaybeAlign Alignment,
1860                                                  bool isStore) {
1861     if (MS.CompileKernel)
1862       return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
1863     return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
1864   }
1865 
1866   /// Compute the shadow address for a given function argument.
1867   ///
1868   /// Shadow = ParamTLS+ArgOffset.
1869   Value *getShadowPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
1870     Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
1871     if (ArgOffset)
1872       Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1873     return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg");
1874   }
1875 
1876   /// Compute the origin address for a given function argument.
1877   Value *getOriginPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
1878     if (!MS.TrackOrigins)
1879       return nullptr;
1880     Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
1881     if (ArgOffset)
1882       Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1883     return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg_o");
1884   }
1885 
1886   /// Compute the shadow address for a retval.
1887   Value *getShadowPtrForRetval(IRBuilder<> &IRB) {
1888     return IRB.CreatePointerCast(MS.RetvalTLS, IRB.getPtrTy(0), "_msret");
1889   }
1890 
1891   /// Compute the origin address for a retval.
1892   Value *getOriginPtrForRetval() {
1893     // We keep a single origin for the entire retval. Might be too optimistic.
1894     return MS.RetvalOriginTLS;
1895   }
1896 
1897   /// Set SV to be the shadow value for V.
1898   void setShadow(Value *V, Value *SV) {
1899     assert(!ShadowMap.count(V) && "Values may only have one shadow");
1900     ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
1901   }
1902 
1903   /// Set Origin to be the origin value for V.
1904   void setOrigin(Value *V, Value *Origin) {
1905     if (!MS.TrackOrigins)
1906       return;
1907     assert(!OriginMap.count(V) && "Values may only have one origin");
1908     LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
1909     OriginMap[V] = Origin;
1910   }
1911 
1912   Constant *getCleanShadow(Type *OrigTy) {
1913     Type *ShadowTy = getShadowTy(OrigTy);
1914     if (!ShadowTy)
1915       return nullptr;
1916     return Constant::getNullValue(ShadowTy);
1917   }
1918 
1919   /// Create a clean shadow value for a given value.
1920   ///
1921   /// Clean shadow (all zeroes) means all bits of the value are defined
1922   /// (initialized).
1923   Constant *getCleanShadow(Value *V) { return getCleanShadow(V->getType()); }
1924 
1925   /// Create a dirty shadow of a given shadow type.
1926   Constant *getPoisonedShadow(Type *ShadowTy) {
1927     assert(ShadowTy);
1928     if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1929       return Constant::getAllOnesValue(ShadowTy);
1930     if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1931       SmallVector<Constant *, 4> Vals(AT->getNumElements(),
1932                                       getPoisonedShadow(AT->getElementType()));
1933       return ConstantArray::get(AT, Vals);
1934     }
1935     if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1936       SmallVector<Constant *, 4> Vals;
1937       for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1938         Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
1939       return ConstantStruct::get(ST, Vals);
1940     }
1941     llvm_unreachable("Unexpected shadow type");
1942   }
1943 
1944   /// Create a dirty shadow for a given value.
1945   Constant *getPoisonedShadow(Value *V) {
1946     Type *ShadowTy = getShadowTy(V);
1947     if (!ShadowTy)
1948       return nullptr;
1949     return getPoisonedShadow(ShadowTy);
1950   }
1951 
1952   /// Create a clean (zero) origin.
1953   Value *getCleanOrigin() { return Constant::getNullValue(MS.OriginTy); }
1954 
1955   /// Get the shadow value for a given Value.
1956   ///
1957   /// This function either returns the value set earlier with setShadow,
1958   /// or extracts if from ParamTLS (for function arguments).
1959   Value *getShadow(Value *V) {
1960     if (Instruction *I = dyn_cast<Instruction>(V)) {
1961       if (!PropagateShadow || I->getMetadata(LLVMContext::MD_nosanitize))
1962         return getCleanShadow(V);
1963       // For instructions the shadow is already stored in the map.
1964       Value *Shadow = ShadowMap[V];
1965       if (!Shadow) {
1966         LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
1967         (void)I;
1968         assert(Shadow && "No shadow for a value");
1969       }
1970       return Shadow;
1971     }
1972     if (UndefValue *U = dyn_cast<UndefValue>(V)) {
1973       Value *AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
1974                                                         : getCleanShadow(V);
1975       LLVM_DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
1976       (void)U;
1977       return AllOnes;
1978     }
1979     if (Argument *A = dyn_cast<Argument>(V)) {
1980       // For arguments we compute the shadow on demand and store it in the map.
1981       Value *&ShadowPtr = ShadowMap[V];
1982       if (ShadowPtr)
1983         return ShadowPtr;
1984       Function *F = A->getParent();
1985       IRBuilder<> EntryIRB(FnPrologueEnd);
1986       unsigned ArgOffset = 0;
1987       const DataLayout &DL = F->getDataLayout();
1988       for (auto &FArg : F->args()) {
1989         if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
1990           LLVM_DEBUG(dbgs() << (FArg.getType()->isScalableTy()
1991                                     ? "vscale not fully supported\n"
1992                                     : "Arg is not sized\n"));
1993           if (A == &FArg) {
1994             ShadowPtr = getCleanShadow(V);
1995             setOrigin(A, getCleanOrigin());
1996             break;
1997           }
1998           continue;
1999         }
2000 
2001         unsigned Size = FArg.hasByValAttr()
2002                             ? DL.getTypeAllocSize(FArg.getParamByValType())
2003                             : DL.getTypeAllocSize(FArg.getType());
2004 
2005         if (A == &FArg) {
2006           bool Overflow = ArgOffset + Size > kParamTLSSize;
2007           if (FArg.hasByValAttr()) {
2008             // ByVal pointer itself has clean shadow. We copy the actual
2009             // argument shadow to the underlying memory.
2010             // Figure out maximal valid memcpy alignment.
2011             const Align ArgAlign = DL.getValueOrABITypeAlignment(
2012                 FArg.getParamAlign(), FArg.getParamByValType());
2013             Value *CpShadowPtr, *CpOriginPtr;
2014             std::tie(CpShadowPtr, CpOriginPtr) =
2015                 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
2016                                    /*isStore*/ true);
2017             if (!PropagateShadow || Overflow) {
2018               // ParamTLS overflow.
2019               EntryIRB.CreateMemSet(
2020                   CpShadowPtr, Constant::getNullValue(EntryIRB.getInt8Ty()),
2021                   Size, ArgAlign);
2022             } else {
2023               Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2024               const Align CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
2025               Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
2026                                                  CopyAlign, Size);
2027               LLVM_DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
2028               (void)Cpy;
2029 
2030               if (MS.TrackOrigins) {
2031                 Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2032                 // FIXME: OriginSize should be:
2033                 // alignTo(V % kMinOriginAlignment + Size, kMinOriginAlignment)
2034                 unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
2035                 EntryIRB.CreateMemCpy(
2036                     CpOriginPtr,
2037                     /* by getShadowOriginPtr */ kMinOriginAlignment, OriginPtr,
2038                     /* by origin_tls[ArgOffset] */ kMinOriginAlignment,
2039                     OriginSize);
2040               }
2041             }
2042           }
2043 
2044           if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
2045               (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
2046             ShadowPtr = getCleanShadow(V);
2047             setOrigin(A, getCleanOrigin());
2048           } else {
2049             // Shadow over TLS
2050             Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2051             ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
2052                                                    kShadowTLSAlignment);
2053             if (MS.TrackOrigins) {
2054               Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2055               setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
2056             }
2057           }
2058           LLVM_DEBUG(dbgs()
2059                      << "  ARG:    " << FArg << " ==> " << *ShadowPtr << "\n");
2060           break;
2061         }
2062 
2063         ArgOffset += alignTo(Size, kShadowTLSAlignment);
2064       }
2065       assert(ShadowPtr && "Could not find shadow for an argument");
2066       return ShadowPtr;
2067     }
2068     // For everything else the shadow is zero.
2069     return getCleanShadow(V);
2070   }
2071 
2072   /// Get the shadow for i-th argument of the instruction I.
2073   Value *getShadow(Instruction *I, int i) {
2074     return getShadow(I->getOperand(i));
2075   }
2076 
2077   /// Get the origin for a value.
2078   Value *getOrigin(Value *V) {
2079     if (!MS.TrackOrigins)
2080       return nullptr;
2081     if (!PropagateShadow || isa<Constant>(V) || isa<InlineAsm>(V))
2082       return getCleanOrigin();
2083     assert((isa<Instruction>(V) || isa<Argument>(V)) &&
2084            "Unexpected value type in getOrigin()");
2085     if (Instruction *I = dyn_cast<Instruction>(V)) {
2086       if (I->getMetadata(LLVMContext::MD_nosanitize))
2087         return getCleanOrigin();
2088     }
2089     Value *Origin = OriginMap[V];
2090     assert(Origin && "Missing origin");
2091     return Origin;
2092   }
2093 
2094   /// Get the origin for i-th argument of the instruction I.
2095   Value *getOrigin(Instruction *I, int i) {
2096     return getOrigin(I->getOperand(i));
2097   }
2098 
2099   /// Remember the place where a shadow check should be inserted.
2100   ///
2101   /// This location will be later instrumented with a check that will print a
2102   /// UMR warning in runtime if the shadow value is not 0.
2103   void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
2104     assert(Shadow);
2105     if (!InsertChecks)
2106       return;
2107 
2108     if (!DebugCounter::shouldExecute(DebugInsertCheck)) {
2109       LLVM_DEBUG(dbgs() << "Skipping check of " << *Shadow << " before "
2110                         << *OrigIns << "\n");
2111       return;
2112     }
2113 #ifndef NDEBUG
2114     Type *ShadowTy = Shadow->getType();
2115     assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy) ||
2116             isa<StructType>(ShadowTy) || isa<ArrayType>(ShadowTy)) &&
2117            "Can only insert checks for integer, vector, and aggregate shadow "
2118            "types");
2119 #endif
2120     InstrumentationList.push_back(
2121         ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
2122   }
2123 
2124   /// Remember the place where a shadow check should be inserted.
2125   ///
2126   /// This location will be later instrumented with a check that will print a
2127   /// UMR warning in runtime if the value is not fully defined.
2128   void insertShadowCheck(Value *Val, Instruction *OrigIns) {
2129     assert(Val);
2130     Value *Shadow, *Origin;
2131     if (ClCheckConstantShadow) {
2132       Shadow = getShadow(Val);
2133       if (!Shadow)
2134         return;
2135       Origin = getOrigin(Val);
2136     } else {
2137       Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
2138       if (!Shadow)
2139         return;
2140       Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
2141     }
2142     insertShadowCheck(Shadow, Origin, OrigIns);
2143   }
2144 
2145   AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
2146     switch (a) {
2147     case AtomicOrdering::NotAtomic:
2148       return AtomicOrdering::NotAtomic;
2149     case AtomicOrdering::Unordered:
2150     case AtomicOrdering::Monotonic:
2151     case AtomicOrdering::Release:
2152       return AtomicOrdering::Release;
2153     case AtomicOrdering::Acquire:
2154     case AtomicOrdering::AcquireRelease:
2155       return AtomicOrdering::AcquireRelease;
2156     case AtomicOrdering::SequentiallyConsistent:
2157       return AtomicOrdering::SequentiallyConsistent;
2158     }
2159     llvm_unreachable("Unknown ordering");
2160   }
2161 
2162   Value *makeAddReleaseOrderingTable(IRBuilder<> &IRB) {
2163     constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2164     uint32_t OrderingTable[NumOrderings] = {};
2165 
2166     OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2167         OrderingTable[(int)AtomicOrderingCABI::release] =
2168             (int)AtomicOrderingCABI::release;
2169     OrderingTable[(int)AtomicOrderingCABI::consume] =
2170         OrderingTable[(int)AtomicOrderingCABI::acquire] =
2171             OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
2172                 (int)AtomicOrderingCABI::acq_rel;
2173     OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2174         (int)AtomicOrderingCABI::seq_cst;
2175 
2176     return ConstantDataVector::get(IRB.getContext(), OrderingTable);
2177   }
2178 
2179   AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
2180     switch (a) {
2181     case AtomicOrdering::NotAtomic:
2182       return AtomicOrdering::NotAtomic;
2183     case AtomicOrdering::Unordered:
2184     case AtomicOrdering::Monotonic:
2185     case AtomicOrdering::Acquire:
2186       return AtomicOrdering::Acquire;
2187     case AtomicOrdering::Release:
2188     case AtomicOrdering::AcquireRelease:
2189       return AtomicOrdering::AcquireRelease;
2190     case AtomicOrdering::SequentiallyConsistent:
2191       return AtomicOrdering::SequentiallyConsistent;
2192     }
2193     llvm_unreachable("Unknown ordering");
2194   }
2195 
2196   Value *makeAddAcquireOrderingTable(IRBuilder<> &IRB) {
2197     constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2198     uint32_t OrderingTable[NumOrderings] = {};
2199 
2200     OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2201         OrderingTable[(int)AtomicOrderingCABI::acquire] =
2202             OrderingTable[(int)AtomicOrderingCABI::consume] =
2203                 (int)AtomicOrderingCABI::acquire;
2204     OrderingTable[(int)AtomicOrderingCABI::release] =
2205         OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
2206             (int)AtomicOrderingCABI::acq_rel;
2207     OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2208         (int)AtomicOrderingCABI::seq_cst;
2209 
2210     return ConstantDataVector::get(IRB.getContext(), OrderingTable);
2211   }
2212 
2213   // ------------------- Visitors.
2214   using InstVisitor<MemorySanitizerVisitor>::visit;
2215   void visit(Instruction &I) {
2216     if (I.getMetadata(LLVMContext::MD_nosanitize))
2217       return;
2218     // Don't want to visit if we're in the prologue
2219     if (isInPrologue(I))
2220       return;
2221     if (!DebugCounter::shouldExecute(DebugInstrumentInstruction)) {
2222       LLVM_DEBUG(dbgs() << "Skipping instruction: " << I << "\n");
2223       // We still need to set the shadow and origin to clean values.
2224       setShadow(&I, getCleanShadow(&I));
2225       setOrigin(&I, getCleanOrigin());
2226       return;
2227     }
2228 
2229     Instructions.push_back(&I);
2230   }
2231 
2232   /// Instrument LoadInst
2233   ///
2234   /// Loads the corresponding shadow and (optionally) origin.
2235   /// Optionally, checks that the load address is fully defined.
2236   void visitLoadInst(LoadInst &I) {
2237     assert(I.getType()->isSized() && "Load type must have size");
2238     assert(!I.getMetadata(LLVMContext::MD_nosanitize));
2239     NextNodeIRBuilder IRB(&I);
2240     Type *ShadowTy = getShadowTy(&I);
2241     Value *Addr = I.getPointerOperand();
2242     Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
2243     const Align Alignment = I.getAlign();
2244     if (PropagateShadow) {
2245       std::tie(ShadowPtr, OriginPtr) =
2246           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
2247       setShadow(&I,
2248                 IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
2249     } else {
2250       setShadow(&I, getCleanShadow(&I));
2251     }
2252 
2253     if (ClCheckAccessAddress)
2254       insertShadowCheck(I.getPointerOperand(), &I);
2255 
2256     if (I.isAtomic())
2257       I.setOrdering(addAcquireOrdering(I.getOrdering()));
2258 
2259     if (MS.TrackOrigins) {
2260       if (PropagateShadow) {
2261         const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
2262         setOrigin(
2263             &I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr, OriginAlignment));
2264       } else {
2265         setOrigin(&I, getCleanOrigin());
2266       }
2267     }
2268   }
2269 
2270   /// Instrument StoreInst
2271   ///
2272   /// Stores the corresponding shadow and (optionally) origin.
2273   /// Optionally, checks that the store address is fully defined.
2274   void visitStoreInst(StoreInst &I) {
2275     StoreList.push_back(&I);
2276     if (ClCheckAccessAddress)
2277       insertShadowCheck(I.getPointerOperand(), &I);
2278   }
2279 
2280   void handleCASOrRMW(Instruction &I) {
2281     assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
2282 
2283     IRBuilder<> IRB(&I);
2284     Value *Addr = I.getOperand(0);
2285     Value *Val = I.getOperand(1);
2286     Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val), Align(1),
2287                                           /*isStore*/ true)
2288                            .first;
2289 
2290     if (ClCheckAccessAddress)
2291       insertShadowCheck(Addr, &I);
2292 
2293     // Only test the conditional argument of cmpxchg instruction.
2294     // The other argument can potentially be uninitialized, but we can not
2295     // detect this situation reliably without possible false positives.
2296     if (isa<AtomicCmpXchgInst>(I))
2297       insertShadowCheck(Val, &I);
2298 
2299     IRB.CreateStore(getCleanShadow(Val), ShadowPtr);
2300 
2301     setShadow(&I, getCleanShadow(&I));
2302     setOrigin(&I, getCleanOrigin());
2303   }
2304 
2305   void visitAtomicRMWInst(AtomicRMWInst &I) {
2306     handleCASOrRMW(I);
2307     I.setOrdering(addReleaseOrdering(I.getOrdering()));
2308   }
2309 
2310   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2311     handleCASOrRMW(I);
2312     I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
2313   }
2314 
2315   // Vector manipulation.
2316   void visitExtractElementInst(ExtractElementInst &I) {
2317     insertShadowCheck(I.getOperand(1), &I);
2318     IRBuilder<> IRB(&I);
2319     setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
2320                                            "_msprop"));
2321     setOrigin(&I, getOrigin(&I, 0));
2322   }
2323 
2324   void visitInsertElementInst(InsertElementInst &I) {
2325     insertShadowCheck(I.getOperand(2), &I);
2326     IRBuilder<> IRB(&I);
2327     auto *Shadow0 = getShadow(&I, 0);
2328     auto *Shadow1 = getShadow(&I, 1);
2329     setShadow(&I, IRB.CreateInsertElement(Shadow0, Shadow1, I.getOperand(2),
2330                                           "_msprop"));
2331     setOriginForNaryOp(I);
2332   }
2333 
2334   void visitShuffleVectorInst(ShuffleVectorInst &I) {
2335     IRBuilder<> IRB(&I);
2336     auto *Shadow0 = getShadow(&I, 0);
2337     auto *Shadow1 = getShadow(&I, 1);
2338     setShadow(&I, IRB.CreateShuffleVector(Shadow0, Shadow1, I.getShuffleMask(),
2339                                           "_msprop"));
2340     setOriginForNaryOp(I);
2341   }
2342 
2343   // Casts.
2344   void visitSExtInst(SExtInst &I) {
2345     IRBuilder<> IRB(&I);
2346     setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
2347     setOrigin(&I, getOrigin(&I, 0));
2348   }
2349 
2350   void visitZExtInst(ZExtInst &I) {
2351     IRBuilder<> IRB(&I);
2352     setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
2353     setOrigin(&I, getOrigin(&I, 0));
2354   }
2355 
2356   void visitTruncInst(TruncInst &I) {
2357     IRBuilder<> IRB(&I);
2358     setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
2359     setOrigin(&I, getOrigin(&I, 0));
2360   }
2361 
2362   void visitBitCastInst(BitCastInst &I) {
2363     // Special case: if this is the bitcast (there is exactly 1 allowed) between
2364     // a musttail call and a ret, don't instrument. New instructions are not
2365     // allowed after a musttail call.
2366     if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
2367       if (CI->isMustTailCall())
2368         return;
2369     IRBuilder<> IRB(&I);
2370     setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
2371     setOrigin(&I, getOrigin(&I, 0));
2372   }
2373 
2374   void visitPtrToIntInst(PtrToIntInst &I) {
2375     IRBuilder<> IRB(&I);
2376     setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
2377                                     "_msprop_ptrtoint"));
2378     setOrigin(&I, getOrigin(&I, 0));
2379   }
2380 
2381   void visitIntToPtrInst(IntToPtrInst &I) {
2382     IRBuilder<> IRB(&I);
2383     setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
2384                                     "_msprop_inttoptr"));
2385     setOrigin(&I, getOrigin(&I, 0));
2386   }
2387 
2388   void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
2389   void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
2390   void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
2391   void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
2392   void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
2393   void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }
2394 
2395   /// Propagate shadow for bitwise AND.
2396   ///
2397   /// This code is exact, i.e. if, for example, a bit in the left argument
2398   /// is defined and 0, then neither the value not definedness of the
2399   /// corresponding bit in B don't affect the resulting shadow.
2400   void visitAnd(BinaryOperator &I) {
2401     IRBuilder<> IRB(&I);
2402     //  "And" of 0 and a poisoned value results in unpoisoned value.
2403     //  1&1 => 1;     0&1 => 0;     p&1 => p;
2404     //  1&0 => 0;     0&0 => 0;     p&0 => 0;
2405     //  1&p => p;     0&p => 0;     p&p => p;
2406     //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
2407     Value *S1 = getShadow(&I, 0);
2408     Value *S2 = getShadow(&I, 1);
2409     Value *V1 = I.getOperand(0);
2410     Value *V2 = I.getOperand(1);
2411     if (V1->getType() != S1->getType()) {
2412       V1 = IRB.CreateIntCast(V1, S1->getType(), false);
2413       V2 = IRB.CreateIntCast(V2, S2->getType(), false);
2414     }
2415     Value *S1S2 = IRB.CreateAnd(S1, S2);
2416     Value *V1S2 = IRB.CreateAnd(V1, S2);
2417     Value *S1V2 = IRB.CreateAnd(S1, V2);
2418     setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
2419     setOriginForNaryOp(I);
2420   }
2421 
2422   void visitOr(BinaryOperator &I) {
2423     IRBuilder<> IRB(&I);
2424     //  "Or" of 1 and a poisoned value results in unpoisoned value.
2425     //  1|1 => 1;     0|1 => 1;     p|1 => 1;
2426     //  1|0 => 1;     0|0 => 0;     p|0 => p;
2427     //  1|p => 1;     0|p => p;     p|p => p;
2428     //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
2429     Value *S1 = getShadow(&I, 0);
2430     Value *S2 = getShadow(&I, 1);
2431     Value *V1 = IRB.CreateNot(I.getOperand(0));
2432     Value *V2 = IRB.CreateNot(I.getOperand(1));
2433     if (V1->getType() != S1->getType()) {
2434       V1 = IRB.CreateIntCast(V1, S1->getType(), false);
2435       V2 = IRB.CreateIntCast(V2, S2->getType(), false);
2436     }
2437     Value *S1S2 = IRB.CreateAnd(S1, S2);
2438     Value *V1S2 = IRB.CreateAnd(V1, S2);
2439     Value *S1V2 = IRB.CreateAnd(S1, V2);
2440     setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
2441     setOriginForNaryOp(I);
2442   }
2443 
2444   /// Default propagation of shadow and/or origin.
2445   ///
2446   /// This class implements the general case of shadow propagation, used in all
2447   /// cases where we don't know and/or don't care about what the operation
2448   /// actually does. It converts all input shadow values to a common type
2449   /// (extending or truncating as necessary), and bitwise OR's them.
2450   ///
2451   /// This is much cheaper than inserting checks (i.e. requiring inputs to be
2452   /// fully initialized), and less prone to false positives.
2453   ///
2454   /// This class also implements the general case of origin propagation. For a
2455   /// Nary operation, result origin is set to the origin of an argument that is
2456   /// not entirely initialized. If there is more than one such arguments, the
2457   /// rightmost of them is picked. It does not matter which one is picked if all
2458   /// arguments are initialized.
2459   template <bool CombineShadow> class Combiner {
2460     Value *Shadow = nullptr;
2461     Value *Origin = nullptr;
2462     IRBuilder<> &IRB;
2463     MemorySanitizerVisitor *MSV;
2464 
2465   public:
2466     Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
2467         : IRB(IRB), MSV(MSV) {}
2468 
2469     /// Add a pair of shadow and origin values to the mix.
2470     Combiner &Add(Value *OpShadow, Value *OpOrigin) {
2471       if (CombineShadow) {
2472         assert(OpShadow);
2473         if (!Shadow)
2474           Shadow = OpShadow;
2475         else {
2476           OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
2477           Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
2478         }
2479       }
2480 
2481       if (MSV->MS.TrackOrigins) {
2482         assert(OpOrigin);
2483         if (!Origin) {
2484           Origin = OpOrigin;
2485         } else {
2486           Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
2487           // No point in adding something that might result in 0 origin value.
2488           if (!ConstOrigin || !ConstOrigin->isNullValue()) {
2489             Value *Cond = MSV->convertToBool(OpShadow, IRB);
2490             Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
2491           }
2492         }
2493       }
2494       return *this;
2495     }
2496 
2497     /// Add an application value to the mix.
2498     Combiner &Add(Value *V) {
2499       Value *OpShadow = MSV->getShadow(V);
2500       Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
2501       return Add(OpShadow, OpOrigin);
2502     }
2503 
2504     /// Set the current combined values as the given instruction's shadow
2505     /// and origin.
2506     void Done(Instruction *I) {
2507       if (CombineShadow) {
2508         assert(Shadow);
2509         Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
2510         MSV->setShadow(I, Shadow);
2511       }
2512       if (MSV->MS.TrackOrigins) {
2513         assert(Origin);
2514         MSV->setOrigin(I, Origin);
2515       }
2516     }
2517 
2518     /// Store the current combined value at the specified origin
2519     /// location.
2520     void DoneAndStoreOrigin(TypeSize TS, Value *OriginPtr) {
2521       if (MSV->MS.TrackOrigins) {
2522         assert(Origin);
2523         MSV->paintOrigin(IRB, Origin, OriginPtr, TS, kMinOriginAlignment);
2524       }
2525     }
2526   };
2527 
2528   using ShadowAndOriginCombiner = Combiner<true>;
2529   using OriginCombiner = Combiner<false>;
2530 
2531   /// Propagate origin for arbitrary operation.
2532   void setOriginForNaryOp(Instruction &I) {
2533     if (!MS.TrackOrigins)
2534       return;
2535     IRBuilder<> IRB(&I);
2536     OriginCombiner OC(this, IRB);
2537     for (Use &Op : I.operands())
2538       OC.Add(Op.get());
2539     OC.Done(&I);
2540   }
2541 
2542   size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
2543     assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
2544            "Vector of pointers is not a valid shadow type");
2545     return Ty->isVectorTy() ? cast<FixedVectorType>(Ty)->getNumElements() *
2546                                   Ty->getScalarSizeInBits()
2547                             : Ty->getPrimitiveSizeInBits();
2548   }
2549 
2550   /// Cast between two shadow types, extending or truncating as
2551   /// necessary.
2552   Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
2553                           bool Signed = false) {
2554     Type *srcTy = V->getType();
2555     if (srcTy == dstTy)
2556       return V;
2557     size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2558     size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2559     if (srcSizeInBits > 1 && dstSizeInBits == 1)
2560       return IRB.CreateICmpNE(V, getCleanShadow(V));
2561 
2562     if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
2563       return IRB.CreateIntCast(V, dstTy, Signed);
2564     if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
2565         cast<VectorType>(dstTy)->getElementCount() ==
2566             cast<VectorType>(srcTy)->getElementCount())
2567       return IRB.CreateIntCast(V, dstTy, Signed);
2568     Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
2569     Value *V2 =
2570         IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
2571     return IRB.CreateBitCast(V2, dstTy);
2572     // TODO: handle struct types.
2573   }
2574 
2575   /// Cast an application value to the type of its own shadow.
2576   Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
2577     Type *ShadowTy = getShadowTy(V);
2578     if (V->getType() == ShadowTy)
2579       return V;
2580     if (V->getType()->isPtrOrPtrVectorTy())
2581       return IRB.CreatePtrToInt(V, ShadowTy);
2582     else
2583       return IRB.CreateBitCast(V, ShadowTy);
2584   }
2585 
2586   /// Propagate shadow for arbitrary operation.
2587   void handleShadowOr(Instruction &I) {
2588     IRBuilder<> IRB(&I);
2589     ShadowAndOriginCombiner SC(this, IRB);
2590     for (Use &Op : I.operands())
2591       SC.Add(Op.get());
2592     SC.Done(&I);
2593   }
2594 
2595   void visitFNeg(UnaryOperator &I) { handleShadowOr(I); }
2596 
2597   // Handle multiplication by constant.
2598   //
2599   // Handle a special case of multiplication by constant that may have one or
2600   // more zeros in the lower bits. This makes corresponding number of lower bits
2601   // of the result zero as well. We model it by shifting the other operand
2602   // shadow left by the required number of bits. Effectively, we transform
2603   // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
2604   // We use multiplication by 2**N instead of shift to cover the case of
2605   // multiplication by 0, which may occur in some elements of a vector operand.
2606   void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
2607                            Value *OtherArg) {
2608     Constant *ShadowMul;
2609     Type *Ty = ConstArg->getType();
2610     if (auto *VTy = dyn_cast<VectorType>(Ty)) {
2611       unsigned NumElements = cast<FixedVectorType>(VTy)->getNumElements();
2612       Type *EltTy = VTy->getElementType();
2613       SmallVector<Constant *, 16> Elements;
2614       for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
2615         if (ConstantInt *Elt =
2616                 dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
2617           const APInt &V = Elt->getValue();
2618           APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
2619           Elements.push_back(ConstantInt::get(EltTy, V2));
2620         } else {
2621           Elements.push_back(ConstantInt::get(EltTy, 1));
2622         }
2623       }
2624       ShadowMul = ConstantVector::get(Elements);
2625     } else {
2626       if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
2627         const APInt &V = Elt->getValue();
2628         APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
2629         ShadowMul = ConstantInt::get(Ty, V2);
2630       } else {
2631         ShadowMul = ConstantInt::get(Ty, 1);
2632       }
2633     }
2634 
2635     IRBuilder<> IRB(&I);
2636     setShadow(&I,
2637               IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
2638     setOrigin(&I, getOrigin(OtherArg));
2639   }
2640 
2641   void visitMul(BinaryOperator &I) {
2642     Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
2643     Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
2644     if (constOp0 && !constOp1)
2645       handleMulByConstant(I, constOp0, I.getOperand(1));
2646     else if (constOp1 && !constOp0)
2647       handleMulByConstant(I, constOp1, I.getOperand(0));
2648     else
2649       handleShadowOr(I);
2650   }
2651 
2652   void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
2653   void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
2654   void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
2655   void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
2656   void visitSub(BinaryOperator &I) { handleShadowOr(I); }
2657   void visitXor(BinaryOperator &I) { handleShadowOr(I); }
2658 
2659   void handleIntegerDiv(Instruction &I) {
2660     IRBuilder<> IRB(&I);
2661     // Strict on the second argument.
2662     insertShadowCheck(I.getOperand(1), &I);
2663     setShadow(&I, getShadow(&I, 0));
2664     setOrigin(&I, getOrigin(&I, 0));
2665   }
2666 
2667   void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
2668   void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
2669   void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
2670   void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }
2671 
2672   // Floating point division is side-effect free. We can not require that the
2673   // divisor is fully initialized and must propagate shadow. See PR37523.
2674   void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
2675   void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
2676 
2677   /// Instrument == and != comparisons.
2678   ///
2679   /// Sometimes the comparison result is known even if some of the bits of the
2680   /// arguments are not.
2681   void handleEqualityComparison(ICmpInst &I) {
2682     IRBuilder<> IRB(&I);
2683     Value *A = I.getOperand(0);
2684     Value *B = I.getOperand(1);
2685     Value *Sa = getShadow(A);
2686     Value *Sb = getShadow(B);
2687 
2688     // Get rid of pointers and vectors of pointers.
2689     // For ints (and vectors of ints), types of A and Sa match,
2690     // and this is a no-op.
2691     A = IRB.CreatePointerCast(A, Sa->getType());
2692     B = IRB.CreatePointerCast(B, Sb->getType());
2693 
2694     // A == B  <==>  (C = A^B) == 0
2695     // A != B  <==>  (C = A^B) != 0
2696     // Sc = Sa | Sb
2697     Value *C = IRB.CreateXor(A, B);
2698     Value *Sc = IRB.CreateOr(Sa, Sb);
2699     // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
2700     // Result is defined if one of the following is true
2701     // * there is a defined 1 bit in C
2702     // * C is fully defined
2703     // Si = !(C & ~Sc) && Sc
2704     Value *Zero = Constant::getNullValue(Sc->getType());
2705     Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
2706     Value *LHS = IRB.CreateICmpNE(Sc, Zero);
2707     Value *RHS =
2708         IRB.CreateICmpEQ(IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero);
2709     Value *Si = IRB.CreateAnd(LHS, RHS);
2710     Si->setName("_msprop_icmp");
2711     setShadow(&I, Si);
2712     setOriginForNaryOp(I);
2713   }
2714 
2715   /// Instrument relational comparisons.
2716   ///
2717   /// This function does exact shadow propagation for all relational
2718   /// comparisons of integers, pointers and vectors of those.
2719   /// FIXME: output seems suboptimal when one of the operands is a constant
2720   void handleRelationalComparisonExact(ICmpInst &I) {
2721     IRBuilder<> IRB(&I);
2722     Value *A = I.getOperand(0);
2723     Value *B = I.getOperand(1);
2724     Value *Sa = getShadow(A);
2725     Value *Sb = getShadow(B);
2726 
2727     // Get rid of pointers and vectors of pointers.
2728     // For ints (and vectors of ints), types of A and Sa match,
2729     // and this is a no-op.
2730     A = IRB.CreatePointerCast(A, Sa->getType());
2731     B = IRB.CreatePointerCast(B, Sb->getType());
2732 
2733     // Let [a0, a1] be the interval of possible values of A, taking into account
2734     // its undefined bits. Let [b0, b1] be the interval of possible values of B.
2735     // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
2736     bool IsSigned = I.isSigned();
2737 
2738     auto GetMinMaxUnsigned = [&](Value *V, Value *S) {
2739       if (IsSigned) {
2740         // Sign-flip to map from signed range to unsigned range. Relation A vs B
2741         // should be preserved, if checked with `getUnsignedPredicate()`.
2742         // Relationship between Amin, Amax, Bmin, Bmax also will not be
2743         // affected, as they are created by effectively adding/substructing from
2744         // A (or B) a value, derived from shadow, with no overflow, either
2745         // before or after sign flip.
2746         APInt MinVal =
2747             APInt::getSignedMinValue(V->getType()->getScalarSizeInBits());
2748         V = IRB.CreateXor(V, ConstantInt::get(V->getType(), MinVal));
2749       }
2750       // Minimize undefined bits.
2751       Value *Min = IRB.CreateAnd(V, IRB.CreateNot(S));
2752       Value *Max = IRB.CreateOr(V, S);
2753       return std::make_pair(Min, Max);
2754     };
2755 
2756     auto [Amin, Amax] = GetMinMaxUnsigned(A, Sa);
2757     auto [Bmin, Bmax] = GetMinMaxUnsigned(B, Sb);
2758     Value *S1 = IRB.CreateICmp(I.getUnsignedPredicate(), Amin, Bmax);
2759     Value *S2 = IRB.CreateICmp(I.getUnsignedPredicate(), Amax, Bmin);
2760 
2761     Value *Si = IRB.CreateXor(S1, S2);
2762     setShadow(&I, Si);
2763     setOriginForNaryOp(I);
2764   }
2765 
2766   /// Instrument signed relational comparisons.
2767   ///
2768   /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
2769   /// bit of the shadow. Everything else is delegated to handleShadowOr().
2770   void handleSignedRelationalComparison(ICmpInst &I) {
2771     Constant *constOp;
2772     Value *op = nullptr;
2773     CmpInst::Predicate pre;
2774     if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
2775       op = I.getOperand(0);
2776       pre = I.getPredicate();
2777     } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
2778       op = I.getOperand(1);
2779       pre = I.getSwappedPredicate();
2780     } else {
2781       handleShadowOr(I);
2782       return;
2783     }
2784 
2785     if ((constOp->isNullValue() &&
2786          (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
2787         (constOp->isAllOnesValue() &&
2788          (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
2789       IRBuilder<> IRB(&I);
2790       Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
2791                                         "_msprop_icmp_s");
2792       setShadow(&I, Shadow);
2793       setOrigin(&I, getOrigin(op));
2794     } else {
2795       handleShadowOr(I);
2796     }
2797   }
2798 
2799   void visitICmpInst(ICmpInst &I) {
2800     if (!ClHandleICmp) {
2801       handleShadowOr(I);
2802       return;
2803     }
2804     if (I.isEquality()) {
2805       handleEqualityComparison(I);
2806       return;
2807     }
2808 
2809     assert(I.isRelational());
2810     if (ClHandleICmpExact) {
2811       handleRelationalComparisonExact(I);
2812       return;
2813     }
2814     if (I.isSigned()) {
2815       handleSignedRelationalComparison(I);
2816       return;
2817     }
2818 
2819     assert(I.isUnsigned());
2820     if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
2821       handleRelationalComparisonExact(I);
2822       return;
2823     }
2824 
2825     handleShadowOr(I);
2826   }
2827 
2828   void visitFCmpInst(FCmpInst &I) { handleShadowOr(I); }
2829 
2830   void handleShift(BinaryOperator &I) {
2831     IRBuilder<> IRB(&I);
2832     // If any of the S2 bits are poisoned, the whole thing is poisoned.
2833     // Otherwise perform the same shift on S1.
2834     Value *S1 = getShadow(&I, 0);
2835     Value *S2 = getShadow(&I, 1);
2836     Value *S2Conv =
2837         IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
2838     Value *V2 = I.getOperand(1);
2839     Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
2840     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2841     setOriginForNaryOp(I);
2842   }
2843 
2844   void visitShl(BinaryOperator &I) { handleShift(I); }
2845   void visitAShr(BinaryOperator &I) { handleShift(I); }
2846   void visitLShr(BinaryOperator &I) { handleShift(I); }
2847 
2848   void handleFunnelShift(IntrinsicInst &I) {
2849     IRBuilder<> IRB(&I);
2850     // If any of the S2 bits are poisoned, the whole thing is poisoned.
2851     // Otherwise perform the same shift on S0 and S1.
2852     Value *S0 = getShadow(&I, 0);
2853     Value *S1 = getShadow(&I, 1);
2854     Value *S2 = getShadow(&I, 2);
2855     Value *S2Conv =
2856         IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
2857     Value *V2 = I.getOperand(2);
2858     Value *Shift = IRB.CreateIntrinsic(I.getIntrinsicID(), S2Conv->getType(),
2859                                        {S0, S1, V2});
2860     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2861     setOriginForNaryOp(I);
2862   }
2863 
2864   /// Instrument llvm.memmove
2865   ///
2866   /// At this point we don't know if llvm.memmove will be inlined or not.
2867   /// If we don't instrument it and it gets inlined,
2868   /// our interceptor will not kick in and we will lose the memmove.
2869   /// If we instrument the call here, but it does not get inlined,
2870   /// we will memove the shadow twice: which is bad in case
2871   /// of overlapping regions. So, we simply lower the intrinsic to a call.
2872   ///
2873   /// Similar situation exists for memcpy and memset.
2874   void visitMemMoveInst(MemMoveInst &I) {
2875     getShadow(I.getArgOperand(1)); // Ensure shadow initialized
2876     IRBuilder<> IRB(&I);
2877     IRB.CreateCall(MS.MemmoveFn,
2878                    {I.getArgOperand(0), I.getArgOperand(1),
2879                     IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2880     I.eraseFromParent();
2881   }
2882 
2883   /// Instrument memcpy
2884   ///
2885   /// Similar to memmove: avoid copying shadow twice. This is somewhat
2886   /// unfortunate as it may slowdown small constant memcpys.
2887   /// FIXME: consider doing manual inline for small constant sizes and proper
2888   /// alignment.
2889   ///
2890   /// Note: This also handles memcpy.inline, which promises no calls to external
2891   /// functions as an optimization. However, with instrumentation enabled this
2892   /// is difficult to promise; additionally, we know that the MSan runtime
2893   /// exists and provides __msan_memcpy(). Therefore, we assume that with
2894   /// instrumentation it's safe to turn memcpy.inline into a call to
2895   /// __msan_memcpy(). Should this be wrong, such as when implementing memcpy()
2896   /// itself, instrumentation should be disabled with the no_sanitize attribute.
2897   void visitMemCpyInst(MemCpyInst &I) {
2898     getShadow(I.getArgOperand(1)); // Ensure shadow initialized
2899     IRBuilder<> IRB(&I);
2900     IRB.CreateCall(MS.MemcpyFn,
2901                    {I.getArgOperand(0), I.getArgOperand(1),
2902                     IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2903     I.eraseFromParent();
2904   }
2905 
2906   // Same as memcpy.
2907   void visitMemSetInst(MemSetInst &I) {
2908     IRBuilder<> IRB(&I);
2909     IRB.CreateCall(
2910         MS.MemsetFn,
2911         {I.getArgOperand(0),
2912          IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
2913          IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2914     I.eraseFromParent();
2915   }
2916 
2917   void visitVAStartInst(VAStartInst &I) { VAHelper->visitVAStartInst(I); }
2918 
2919   void visitVACopyInst(VACopyInst &I) { VAHelper->visitVACopyInst(I); }
2920 
2921   /// Handle vector store-like intrinsics.
2922   ///
2923   /// Instrument intrinsics that look like a simple SIMD store: writes memory,
2924   /// has 1 pointer argument and 1 vector argument, returns void.
2925   bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
2926     IRBuilder<> IRB(&I);
2927     Value *Addr = I.getArgOperand(0);
2928     Value *Shadow = getShadow(&I, 1);
2929     Value *ShadowPtr, *OriginPtr;
2930 
2931     // We don't know the pointer alignment (could be unaligned SSE store!).
2932     // Have to assume to worst case.
2933     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2934         Addr, IRB, Shadow->getType(), Align(1), /*isStore*/ true);
2935     IRB.CreateAlignedStore(Shadow, ShadowPtr, Align(1));
2936 
2937     if (ClCheckAccessAddress)
2938       insertShadowCheck(Addr, &I);
2939 
2940     // FIXME: factor out common code from materializeStores
2941     if (MS.TrackOrigins)
2942       IRB.CreateStore(getOrigin(&I, 1), OriginPtr);
2943     return true;
2944   }
2945 
2946   /// Handle vector load-like intrinsics.
2947   ///
2948   /// Instrument intrinsics that look like a simple SIMD load: reads memory,
2949   /// has 1 pointer argument, returns a vector.
2950   bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
2951     IRBuilder<> IRB(&I);
2952     Value *Addr = I.getArgOperand(0);
2953 
2954     Type *ShadowTy = getShadowTy(&I);
2955     Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
2956     if (PropagateShadow) {
2957       // We don't know the pointer alignment (could be unaligned SSE load!).
2958       // Have to assume to worst case.
2959       const Align Alignment = Align(1);
2960       std::tie(ShadowPtr, OriginPtr) =
2961           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
2962       setShadow(&I,
2963                 IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
2964     } else {
2965       setShadow(&I, getCleanShadow(&I));
2966     }
2967 
2968     if (ClCheckAccessAddress)
2969       insertShadowCheck(Addr, &I);
2970 
2971     if (MS.TrackOrigins) {
2972       if (PropagateShadow)
2973         setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
2974       else
2975         setOrigin(&I, getCleanOrigin());
2976     }
2977     return true;
2978   }
2979 
2980   /// Handle (SIMD arithmetic)-like intrinsics.
2981   ///
2982   /// Instrument intrinsics with any number of arguments of the same type,
2983   /// equal to the return type. The type should be simple (no aggregates or
2984   /// pointers; vectors are fine).
2985   /// Caller guarantees that this intrinsic does not access memory.
2986   bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
2987     Type *RetTy = I.getType();
2988     if (!(RetTy->isIntOrIntVectorTy() || RetTy->isFPOrFPVectorTy()))
2989       return false;
2990 
2991     unsigned NumArgOperands = I.arg_size();
2992     for (unsigned i = 0; i < NumArgOperands; ++i) {
2993       Type *Ty = I.getArgOperand(i)->getType();
2994       if (Ty != RetTy)
2995         return false;
2996     }
2997 
2998     IRBuilder<> IRB(&I);
2999     ShadowAndOriginCombiner SC(this, IRB);
3000     for (unsigned i = 0; i < NumArgOperands; ++i)
3001       SC.Add(I.getArgOperand(i));
3002     SC.Done(&I);
3003 
3004     return true;
3005   }
3006 
3007   /// Heuristically instrument unknown intrinsics.
3008   ///
3009   /// The main purpose of this code is to do something reasonable with all
3010   /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
3011   /// We recognize several classes of intrinsics by their argument types and
3012   /// ModRefBehaviour and apply special instrumentation when we are reasonably
3013   /// sure that we know what the intrinsic does.
3014   ///
3015   /// We special-case intrinsics where this approach fails. See llvm.bswap
3016   /// handling as an example of that.
3017   bool handleUnknownIntrinsic(IntrinsicInst &I) {
3018     unsigned NumArgOperands = I.arg_size();
3019     if (NumArgOperands == 0)
3020       return false;
3021 
3022     if (NumArgOperands == 2 && I.getArgOperand(0)->getType()->isPointerTy() &&
3023         I.getArgOperand(1)->getType()->isVectorTy() &&
3024         I.getType()->isVoidTy() && !I.onlyReadsMemory()) {
3025       // This looks like a vector store.
3026       return handleVectorStoreIntrinsic(I);
3027     }
3028 
3029     if (NumArgOperands == 1 && I.getArgOperand(0)->getType()->isPointerTy() &&
3030         I.getType()->isVectorTy() && I.onlyReadsMemory()) {
3031       // This looks like a vector load.
3032       return handleVectorLoadIntrinsic(I);
3033     }
3034 
3035     if (I.doesNotAccessMemory())
3036       if (maybeHandleSimpleNomemIntrinsic(I))
3037         return true;
3038 
3039     // FIXME: detect and handle SSE maskstore/maskload
3040     return false;
3041   }
3042 
3043   void handleInvariantGroup(IntrinsicInst &I) {
3044     setShadow(&I, getShadow(&I, 0));
3045     setOrigin(&I, getOrigin(&I, 0));
3046   }
3047 
3048   void handleLifetimeStart(IntrinsicInst &I) {
3049     if (!PoisonStack)
3050       return;
3051     AllocaInst *AI = llvm::findAllocaForValue(I.getArgOperand(1));
3052     if (!AI)
3053       InstrumentLifetimeStart = false;
3054     LifetimeStartList.push_back(std::make_pair(&I, AI));
3055   }
3056 
3057   void handleBswap(IntrinsicInst &I) {
3058     IRBuilder<> IRB(&I);
3059     Value *Op = I.getArgOperand(0);
3060     Type *OpType = Op->getType();
3061     setShadow(&I, IRB.CreateIntrinsic(Intrinsic::bswap, ArrayRef(&OpType, 1),
3062                                       getShadow(Op)));
3063     setOrigin(&I, getOrigin(Op));
3064   }
3065 
3066   void handleCountZeroes(IntrinsicInst &I) {
3067     IRBuilder<> IRB(&I);
3068     Value *Src = I.getArgOperand(0);
3069 
3070     // Set the Output shadow based on input Shadow
3071     Value *BoolShadow = IRB.CreateIsNotNull(getShadow(Src), "_mscz_bs");
3072 
3073     // If zero poison is requested, mix in with the shadow
3074     Constant *IsZeroPoison = cast<Constant>(I.getOperand(1));
3075     if (!IsZeroPoison->isZeroValue()) {
3076       Value *BoolZeroPoison = IRB.CreateIsNull(Src, "_mscz_bzp");
3077       BoolShadow = IRB.CreateOr(BoolShadow, BoolZeroPoison, "_mscz_bs");
3078     }
3079 
3080     Value *OutputShadow =
3081         IRB.CreateSExt(BoolShadow, getShadowTy(Src), "_mscz_os");
3082 
3083     setShadow(&I, OutputShadow);
3084     setOriginForNaryOp(I);
3085   }
3086 
3087   // Instrument vector convert intrinsic.
3088   //
3089   // This function instruments intrinsics like cvtsi2ss:
3090   // %Out = int_xxx_cvtyyy(%ConvertOp)
3091   // or
3092   // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
3093   // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
3094   // number \p Out elements, and (if has 2 arguments) copies the rest of the
3095   // elements from \p CopyOp.
3096   // In most cases conversion involves floating-point value which may trigger a
3097   // hardware exception when not fully initialized. For this reason we require
3098   // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
3099   // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
3100   // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
3101   // return a fully initialized value.
3102   void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements,
3103                                     bool HasRoundingMode = false) {
3104     IRBuilder<> IRB(&I);
3105     Value *CopyOp, *ConvertOp;
3106 
3107     assert((!HasRoundingMode ||
3108             isa<ConstantInt>(I.getArgOperand(I.arg_size() - 1))) &&
3109            "Invalid rounding mode");
3110 
3111     switch (I.arg_size() - HasRoundingMode) {
3112     case 2:
3113       CopyOp = I.getArgOperand(0);
3114       ConvertOp = I.getArgOperand(1);
3115       break;
3116     case 1:
3117       ConvertOp = I.getArgOperand(0);
3118       CopyOp = nullptr;
3119       break;
3120     default:
3121       llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
3122     }
3123 
3124     // The first *NumUsedElements* elements of ConvertOp are converted to the
3125     // same number of output elements. The rest of the output is copied from
3126     // CopyOp, or (if not available) filled with zeroes.
3127     // Combine shadow for elements of ConvertOp that are used in this operation,
3128     // and insert a check.
3129     // FIXME: consider propagating shadow of ConvertOp, at least in the case of
3130     // int->any conversion.
3131     Value *ConvertShadow = getShadow(ConvertOp);
3132     Value *AggShadow = nullptr;
3133     if (ConvertOp->getType()->isVectorTy()) {
3134       AggShadow = IRB.CreateExtractElement(
3135           ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
3136       for (int i = 1; i < NumUsedElements; ++i) {
3137         Value *MoreShadow = IRB.CreateExtractElement(
3138             ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
3139         AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
3140       }
3141     } else {
3142       AggShadow = ConvertShadow;
3143     }
3144     assert(AggShadow->getType()->isIntegerTy());
3145     insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
3146 
3147     // Build result shadow by zero-filling parts of CopyOp shadow that come from
3148     // ConvertOp.
3149     if (CopyOp) {
3150       assert(CopyOp->getType() == I.getType());
3151       assert(CopyOp->getType()->isVectorTy());
3152       Value *ResultShadow = getShadow(CopyOp);
3153       Type *EltTy = cast<VectorType>(ResultShadow->getType())->getElementType();
3154       for (int i = 0; i < NumUsedElements; ++i) {
3155         ResultShadow = IRB.CreateInsertElement(
3156             ResultShadow, ConstantInt::getNullValue(EltTy),
3157             ConstantInt::get(IRB.getInt32Ty(), i));
3158       }
3159       setShadow(&I, ResultShadow);
3160       setOrigin(&I, getOrigin(CopyOp));
3161     } else {
3162       setShadow(&I, getCleanShadow(&I));
3163       setOrigin(&I, getCleanOrigin());
3164     }
3165   }
3166 
3167   // Given a scalar or vector, extract lower 64 bits (or less), and return all
3168   // zeroes if it is zero, and all ones otherwise.
3169   Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
3170     if (S->getType()->isVectorTy())
3171       S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
3172     assert(S->getType()->getPrimitiveSizeInBits() <= 64);
3173     Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
3174     return CreateShadowCast(IRB, S2, T, /* Signed */ true);
3175   }
3176 
3177   // Given a vector, extract its first element, and return all
3178   // zeroes if it is zero, and all ones otherwise.
3179   Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
3180     Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
3181     Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
3182     return CreateShadowCast(IRB, S2, T, /* Signed */ true);
3183   }
3184 
3185   Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
3186     Type *T = S->getType();
3187     assert(T->isVectorTy());
3188     Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
3189     return IRB.CreateSExt(S2, T);
3190   }
3191 
3192   // Instrument vector shift intrinsic.
3193   //
3194   // This function instruments intrinsics like int_x86_avx2_psll_w.
3195   // Intrinsic shifts %In by %ShiftSize bits.
3196   // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
3197   // size, and the rest is ignored. Behavior is defined even if shift size is
3198   // greater than register (or field) width.
3199   void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
3200     assert(I.arg_size() == 2);
3201     IRBuilder<> IRB(&I);
3202     // If any of the S2 bits are poisoned, the whole thing is poisoned.
3203     // Otherwise perform the same shift on S1.
3204     Value *S1 = getShadow(&I, 0);
3205     Value *S2 = getShadow(&I, 1);
3206     Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
3207                              : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
3208     Value *V1 = I.getOperand(0);
3209     Value *V2 = I.getOperand(1);
3210     Value *Shift = IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
3211                                   {IRB.CreateBitCast(S1, V1->getType()), V2});
3212     Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
3213     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
3214     setOriginForNaryOp(I);
3215   }
3216 
3217   // Get an MMX-sized vector type.
3218   Type *getMMXVectorTy(unsigned EltSizeInBits) {
3219     const unsigned X86_MMXSizeInBits = 64;
3220     assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
3221            "Illegal MMX vector element size");
3222     return FixedVectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
3223                                 X86_MMXSizeInBits / EltSizeInBits);
3224   }
3225 
3226   // Returns a signed counterpart for an (un)signed-saturate-and-pack
3227   // intrinsic.
3228   Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
3229     switch (id) {
3230     case Intrinsic::x86_sse2_packsswb_128:
3231     case Intrinsic::x86_sse2_packuswb_128:
3232       return Intrinsic::x86_sse2_packsswb_128;
3233 
3234     case Intrinsic::x86_sse2_packssdw_128:
3235     case Intrinsic::x86_sse41_packusdw:
3236       return Intrinsic::x86_sse2_packssdw_128;
3237 
3238     case Intrinsic::x86_avx2_packsswb:
3239     case Intrinsic::x86_avx2_packuswb:
3240       return Intrinsic::x86_avx2_packsswb;
3241 
3242     case Intrinsic::x86_avx2_packssdw:
3243     case Intrinsic::x86_avx2_packusdw:
3244       return Intrinsic::x86_avx2_packssdw;
3245 
3246     case Intrinsic::x86_mmx_packsswb:
3247     case Intrinsic::x86_mmx_packuswb:
3248       return Intrinsic::x86_mmx_packsswb;
3249 
3250     case Intrinsic::x86_mmx_packssdw:
3251       return Intrinsic::x86_mmx_packssdw;
3252     default:
3253       llvm_unreachable("unexpected intrinsic id");
3254     }
3255   }
3256 
3257   // Instrument vector pack intrinsic.
3258   //
3259   // This function instruments intrinsics like x86_mmx_packsswb, that
3260   // packs elements of 2 input vectors into half as many bits with saturation.
3261   // Shadow is propagated with the signed variant of the same intrinsic applied
3262   // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
3263   // MMXEltSizeInBits is used only for x86mmx arguments.
3264   void handleVectorPackIntrinsic(IntrinsicInst &I,
3265                                  unsigned MMXEltSizeInBits = 0) {
3266     assert(I.arg_size() == 2);
3267     IRBuilder<> IRB(&I);
3268     Value *S1 = getShadow(&I, 0);
3269     Value *S2 = getShadow(&I, 1);
3270     assert(S1->getType()->isVectorTy());
3271 
3272     // SExt and ICmpNE below must apply to individual elements of input vectors.
3273     // In case of x86mmx arguments, cast them to appropriate vector types and
3274     // back.
3275     Type *T =
3276         MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) : S1->getType();
3277     if (MMXEltSizeInBits) {
3278       S1 = IRB.CreateBitCast(S1, T);
3279       S2 = IRB.CreateBitCast(S2, T);
3280     }
3281     Value *S1_ext =
3282         IRB.CreateSExt(IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
3283     Value *S2_ext =
3284         IRB.CreateSExt(IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
3285     if (MMXEltSizeInBits) {
3286       S1_ext = IRB.CreateBitCast(S1_ext, getMMXVectorTy(64));
3287       S2_ext = IRB.CreateBitCast(S2_ext, getMMXVectorTy(64));
3288     }
3289 
3290     Value *S = IRB.CreateIntrinsic(getSignedPackIntrinsic(I.getIntrinsicID()),
3291                                    {}, {S1_ext, S2_ext}, /*FMFSource=*/nullptr,
3292                                    "_msprop_vector_pack");
3293     if (MMXEltSizeInBits)
3294       S = IRB.CreateBitCast(S, getShadowTy(&I));
3295     setShadow(&I, S);
3296     setOriginForNaryOp(I);
3297   }
3298 
3299   // Convert `Mask` into `<n x i1>`.
3300   Constant *createDppMask(unsigned Width, unsigned Mask) {
3301     SmallVector<Constant *, 4> R(Width);
3302     for (auto &M : R) {
3303       M = ConstantInt::getBool(F.getContext(), Mask & 1);
3304       Mask >>= 1;
3305     }
3306     return ConstantVector::get(R);
3307   }
3308 
3309   // Calculate output shadow as array of booleans `<n x i1>`, assuming if any
3310   // arg is poisoned, entire dot product is poisoned.
3311   Value *findDppPoisonedOutput(IRBuilder<> &IRB, Value *S, unsigned SrcMask,
3312                                unsigned DstMask) {
3313     const unsigned Width =
3314         cast<FixedVectorType>(S->getType())->getNumElements();
3315 
3316     S = IRB.CreateSelect(createDppMask(Width, SrcMask), S,
3317                          Constant::getNullValue(S->getType()));
3318     Value *SElem = IRB.CreateOrReduce(S);
3319     Value *IsClean = IRB.CreateIsNull(SElem, "_msdpp");
3320     Value *DstMaskV = createDppMask(Width, DstMask);
3321 
3322     return IRB.CreateSelect(
3323         IsClean, Constant::getNullValue(DstMaskV->getType()), DstMaskV);
3324   }
3325 
3326   // See `Intel Intrinsics Guide` for `_dp_p*` instructions.
3327   //
3328   // 2 and 4 element versions produce single scalar of dot product, and then
3329   // puts it into elements of output vector, selected by 4 lowest bits of the
3330   // mask. Top 4 bits of the mask control which elements of input to use for dot
3331   // product.
3332   //
3333   // 8 element version mask still has only 4 bit for input, and 4 bit for output
3334   // mask. According to the spec it just operates as 4 element version on first
3335   // 4 elements of inputs and output, and then on last 4 elements of inputs and
3336   // output.
3337   void handleDppIntrinsic(IntrinsicInst &I) {
3338     IRBuilder<> IRB(&I);
3339 
3340     Value *S0 = getShadow(&I, 0);
3341     Value *S1 = getShadow(&I, 1);
3342     Value *S = IRB.CreateOr(S0, S1);
3343 
3344     const unsigned Width =
3345         cast<FixedVectorType>(S->getType())->getNumElements();
3346     assert(Width == 2 || Width == 4 || Width == 8);
3347 
3348     const unsigned Mask = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3349     const unsigned SrcMask = Mask >> 4;
3350     const unsigned DstMask = Mask & 0xf;
3351 
3352     // Calculate shadow as `<n x i1>`.
3353     Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
3354     if (Width == 8) {
3355       // First 4 elements of shadow are already calculated. `makeDppShadow`
3356       // operats on 32 bit masks, so we can just shift masks, and repeat.
3357       SI1 = IRB.CreateOr(
3358           SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
3359     }
3360     // Extend to real size of shadow, poisoning either all or none bits of an
3361     // element.
3362     S = IRB.CreateSExt(SI1, S->getType(), "_msdpp");
3363 
3364     setShadow(&I, S);
3365     setOriginForNaryOp(I);
3366   }
3367 
3368   Value *convertBlendvToSelectMask(IRBuilder<> &IRB, Value *C) {
3369     C = CreateAppToShadowCast(IRB, C);
3370     FixedVectorType *FVT = cast<FixedVectorType>(C->getType());
3371     unsigned ElSize = FVT->getElementType()->getPrimitiveSizeInBits();
3372     C = IRB.CreateAShr(C, ElSize - 1);
3373     FVT = FixedVectorType::get(IRB.getInt1Ty(), FVT->getNumElements());
3374     return IRB.CreateTrunc(C, FVT);
3375   }
3376 
3377   // `blendv(f, t, c)` is effectively `select(c[top_bit], t, f)`.
3378   void handleBlendvIntrinsic(IntrinsicInst &I) {
3379     Value *C = I.getOperand(2);
3380     Value *T = I.getOperand(1);
3381     Value *F = I.getOperand(0);
3382 
3383     Value *Sc = getShadow(&I, 2);
3384     Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
3385 
3386     {
3387       IRBuilder<> IRB(&I);
3388       // Extract top bit from condition and its shadow.
3389       C = convertBlendvToSelectMask(IRB, C);
3390       Sc = convertBlendvToSelectMask(IRB, Sc);
3391 
3392       setShadow(C, Sc);
3393       setOrigin(C, Oc);
3394     }
3395 
3396     handleSelectLikeInst(I, C, T, F);
3397   }
3398 
3399   // Instrument sum-of-absolute-differences intrinsic.
3400   void handleVectorSadIntrinsic(IntrinsicInst &I, bool IsMMX = false) {
3401     const unsigned SignificantBitsPerResultElement = 16;
3402     Type *ResTy = IsMMX ? IntegerType::get(*MS.C, 64) : I.getType();
3403     unsigned ZeroBitsPerResultElement =
3404         ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
3405 
3406     IRBuilder<> IRB(&I);
3407     auto *Shadow0 = getShadow(&I, 0);
3408     auto *Shadow1 = getShadow(&I, 1);
3409     Value *S = IRB.CreateOr(Shadow0, Shadow1);
3410     S = IRB.CreateBitCast(S, ResTy);
3411     S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
3412                        ResTy);
3413     S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
3414     S = IRB.CreateBitCast(S, getShadowTy(&I));
3415     setShadow(&I, S);
3416     setOriginForNaryOp(I);
3417   }
3418 
3419   // Instrument multiply-add intrinsic.
3420   void handleVectorPmaddIntrinsic(IntrinsicInst &I,
3421                                   unsigned MMXEltSizeInBits = 0) {
3422     Type *ResTy =
3423         MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits * 2) : I.getType();
3424     IRBuilder<> IRB(&I);
3425     auto *Shadow0 = getShadow(&I, 0);
3426     auto *Shadow1 = getShadow(&I, 1);
3427     Value *S = IRB.CreateOr(Shadow0, Shadow1);
3428     S = IRB.CreateBitCast(S, ResTy);
3429     S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
3430                        ResTy);
3431     S = IRB.CreateBitCast(S, getShadowTy(&I));
3432     setShadow(&I, S);
3433     setOriginForNaryOp(I);
3434   }
3435 
3436   // Instrument compare-packed intrinsic.
3437   // Basically, an or followed by sext(icmp ne 0) to end up with all-zeros or
3438   // all-ones shadow.
3439   void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
3440     IRBuilder<> IRB(&I);
3441     Type *ResTy = getShadowTy(&I);
3442     auto *Shadow0 = getShadow(&I, 0);
3443     auto *Shadow1 = getShadow(&I, 1);
3444     Value *S0 = IRB.CreateOr(Shadow0, Shadow1);
3445     Value *S = IRB.CreateSExt(
3446         IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
3447     setShadow(&I, S);
3448     setOriginForNaryOp(I);
3449   }
3450 
3451   // Instrument compare-scalar intrinsic.
3452   // This handles both cmp* intrinsics which return the result in the first
3453   // element of a vector, and comi* which return the result as i32.
3454   void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
3455     IRBuilder<> IRB(&I);
3456     auto *Shadow0 = getShadow(&I, 0);
3457     auto *Shadow1 = getShadow(&I, 1);
3458     Value *S0 = IRB.CreateOr(Shadow0, Shadow1);
3459     Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
3460     setShadow(&I, S);
3461     setOriginForNaryOp(I);
3462   }
3463 
3464   // Instrument generic vector reduction intrinsics
3465   // by ORing together all their fields.
3466   void handleVectorReduceIntrinsic(IntrinsicInst &I) {
3467     IRBuilder<> IRB(&I);
3468     Value *S = IRB.CreateOrReduce(getShadow(&I, 0));
3469     setShadow(&I, S);
3470     setOrigin(&I, getOrigin(&I, 0));
3471   }
3472 
3473   // Instrument vector.reduce.or intrinsic.
3474   // Valid (non-poisoned) set bits in the operand pull low the
3475   // corresponding shadow bits.
3476   void handleVectorReduceOrIntrinsic(IntrinsicInst &I) {
3477     IRBuilder<> IRB(&I);
3478     Value *OperandShadow = getShadow(&I, 0);
3479     Value *OperandUnsetBits = IRB.CreateNot(I.getOperand(0));
3480     Value *OperandUnsetOrPoison = IRB.CreateOr(OperandUnsetBits, OperandShadow);
3481     // Bit N is clean if any field's bit N is 1 and unpoison
3482     Value *OutShadowMask = IRB.CreateAndReduce(OperandUnsetOrPoison);
3483     // Otherwise, it is clean if every field's bit N is unpoison
3484     Value *OrShadow = IRB.CreateOrReduce(OperandShadow);
3485     Value *S = IRB.CreateAnd(OutShadowMask, OrShadow);
3486 
3487     setShadow(&I, S);
3488     setOrigin(&I, getOrigin(&I, 0));
3489   }
3490 
3491   // Instrument vector.reduce.and intrinsic.
3492   // Valid (non-poisoned) unset bits in the operand pull down the
3493   // corresponding shadow bits.
3494   void handleVectorReduceAndIntrinsic(IntrinsicInst &I) {
3495     IRBuilder<> IRB(&I);
3496     Value *OperandShadow = getShadow(&I, 0);
3497     Value *OperandSetOrPoison = IRB.CreateOr(I.getOperand(0), OperandShadow);
3498     // Bit N is clean if any field's bit N is 0 and unpoison
3499     Value *OutShadowMask = IRB.CreateAndReduce(OperandSetOrPoison);
3500     // Otherwise, it is clean if every field's bit N is unpoison
3501     Value *OrShadow = IRB.CreateOrReduce(OperandShadow);
3502     Value *S = IRB.CreateAnd(OutShadowMask, OrShadow);
3503 
3504     setShadow(&I, S);
3505     setOrigin(&I, getOrigin(&I, 0));
3506   }
3507 
3508   void handleStmxcsr(IntrinsicInst &I) {
3509     IRBuilder<> IRB(&I);
3510     Value *Addr = I.getArgOperand(0);
3511     Type *Ty = IRB.getInt32Ty();
3512     Value *ShadowPtr =
3513         getShadowOriginPtr(Addr, IRB, Ty, Align(1), /*isStore*/ true).first;
3514 
3515     IRB.CreateStore(getCleanShadow(Ty), ShadowPtr);
3516 
3517     if (ClCheckAccessAddress)
3518       insertShadowCheck(Addr, &I);
3519   }
3520 
3521   void handleLdmxcsr(IntrinsicInst &I) {
3522     if (!InsertChecks)
3523       return;
3524 
3525     IRBuilder<> IRB(&I);
3526     Value *Addr = I.getArgOperand(0);
3527     Type *Ty = IRB.getInt32Ty();
3528     const Align Alignment = Align(1);
3529     Value *ShadowPtr, *OriginPtr;
3530     std::tie(ShadowPtr, OriginPtr) =
3531         getShadowOriginPtr(Addr, IRB, Ty, Alignment, /*isStore*/ false);
3532 
3533     if (ClCheckAccessAddress)
3534       insertShadowCheck(Addr, &I);
3535 
3536     Value *Shadow = IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment, "_ldmxcsr");
3537     Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
3538                                     : getCleanOrigin();
3539     insertShadowCheck(Shadow, Origin, &I);
3540   }
3541 
3542   void handleMaskedExpandLoad(IntrinsicInst &I) {
3543     IRBuilder<> IRB(&I);
3544     Value *Ptr = I.getArgOperand(0);
3545     Value *Mask = I.getArgOperand(1);
3546     Value *PassThru = I.getArgOperand(2);
3547 
3548     if (ClCheckAccessAddress) {
3549       insertShadowCheck(Ptr, &I);
3550       insertShadowCheck(Mask, &I);
3551     }
3552 
3553     if (!PropagateShadow) {
3554       setShadow(&I, getCleanShadow(&I));
3555       setOrigin(&I, getCleanOrigin());
3556       return;
3557     }
3558 
3559     Type *ShadowTy = getShadowTy(&I);
3560     Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
3561     auto [ShadowPtr, OriginPtr] =
3562         getShadowOriginPtr(Ptr, IRB, ElementShadowTy, {}, /*isStore*/ false);
3563 
3564     Value *Shadow = IRB.CreateMaskedExpandLoad(
3565         ShadowTy, ShadowPtr, Mask, getShadow(PassThru), "_msmaskedexpload");
3566 
3567     setShadow(&I, Shadow);
3568 
3569     // TODO: Store origins.
3570     setOrigin(&I, getCleanOrigin());
3571   }
3572 
3573   void handleMaskedCompressStore(IntrinsicInst &I) {
3574     IRBuilder<> IRB(&I);
3575     Value *Values = I.getArgOperand(0);
3576     Value *Ptr = I.getArgOperand(1);
3577     Value *Mask = I.getArgOperand(2);
3578 
3579     if (ClCheckAccessAddress) {
3580       insertShadowCheck(Ptr, &I);
3581       insertShadowCheck(Mask, &I);
3582     }
3583 
3584     Value *Shadow = getShadow(Values);
3585     Type *ElementShadowTy =
3586         getShadowTy(cast<VectorType>(Values->getType())->getElementType());
3587     auto [ShadowPtr, OriginPtrs] =
3588         getShadowOriginPtr(Ptr, IRB, ElementShadowTy, {}, /*isStore*/ true);
3589 
3590     IRB.CreateMaskedCompressStore(Shadow, ShadowPtr, Mask);
3591 
3592     // TODO: Store origins.
3593   }
3594 
3595   void handleMaskedGather(IntrinsicInst &I) {
3596     IRBuilder<> IRB(&I);
3597     Value *Ptrs = I.getArgOperand(0);
3598     const Align Alignment(
3599         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
3600     Value *Mask = I.getArgOperand(2);
3601     Value *PassThru = I.getArgOperand(3);
3602 
3603     Type *PtrsShadowTy = getShadowTy(Ptrs);
3604     if (ClCheckAccessAddress) {
3605       insertShadowCheck(Mask, &I);
3606       Value *MaskedPtrShadow = IRB.CreateSelect(
3607           Mask, getShadow(Ptrs), Constant::getNullValue((PtrsShadowTy)),
3608           "_msmaskedptrs");
3609       insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &I);
3610     }
3611 
3612     if (!PropagateShadow) {
3613       setShadow(&I, getCleanShadow(&I));
3614       setOrigin(&I, getCleanOrigin());
3615       return;
3616     }
3617 
3618     Type *ShadowTy = getShadowTy(&I);
3619     Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
3620     auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
3621         Ptrs, IRB, ElementShadowTy, Alignment, /*isStore*/ false);
3622 
3623     Value *Shadow =
3624         IRB.CreateMaskedGather(ShadowTy, ShadowPtrs, Alignment, Mask,
3625                                getShadow(PassThru), "_msmaskedgather");
3626 
3627     setShadow(&I, Shadow);
3628 
3629     // TODO: Store origins.
3630     setOrigin(&I, getCleanOrigin());
3631   }
3632 
3633   void handleMaskedScatter(IntrinsicInst &I) {
3634     IRBuilder<> IRB(&I);
3635     Value *Values = I.getArgOperand(0);
3636     Value *Ptrs = I.getArgOperand(1);
3637     const Align Alignment(
3638         cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
3639     Value *Mask = I.getArgOperand(3);
3640 
3641     Type *PtrsShadowTy = getShadowTy(Ptrs);
3642     if (ClCheckAccessAddress) {
3643       insertShadowCheck(Mask, &I);
3644       Value *MaskedPtrShadow = IRB.CreateSelect(
3645           Mask, getShadow(Ptrs), Constant::getNullValue((PtrsShadowTy)),
3646           "_msmaskedptrs");
3647       insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &I);
3648     }
3649 
3650     Value *Shadow = getShadow(Values);
3651     Type *ElementShadowTy =
3652         getShadowTy(cast<VectorType>(Values->getType())->getElementType());
3653     auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
3654         Ptrs, IRB, ElementShadowTy, Alignment, /*isStore*/ true);
3655 
3656     IRB.CreateMaskedScatter(Shadow, ShadowPtrs, Alignment, Mask);
3657 
3658     // TODO: Store origin.
3659   }
3660 
3661   void handleMaskedStore(IntrinsicInst &I) {
3662     IRBuilder<> IRB(&I);
3663     Value *V = I.getArgOperand(0);
3664     Value *Ptr = I.getArgOperand(1);
3665     const Align Alignment(
3666         cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
3667     Value *Mask = I.getArgOperand(3);
3668     Value *Shadow = getShadow(V);
3669 
3670     if (ClCheckAccessAddress) {
3671       insertShadowCheck(Ptr, &I);
3672       insertShadowCheck(Mask, &I);
3673     }
3674 
3675     Value *ShadowPtr;
3676     Value *OriginPtr;
3677     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
3678         Ptr, IRB, Shadow->getType(), Alignment, /*isStore*/ true);
3679 
3680     IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment, Mask);
3681 
3682     if (!MS.TrackOrigins)
3683       return;
3684 
3685     auto &DL = F.getDataLayout();
3686     paintOrigin(IRB, getOrigin(V), OriginPtr,
3687                 DL.getTypeStoreSize(Shadow->getType()),
3688                 std::max(Alignment, kMinOriginAlignment));
3689   }
3690 
3691   void handleMaskedLoad(IntrinsicInst &I) {
3692     IRBuilder<> IRB(&I);
3693     Value *Ptr = I.getArgOperand(0);
3694     const Align Alignment(
3695         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
3696     Value *Mask = I.getArgOperand(2);
3697     Value *PassThru = I.getArgOperand(3);
3698 
3699     if (ClCheckAccessAddress) {
3700       insertShadowCheck(Ptr, &I);
3701       insertShadowCheck(Mask, &I);
3702     }
3703 
3704     if (!PropagateShadow) {
3705       setShadow(&I, getCleanShadow(&I));
3706       setOrigin(&I, getCleanOrigin());
3707       return;
3708     }
3709 
3710     Type *ShadowTy = getShadowTy(&I);
3711     Value *ShadowPtr, *OriginPtr;
3712     std::tie(ShadowPtr, OriginPtr) =
3713         getShadowOriginPtr(Ptr, IRB, ShadowTy, Alignment, /*isStore*/ false);
3714     setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
3715                                        getShadow(PassThru), "_msmaskedld"));
3716 
3717     if (!MS.TrackOrigins)
3718       return;
3719 
3720     // Choose between PassThru's and the loaded value's origins.
3721     Value *MaskedPassThruShadow = IRB.CreateAnd(
3722         getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));
3723 
3724     Value *NotNull = convertToBool(MaskedPassThruShadow, IRB, "_mscmp");
3725 
3726     Value *PtrOrigin = IRB.CreateLoad(MS.OriginTy, OriginPtr);
3727     Value *Origin = IRB.CreateSelect(NotNull, getOrigin(PassThru), PtrOrigin);
3728 
3729     setOrigin(&I, Origin);
3730   }
3731 
3732   // Instrument BMI / BMI2 intrinsics.
3733   // All of these intrinsics are Z = I(X, Y)
3734   // where the types of all operands and the result match, and are either i32 or
3735   // i64. The following instrumentation happens to work for all of them:
3736   //   Sz = I(Sx, Y) | (sext (Sy != 0))
3737   void handleBmiIntrinsic(IntrinsicInst &I) {
3738     IRBuilder<> IRB(&I);
3739     Type *ShadowTy = getShadowTy(&I);
3740 
3741     // If any bit of the mask operand is poisoned, then the whole thing is.
3742     Value *SMask = getShadow(&I, 1);
3743     SMask = IRB.CreateSExt(IRB.CreateICmpNE(SMask, getCleanShadow(ShadowTy)),
3744                            ShadowTy);
3745     // Apply the same intrinsic to the shadow of the first operand.
3746     Value *S = IRB.CreateCall(I.getCalledFunction(),
3747                               {getShadow(&I, 0), I.getOperand(1)});
3748     S = IRB.CreateOr(SMask, S);
3749     setShadow(&I, S);
3750     setOriginForNaryOp(I);
3751   }
3752 
3753   static SmallVector<int, 8> getPclmulMask(unsigned Width, bool OddElements) {
3754     SmallVector<int, 8> Mask;
3755     for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
3756       Mask.append(2, X);
3757     }
3758     return Mask;
3759   }
3760 
3761   // Instrument pclmul intrinsics.
3762   // These intrinsics operate either on odd or on even elements of the input
3763   // vectors, depending on the constant in the 3rd argument, ignoring the rest.
3764   // Replace the unused elements with copies of the used ones, ex:
3765   //   (0, 1, 2, 3) -> (0, 0, 2, 2) (even case)
3766   // or
3767   //   (0, 1, 2, 3) -> (1, 1, 3, 3) (odd case)
3768   // and then apply the usual shadow combining logic.
3769   void handlePclmulIntrinsic(IntrinsicInst &I) {
3770     IRBuilder<> IRB(&I);
3771     unsigned Width =
3772         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
3773     assert(isa<ConstantInt>(I.getArgOperand(2)) &&
3774            "pclmul 3rd operand must be a constant");
3775     unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3776     Value *Shuf0 = IRB.CreateShuffleVector(getShadow(&I, 0),
3777                                            getPclmulMask(Width, Imm & 0x01));
3778     Value *Shuf1 = IRB.CreateShuffleVector(getShadow(&I, 1),
3779                                            getPclmulMask(Width, Imm & 0x10));
3780     ShadowAndOriginCombiner SOC(this, IRB);
3781     SOC.Add(Shuf0, getOrigin(&I, 0));
3782     SOC.Add(Shuf1, getOrigin(&I, 1));
3783     SOC.Done(&I);
3784   }
3785 
3786   // Instrument _mm_*_sd|ss intrinsics
3787   void handleUnarySdSsIntrinsic(IntrinsicInst &I) {
3788     IRBuilder<> IRB(&I);
3789     unsigned Width =
3790         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
3791     Value *First = getShadow(&I, 0);
3792     Value *Second = getShadow(&I, 1);
3793     // First element of second operand, remaining elements of first operand
3794     SmallVector<int, 16> Mask;
3795     Mask.push_back(Width);
3796     for (unsigned i = 1; i < Width; i++)
3797       Mask.push_back(i);
3798     Value *Shadow = IRB.CreateShuffleVector(First, Second, Mask);
3799 
3800     setShadow(&I, Shadow);
3801     setOriginForNaryOp(I);
3802   }
3803 
3804   void handleVtestIntrinsic(IntrinsicInst &I) {
3805     IRBuilder<> IRB(&I);
3806     Value *Shadow0 = getShadow(&I, 0);
3807     Value *Shadow1 = getShadow(&I, 1);
3808     Value *Or = IRB.CreateOr(Shadow0, Shadow1);
3809     Value *NZ = IRB.CreateICmpNE(Or, Constant::getNullValue(Or->getType()));
3810     Value *Scalar = convertShadowToScalar(NZ, IRB);
3811     Value *Shadow = IRB.CreateZExt(Scalar, getShadowTy(&I));
3812 
3813     setShadow(&I, Shadow);
3814     setOriginForNaryOp(I);
3815   }
3816 
3817   void handleBinarySdSsIntrinsic(IntrinsicInst &I) {
3818     IRBuilder<> IRB(&I);
3819     unsigned Width =
3820         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
3821     Value *First = getShadow(&I, 0);
3822     Value *Second = getShadow(&I, 1);
3823     Value *OrShadow = IRB.CreateOr(First, Second);
3824     // First element of both OR'd together, remaining elements of first operand
3825     SmallVector<int, 16> Mask;
3826     Mask.push_back(Width);
3827     for (unsigned i = 1; i < Width; i++)
3828       Mask.push_back(i);
3829     Value *Shadow = IRB.CreateShuffleVector(First, OrShadow, Mask);
3830 
3831     setShadow(&I, Shadow);
3832     setOriginForNaryOp(I);
3833   }
3834 
3835   // _mm_round_ps / _mm_round_ps.
3836   // Similar to maybeHandleSimpleNomemIntrinsic except
3837   // the second argument is guranteed to be a constant integer.
3838   void handleRoundPdPsIntrinsic(IntrinsicInst &I) {
3839     assert(I.getArgOperand(0)->getType() == I.getType());
3840     assert(I.arg_size() == 2);
3841     assert(isa<ConstantInt>(I.getArgOperand(1)));
3842 
3843     IRBuilder<> IRB(&I);
3844     ShadowAndOriginCombiner SC(this, IRB);
3845     SC.Add(I.getArgOperand(0));
3846     SC.Done(&I);
3847   }
3848 
3849   // Instrument abs intrinsic.
3850   // handleUnknownIntrinsic can't handle it because of the last
3851   // is_int_min_poison argument which does not match the result type.
3852   void handleAbsIntrinsic(IntrinsicInst &I) {
3853     assert(I.getType()->isIntOrIntVectorTy());
3854     assert(I.getArgOperand(0)->getType() == I.getType());
3855 
3856     // FIXME: Handle is_int_min_poison.
3857     IRBuilder<> IRB(&I);
3858     setShadow(&I, getShadow(&I, 0));
3859     setOrigin(&I, getOrigin(&I, 0));
3860   }
3861 
3862   void handleIsFpClass(IntrinsicInst &I) {
3863     IRBuilder<> IRB(&I);
3864     Value *Shadow = getShadow(&I, 0);
3865     setShadow(&I, IRB.CreateICmpNE(Shadow, getCleanShadow(Shadow)));
3866     setOrigin(&I, getOrigin(&I, 0));
3867   }
3868 
3869   void handleArithmeticWithOverflow(IntrinsicInst &I) {
3870     IRBuilder<> IRB(&I);
3871     Value *Shadow0 = getShadow(&I, 0);
3872     Value *Shadow1 = getShadow(&I, 1);
3873     Value *ShadowElt0 = IRB.CreateOr(Shadow0, Shadow1);
3874     Value *ShadowElt1 =
3875         IRB.CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));
3876 
3877     Value *Shadow = PoisonValue::get(getShadowTy(&I));
3878     Shadow = IRB.CreateInsertValue(Shadow, ShadowElt0, 0);
3879     Shadow = IRB.CreateInsertValue(Shadow, ShadowElt1, 1);
3880 
3881     setShadow(&I, Shadow);
3882     setOriginForNaryOp(I);
3883   }
3884 
3885   /// Handle Arm NEON vector store intrinsics (vst{2,3,4}, vst1x_{2,3,4},
3886   /// and vst{2,3,4}lane).
3887   ///
3888   /// Arm NEON vector store intrinsics have the output address (pointer) as the
3889   /// last argument, with the initial arguments being the inputs (and lane
3890   /// number for vst{2,3,4}lane). They return void.
3891   ///
3892   /// - st4 interleaves the output e.g., st4 (inA, inB, inC, inD, outP) writes
3893   ///   abcdabcdabcdabcd... into *outP
3894   /// - st1_x4 is non-interleaved e.g., st1_x4 (inA, inB, inC, inD, outP)
3895   ///   writes aaaa...bbbb...cccc...dddd... into *outP
3896   /// - st4lane has arguments of (inA, inB, inC, inD, lane, outP)
3897   /// These instructions can all be instrumented with essentially the same
3898   /// MSan logic, simply by applying the corresponding intrinsic to the shadow.
3899   void handleNEONVectorStoreIntrinsic(IntrinsicInst &I, bool useLane) {
3900     IRBuilder<> IRB(&I);
3901 
3902     // Don't use getNumOperands() because it includes the callee
3903     int numArgOperands = I.arg_size();
3904 
3905     // The last arg operand is the output (pointer)
3906     assert(numArgOperands >= 1);
3907     Value *Addr = I.getArgOperand(numArgOperands - 1);
3908     assert(Addr->getType()->isPointerTy());
3909     int skipTrailingOperands = 1;
3910 
3911     if (ClCheckAccessAddress)
3912       insertShadowCheck(Addr, &I);
3913 
3914     // Second-last operand is the lane number (for vst{2,3,4}lane)
3915     if (useLane) {
3916       skipTrailingOperands++;
3917       assert(numArgOperands >= static_cast<int>(skipTrailingOperands));
3918       assert(isa<IntegerType>(
3919           I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));
3920     }
3921 
3922     SmallVector<Value *, 8> ShadowArgs;
3923     // All the initial operands are the inputs
3924     for (int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
3925       assert(isa<FixedVectorType>(I.getArgOperand(i)->getType()));
3926       Value *Shadow = getShadow(&I, i);
3927       ShadowArgs.append(1, Shadow);
3928     }
3929 
3930     // MSan's GetShadowTy assumes the LHS is the type we want the shadow for
3931     // e.g., for:
3932     //     [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
3933     // we know the type of the output (and its shadow) is <16 x i8>.
3934     //
3935     // Arm NEON VST is unusual because the last argument is the output address:
3936     //     define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) {
3937     //         call void @llvm.aarch64.neon.st2.v16i8.p0
3938     //                   (<16 x i8> [[A]], <16 x i8> [[B]], ptr [[P]])
3939     // and we have no type information about P's operand. We must manually
3940     // compute the type (<16 x i8> x 2).
3941     FixedVectorType *OutputVectorTy = FixedVectorType::get(
3942         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getElementType(),
3943         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements() *
3944             (numArgOperands - skipTrailingOperands));
3945     Type *OutputShadowTy = getShadowTy(OutputVectorTy);
3946 
3947     if (useLane)
3948       ShadowArgs.append(1,
3949                         I.getArgOperand(numArgOperands - skipTrailingOperands));
3950 
3951     Value *OutputShadowPtr, *OutputOriginPtr;
3952     // AArch64 NEON does not need alignment (unless OS requires it)
3953     std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
3954         Addr, IRB, OutputShadowTy, Align(1), /*isStore*/ true);
3955     ShadowArgs.append(1, OutputShadowPtr);
3956 
3957     CallInst *CI =
3958         IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);
3959     setShadow(&I, CI);
3960 
3961     if (MS.TrackOrigins) {
3962       // TODO: if we modelled the vst* instruction more precisely, we could
3963       // more accurately track the origins (e.g., if both inputs are
3964       // uninitialized for vst2, we currently blame the second input, even
3965       // though part of the output depends only on the first input).
3966       //
3967       // This is particularly imprecise for vst{2,3,4}lane, since only one
3968       // lane of each input is actually copied to the output.
3969       OriginCombiner OC(this, IRB);
3970       for (int i = 0; i < numArgOperands - skipTrailingOperands; i++)
3971         OC.Add(I.getArgOperand(i));
3972 
3973       const DataLayout &DL = F.getDataLayout();
3974       OC.DoneAndStoreOrigin(DL.getTypeStoreSize(OutputVectorTy),
3975                             OutputOriginPtr);
3976     }
3977   }
3978 
3979   /// Handle intrinsics by applying the intrinsic to the shadows.
3980   ///
3981   /// The trailing arguments are passed verbatim to the intrinsic, though any
3982   /// uninitialized trailing arguments can also taint the shadow e.g., for an
3983   /// intrinsic with one trailing verbatim argument:
3984   ///     out = intrinsic(var1, var2, opType)
3985   /// we compute:
3986   ///     shadow[out] =
3987   ///         intrinsic(shadow[var1], shadow[var2], opType) | shadow[opType]
3988   ///
3989   /// For example, this can be applied to the Arm NEON vector table intrinsics
3990   /// (tbl{1,2,3,4}).
3991   ///
3992   /// The origin is approximated using setOriginForNaryOp.
3993   void handleIntrinsicByApplyingToShadow(IntrinsicInst &I,
3994                                          unsigned int trailingVerbatimArgs) {
3995     IRBuilder<> IRB(&I);
3996 
3997     assert(trailingVerbatimArgs < I.arg_size());
3998 
3999     SmallVector<Value *, 8> ShadowArgs;
4000     // Don't use getNumOperands() because it includes the callee
4001     for (unsigned int i = 0; i < I.arg_size() - trailingVerbatimArgs; i++) {
4002       Value *Shadow = getShadow(&I, i);
4003       ShadowArgs.push_back(Shadow);
4004     }
4005 
4006     for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
4007          i++) {
4008       Value *Arg = I.getArgOperand(i);
4009       ShadowArgs.push_back(Arg);
4010     }
4011 
4012     CallInst *CI =
4013         IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(), ShadowArgs);
4014     Value *CombinedShadow = CI;
4015 
4016     // Combine the computed shadow with the shadow of trailing args
4017     for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
4018          i++) {
4019       Value *Shadow =
4020           CreateShadowCast(IRB, getShadow(&I, i), CombinedShadow->getType());
4021       CombinedShadow = IRB.CreateOr(Shadow, CombinedShadow, "_msprop");
4022     }
4023 
4024     setShadow(&I, CombinedShadow);
4025 
4026     setOriginForNaryOp(I);
4027   }
4028 
4029   void visitIntrinsicInst(IntrinsicInst &I) {
4030     switch (I.getIntrinsicID()) {
4031     case Intrinsic::uadd_with_overflow:
4032     case Intrinsic::sadd_with_overflow:
4033     case Intrinsic::usub_with_overflow:
4034     case Intrinsic::ssub_with_overflow:
4035     case Intrinsic::umul_with_overflow:
4036     case Intrinsic::smul_with_overflow:
4037       handleArithmeticWithOverflow(I);
4038       break;
4039     case Intrinsic::abs:
4040       handleAbsIntrinsic(I);
4041       break;
4042     case Intrinsic::is_fpclass:
4043       handleIsFpClass(I);
4044       break;
4045     case Intrinsic::lifetime_start:
4046       handleLifetimeStart(I);
4047       break;
4048     case Intrinsic::launder_invariant_group:
4049     case Intrinsic::strip_invariant_group:
4050       handleInvariantGroup(I);
4051       break;
4052     case Intrinsic::bswap:
4053       handleBswap(I);
4054       break;
4055     case Intrinsic::ctlz:
4056     case Intrinsic::cttz:
4057       handleCountZeroes(I);
4058       break;
4059     case Intrinsic::masked_compressstore:
4060       handleMaskedCompressStore(I);
4061       break;
4062     case Intrinsic::masked_expandload:
4063       handleMaskedExpandLoad(I);
4064       break;
4065     case Intrinsic::masked_gather:
4066       handleMaskedGather(I);
4067       break;
4068     case Intrinsic::masked_scatter:
4069       handleMaskedScatter(I);
4070       break;
4071     case Intrinsic::masked_store:
4072       handleMaskedStore(I);
4073       break;
4074     case Intrinsic::masked_load:
4075       handleMaskedLoad(I);
4076       break;
4077     case Intrinsic::vector_reduce_and:
4078       handleVectorReduceAndIntrinsic(I);
4079       break;
4080     case Intrinsic::vector_reduce_or:
4081       handleVectorReduceOrIntrinsic(I);
4082       break;
4083     case Intrinsic::vector_reduce_add:
4084     case Intrinsic::vector_reduce_xor:
4085     case Intrinsic::vector_reduce_mul:
4086       handleVectorReduceIntrinsic(I);
4087       break;
4088     case Intrinsic::x86_sse_stmxcsr:
4089       handleStmxcsr(I);
4090       break;
4091     case Intrinsic::x86_sse_ldmxcsr:
4092       handleLdmxcsr(I);
4093       break;
4094     case Intrinsic::x86_avx512_vcvtsd2usi64:
4095     case Intrinsic::x86_avx512_vcvtsd2usi32:
4096     case Intrinsic::x86_avx512_vcvtss2usi64:
4097     case Intrinsic::x86_avx512_vcvtss2usi32:
4098     case Intrinsic::x86_avx512_cvttss2usi64:
4099     case Intrinsic::x86_avx512_cvttss2usi:
4100     case Intrinsic::x86_avx512_cvttsd2usi64:
4101     case Intrinsic::x86_avx512_cvttsd2usi:
4102     case Intrinsic::x86_avx512_cvtusi2ss:
4103     case Intrinsic::x86_avx512_cvtusi642sd:
4104     case Intrinsic::x86_avx512_cvtusi642ss:
4105       handleVectorConvertIntrinsic(I, 1, true);
4106       break;
4107     case Intrinsic::x86_sse2_cvtsd2si64:
4108     case Intrinsic::x86_sse2_cvtsd2si:
4109     case Intrinsic::x86_sse2_cvtsd2ss:
4110     case Intrinsic::x86_sse2_cvttsd2si64:
4111     case Intrinsic::x86_sse2_cvttsd2si:
4112     case Intrinsic::x86_sse_cvtss2si64:
4113     case Intrinsic::x86_sse_cvtss2si:
4114     case Intrinsic::x86_sse_cvttss2si64:
4115     case Intrinsic::x86_sse_cvttss2si:
4116       handleVectorConvertIntrinsic(I, 1);
4117       break;
4118     case Intrinsic::x86_sse_cvtps2pi:
4119     case Intrinsic::x86_sse_cvttps2pi:
4120       handleVectorConvertIntrinsic(I, 2);
4121       break;
4122 
4123     case Intrinsic::x86_avx512_psll_w_512:
4124     case Intrinsic::x86_avx512_psll_d_512:
4125     case Intrinsic::x86_avx512_psll_q_512:
4126     case Intrinsic::x86_avx512_pslli_w_512:
4127     case Intrinsic::x86_avx512_pslli_d_512:
4128     case Intrinsic::x86_avx512_pslli_q_512:
4129     case Intrinsic::x86_avx512_psrl_w_512:
4130     case Intrinsic::x86_avx512_psrl_d_512:
4131     case Intrinsic::x86_avx512_psrl_q_512:
4132     case Intrinsic::x86_avx512_psra_w_512:
4133     case Intrinsic::x86_avx512_psra_d_512:
4134     case Intrinsic::x86_avx512_psra_q_512:
4135     case Intrinsic::x86_avx512_psrli_w_512:
4136     case Intrinsic::x86_avx512_psrli_d_512:
4137     case Intrinsic::x86_avx512_psrli_q_512:
4138     case Intrinsic::x86_avx512_psrai_w_512:
4139     case Intrinsic::x86_avx512_psrai_d_512:
4140     case Intrinsic::x86_avx512_psrai_q_512:
4141     case Intrinsic::x86_avx512_psra_q_256:
4142     case Intrinsic::x86_avx512_psra_q_128:
4143     case Intrinsic::x86_avx512_psrai_q_256:
4144     case Intrinsic::x86_avx512_psrai_q_128:
4145     case Intrinsic::x86_avx2_psll_w:
4146     case Intrinsic::x86_avx2_psll_d:
4147     case Intrinsic::x86_avx2_psll_q:
4148     case Intrinsic::x86_avx2_pslli_w:
4149     case Intrinsic::x86_avx2_pslli_d:
4150     case Intrinsic::x86_avx2_pslli_q:
4151     case Intrinsic::x86_avx2_psrl_w:
4152     case Intrinsic::x86_avx2_psrl_d:
4153     case Intrinsic::x86_avx2_psrl_q:
4154     case Intrinsic::x86_avx2_psra_w:
4155     case Intrinsic::x86_avx2_psra_d:
4156     case Intrinsic::x86_avx2_psrli_w:
4157     case Intrinsic::x86_avx2_psrli_d:
4158     case Intrinsic::x86_avx2_psrli_q:
4159     case Intrinsic::x86_avx2_psrai_w:
4160     case Intrinsic::x86_avx2_psrai_d:
4161     case Intrinsic::x86_sse2_psll_w:
4162     case Intrinsic::x86_sse2_psll_d:
4163     case Intrinsic::x86_sse2_psll_q:
4164     case Intrinsic::x86_sse2_pslli_w:
4165     case Intrinsic::x86_sse2_pslli_d:
4166     case Intrinsic::x86_sse2_pslli_q:
4167     case Intrinsic::x86_sse2_psrl_w:
4168     case Intrinsic::x86_sse2_psrl_d:
4169     case Intrinsic::x86_sse2_psrl_q:
4170     case Intrinsic::x86_sse2_psra_w:
4171     case Intrinsic::x86_sse2_psra_d:
4172     case Intrinsic::x86_sse2_psrli_w:
4173     case Intrinsic::x86_sse2_psrli_d:
4174     case Intrinsic::x86_sse2_psrli_q:
4175     case Intrinsic::x86_sse2_psrai_w:
4176     case Intrinsic::x86_sse2_psrai_d:
4177     case Intrinsic::x86_mmx_psll_w:
4178     case Intrinsic::x86_mmx_psll_d:
4179     case Intrinsic::x86_mmx_psll_q:
4180     case Intrinsic::x86_mmx_pslli_w:
4181     case Intrinsic::x86_mmx_pslli_d:
4182     case Intrinsic::x86_mmx_pslli_q:
4183     case Intrinsic::x86_mmx_psrl_w:
4184     case Intrinsic::x86_mmx_psrl_d:
4185     case Intrinsic::x86_mmx_psrl_q:
4186     case Intrinsic::x86_mmx_psra_w:
4187     case Intrinsic::x86_mmx_psra_d:
4188     case Intrinsic::x86_mmx_psrli_w:
4189     case Intrinsic::x86_mmx_psrli_d:
4190     case Intrinsic::x86_mmx_psrli_q:
4191     case Intrinsic::x86_mmx_psrai_w:
4192     case Intrinsic::x86_mmx_psrai_d:
4193     case Intrinsic::aarch64_neon_rshrn:
4194     case Intrinsic::aarch64_neon_sqrshl:
4195     case Intrinsic::aarch64_neon_sqrshrn:
4196     case Intrinsic::aarch64_neon_sqrshrun:
4197     case Intrinsic::aarch64_neon_sqshl:
4198     case Intrinsic::aarch64_neon_sqshlu:
4199     case Intrinsic::aarch64_neon_sqshrn:
4200     case Intrinsic::aarch64_neon_sqshrun:
4201     case Intrinsic::aarch64_neon_srshl:
4202     case Intrinsic::aarch64_neon_sshl:
4203     case Intrinsic::aarch64_neon_uqrshl:
4204     case Intrinsic::aarch64_neon_uqrshrn:
4205     case Intrinsic::aarch64_neon_uqshl:
4206     case Intrinsic::aarch64_neon_uqshrn:
4207     case Intrinsic::aarch64_neon_urshl:
4208     case Intrinsic::aarch64_neon_ushl:
4209       // Not handled here: aarch64_neon_vsli (vector shift left and insert)
4210       handleVectorShiftIntrinsic(I, /* Variable */ false);
4211       break;
4212     case Intrinsic::x86_avx2_psllv_d:
4213     case Intrinsic::x86_avx2_psllv_d_256:
4214     case Intrinsic::x86_avx512_psllv_d_512:
4215     case Intrinsic::x86_avx2_psllv_q:
4216     case Intrinsic::x86_avx2_psllv_q_256:
4217     case Intrinsic::x86_avx512_psllv_q_512:
4218     case Intrinsic::x86_avx2_psrlv_d:
4219     case Intrinsic::x86_avx2_psrlv_d_256:
4220     case Intrinsic::x86_avx512_psrlv_d_512:
4221     case Intrinsic::x86_avx2_psrlv_q:
4222     case Intrinsic::x86_avx2_psrlv_q_256:
4223     case Intrinsic::x86_avx512_psrlv_q_512:
4224     case Intrinsic::x86_avx2_psrav_d:
4225     case Intrinsic::x86_avx2_psrav_d_256:
4226     case Intrinsic::x86_avx512_psrav_d_512:
4227     case Intrinsic::x86_avx512_psrav_q_128:
4228     case Intrinsic::x86_avx512_psrav_q_256:
4229     case Intrinsic::x86_avx512_psrav_q_512:
4230       handleVectorShiftIntrinsic(I, /* Variable */ true);
4231       break;
4232 
4233     case Intrinsic::x86_sse2_packsswb_128:
4234     case Intrinsic::x86_sse2_packssdw_128:
4235     case Intrinsic::x86_sse2_packuswb_128:
4236     case Intrinsic::x86_sse41_packusdw:
4237     case Intrinsic::x86_avx2_packsswb:
4238     case Intrinsic::x86_avx2_packssdw:
4239     case Intrinsic::x86_avx2_packuswb:
4240     case Intrinsic::x86_avx2_packusdw:
4241       handleVectorPackIntrinsic(I);
4242       break;
4243 
4244     case Intrinsic::x86_sse41_pblendvb:
4245     case Intrinsic::x86_sse41_blendvpd:
4246     case Intrinsic::x86_sse41_blendvps:
4247     case Intrinsic::x86_avx_blendv_pd_256:
4248     case Intrinsic::x86_avx_blendv_ps_256:
4249     case Intrinsic::x86_avx2_pblendvb:
4250       handleBlendvIntrinsic(I);
4251       break;
4252 
4253     case Intrinsic::x86_avx_dp_ps_256:
4254     case Intrinsic::x86_sse41_dppd:
4255     case Intrinsic::x86_sse41_dpps:
4256       handleDppIntrinsic(I);
4257       break;
4258 
4259     case Intrinsic::x86_mmx_packsswb:
4260     case Intrinsic::x86_mmx_packuswb:
4261       handleVectorPackIntrinsic(I, 16);
4262       break;
4263 
4264     case Intrinsic::x86_mmx_packssdw:
4265       handleVectorPackIntrinsic(I, 32);
4266       break;
4267 
4268     case Intrinsic::x86_mmx_psad_bw:
4269       handleVectorSadIntrinsic(I, true);
4270       break;
4271     case Intrinsic::x86_sse2_psad_bw:
4272     case Intrinsic::x86_avx2_psad_bw:
4273       handleVectorSadIntrinsic(I);
4274       break;
4275 
4276     case Intrinsic::x86_sse2_pmadd_wd:
4277     case Intrinsic::x86_avx2_pmadd_wd:
4278     case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
4279     case Intrinsic::x86_avx2_pmadd_ub_sw:
4280       handleVectorPmaddIntrinsic(I);
4281       break;
4282 
4283     case Intrinsic::x86_ssse3_pmadd_ub_sw:
4284       handleVectorPmaddIntrinsic(I, 8);
4285       break;
4286 
4287     case Intrinsic::x86_mmx_pmadd_wd:
4288       handleVectorPmaddIntrinsic(I, 16);
4289       break;
4290 
4291     case Intrinsic::x86_sse_cmp_ss:
4292     case Intrinsic::x86_sse2_cmp_sd:
4293     case Intrinsic::x86_sse_comieq_ss:
4294     case Intrinsic::x86_sse_comilt_ss:
4295     case Intrinsic::x86_sse_comile_ss:
4296     case Intrinsic::x86_sse_comigt_ss:
4297     case Intrinsic::x86_sse_comige_ss:
4298     case Intrinsic::x86_sse_comineq_ss:
4299     case Intrinsic::x86_sse_ucomieq_ss:
4300     case Intrinsic::x86_sse_ucomilt_ss:
4301     case Intrinsic::x86_sse_ucomile_ss:
4302     case Intrinsic::x86_sse_ucomigt_ss:
4303     case Intrinsic::x86_sse_ucomige_ss:
4304     case Intrinsic::x86_sse_ucomineq_ss:
4305     case Intrinsic::x86_sse2_comieq_sd:
4306     case Intrinsic::x86_sse2_comilt_sd:
4307     case Intrinsic::x86_sse2_comile_sd:
4308     case Intrinsic::x86_sse2_comigt_sd:
4309     case Intrinsic::x86_sse2_comige_sd:
4310     case Intrinsic::x86_sse2_comineq_sd:
4311     case Intrinsic::x86_sse2_ucomieq_sd:
4312     case Intrinsic::x86_sse2_ucomilt_sd:
4313     case Intrinsic::x86_sse2_ucomile_sd:
4314     case Intrinsic::x86_sse2_ucomigt_sd:
4315     case Intrinsic::x86_sse2_ucomige_sd:
4316     case Intrinsic::x86_sse2_ucomineq_sd:
4317       handleVectorCompareScalarIntrinsic(I);
4318       break;
4319 
4320     case Intrinsic::x86_avx_cmp_pd_256:
4321     case Intrinsic::x86_avx_cmp_ps_256:
4322     case Intrinsic::x86_sse2_cmp_pd:
4323     case Intrinsic::x86_sse_cmp_ps:
4324       handleVectorComparePackedIntrinsic(I);
4325       break;
4326 
4327     case Intrinsic::x86_bmi_bextr_32:
4328     case Intrinsic::x86_bmi_bextr_64:
4329     case Intrinsic::x86_bmi_bzhi_32:
4330     case Intrinsic::x86_bmi_bzhi_64:
4331     case Intrinsic::x86_bmi_pdep_32:
4332     case Intrinsic::x86_bmi_pdep_64:
4333     case Intrinsic::x86_bmi_pext_32:
4334     case Intrinsic::x86_bmi_pext_64:
4335       handleBmiIntrinsic(I);
4336       break;
4337 
4338     case Intrinsic::x86_pclmulqdq:
4339     case Intrinsic::x86_pclmulqdq_256:
4340     case Intrinsic::x86_pclmulqdq_512:
4341       handlePclmulIntrinsic(I);
4342       break;
4343 
4344     case Intrinsic::x86_sse41_round_pd:
4345     case Intrinsic::x86_sse41_round_ps:
4346       handleRoundPdPsIntrinsic(I);
4347       break;
4348 
4349     case Intrinsic::x86_sse41_round_sd:
4350     case Intrinsic::x86_sse41_round_ss:
4351       handleUnarySdSsIntrinsic(I);
4352       break;
4353 
4354     case Intrinsic::x86_sse2_max_sd:
4355     case Intrinsic::x86_sse_max_ss:
4356     case Intrinsic::x86_sse2_min_sd:
4357     case Intrinsic::x86_sse_min_ss:
4358       handleBinarySdSsIntrinsic(I);
4359       break;
4360 
4361     case Intrinsic::x86_avx_vtestc_pd:
4362     case Intrinsic::x86_avx_vtestc_pd_256:
4363     case Intrinsic::x86_avx_vtestc_ps:
4364     case Intrinsic::x86_avx_vtestc_ps_256:
4365     case Intrinsic::x86_avx_vtestnzc_pd:
4366     case Intrinsic::x86_avx_vtestnzc_pd_256:
4367     case Intrinsic::x86_avx_vtestnzc_ps:
4368     case Intrinsic::x86_avx_vtestnzc_ps_256:
4369     case Intrinsic::x86_avx_vtestz_pd:
4370     case Intrinsic::x86_avx_vtestz_pd_256:
4371     case Intrinsic::x86_avx_vtestz_ps:
4372     case Intrinsic::x86_avx_vtestz_ps_256:
4373     case Intrinsic::x86_avx_ptestc_256:
4374     case Intrinsic::x86_avx_ptestnzc_256:
4375     case Intrinsic::x86_avx_ptestz_256:
4376     case Intrinsic::x86_sse41_ptestc:
4377     case Intrinsic::x86_sse41_ptestnzc:
4378     case Intrinsic::x86_sse41_ptestz:
4379       handleVtestIntrinsic(I);
4380       break;
4381 
4382     case Intrinsic::fshl:
4383     case Intrinsic::fshr:
4384       handleFunnelShift(I);
4385       break;
4386 
4387     case Intrinsic::is_constant:
4388       // The result of llvm.is.constant() is always defined.
4389       setShadow(&I, getCleanShadow(&I));
4390       setOrigin(&I, getCleanOrigin());
4391       break;
4392 
4393     case Intrinsic::aarch64_neon_st1x2:
4394     case Intrinsic::aarch64_neon_st1x3:
4395     case Intrinsic::aarch64_neon_st1x4:
4396     case Intrinsic::aarch64_neon_st2:
4397     case Intrinsic::aarch64_neon_st3:
4398     case Intrinsic::aarch64_neon_st4: {
4399       handleNEONVectorStoreIntrinsic(I, false);
4400       break;
4401     }
4402 
4403     case Intrinsic::aarch64_neon_st2lane:
4404     case Intrinsic::aarch64_neon_st3lane:
4405     case Intrinsic::aarch64_neon_st4lane: {
4406       handleNEONVectorStoreIntrinsic(I, true);
4407       break;
4408     }
4409 
4410     // Arm NEON vector table intrinsics have the source/table register(s) as
4411     // arguments, followed by the index register. They return the output.
4412     //
4413     // 'TBL writes a zero if an index is out-of-range, while TBX leaves the
4414     //  original value unchanged in the destination register.'
4415     // Conveniently, zero denotes a clean shadow, which means out-of-range
4416     // indices for TBL will initialize the user data with zero and also clean
4417     // the shadow. (For TBX, neither the user data nor the shadow will be
4418     // updated, which is also correct.)
4419     case Intrinsic::aarch64_neon_tbl1:
4420     case Intrinsic::aarch64_neon_tbl2:
4421     case Intrinsic::aarch64_neon_tbl3:
4422     case Intrinsic::aarch64_neon_tbl4:
4423     case Intrinsic::aarch64_neon_tbx1:
4424     case Intrinsic::aarch64_neon_tbx2:
4425     case Intrinsic::aarch64_neon_tbx3:
4426     case Intrinsic::aarch64_neon_tbx4: {
4427       // The last trailing argument (index register) should be handled verbatim
4428       handleIntrinsicByApplyingToShadow(I, 1);
4429       break;
4430     }
4431 
4432     default:
4433       if (!handleUnknownIntrinsic(I))
4434         visitInstruction(I);
4435       break;
4436     }
4437   }
4438 
4439   void visitLibAtomicLoad(CallBase &CB) {
4440     // Since we use getNextNode here, we can't have CB terminate the BB.
4441     assert(isa<CallInst>(CB));
4442 
4443     IRBuilder<> IRB(&CB);
4444     Value *Size = CB.getArgOperand(0);
4445     Value *SrcPtr = CB.getArgOperand(1);
4446     Value *DstPtr = CB.getArgOperand(2);
4447     Value *Ordering = CB.getArgOperand(3);
4448     // Convert the call to have at least Acquire ordering to make sure
4449     // the shadow operations aren't reordered before it.
4450     Value *NewOrdering =
4451         IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
4452     CB.setArgOperand(3, NewOrdering);
4453 
4454     NextNodeIRBuilder NextIRB(&CB);
4455     Value *SrcShadowPtr, *SrcOriginPtr;
4456     std::tie(SrcShadowPtr, SrcOriginPtr) =
4457         getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
4458                            /*isStore*/ false);
4459     Value *DstShadowPtr =
4460         getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
4461                            /*isStore*/ true)
4462             .first;
4463 
4464     NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
4465     if (MS.TrackOrigins) {
4466       Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
4467                                                    kMinOriginAlignment);
4468       Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
4469       NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
4470     }
4471   }
4472 
4473   void visitLibAtomicStore(CallBase &CB) {
4474     IRBuilder<> IRB(&CB);
4475     Value *Size = CB.getArgOperand(0);
4476     Value *DstPtr = CB.getArgOperand(2);
4477     Value *Ordering = CB.getArgOperand(3);
4478     // Convert the call to have at least Release ordering to make sure
4479     // the shadow operations aren't reordered after it.
4480     Value *NewOrdering =
4481         IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
4482     CB.setArgOperand(3, NewOrdering);
4483 
4484     Value *DstShadowPtr =
4485         getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
4486                            /*isStore*/ true)
4487             .first;
4488 
4489     // Atomic store always paints clean shadow/origin. See file header.
4490     IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
4491                      Align(1));
4492   }
4493 
4494   void visitCallBase(CallBase &CB) {
4495     assert(!CB.getMetadata(LLVMContext::MD_nosanitize));
4496     if (CB.isInlineAsm()) {
4497       // For inline asm (either a call to asm function, or callbr instruction),
4498       // do the usual thing: check argument shadow and mark all outputs as
4499       // clean. Note that any side effects of the inline asm that are not
4500       // immediately visible in its constraints are not handled.
4501       if (ClHandleAsmConservative)
4502         visitAsmInstruction(CB);
4503       else
4504         visitInstruction(CB);
4505       return;
4506     }
4507     LibFunc LF;
4508     if (TLI->getLibFunc(CB, LF)) {
4509       // libatomic.a functions need to have special handling because there isn't
4510       // a good way to intercept them or compile the library with
4511       // instrumentation.
4512       switch (LF) {
4513       case LibFunc_atomic_load:
4514         if (!isa<CallInst>(CB)) {
4515           llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load."
4516                           "Ignoring!\n";
4517           break;
4518         }
4519         visitLibAtomicLoad(CB);
4520         return;
4521       case LibFunc_atomic_store:
4522         visitLibAtomicStore(CB);
4523         return;
4524       default:
4525         break;
4526       }
4527     }
4528 
4529     if (auto *Call = dyn_cast<CallInst>(&CB)) {
4530       assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");
4531 
4532       // We are going to insert code that relies on the fact that the callee
4533       // will become a non-readonly function after it is instrumented by us. To
4534       // prevent this code from being optimized out, mark that function
4535       // non-readonly in advance.
4536       // TODO: We can likely do better than dropping memory() completely here.
4537       AttributeMask B;
4538       B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
4539 
4540       Call->removeFnAttrs(B);
4541       if (Function *Func = Call->getCalledFunction()) {
4542         Func->removeFnAttrs(B);
4543       }
4544 
4545       maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
4546     }
4547     IRBuilder<> IRB(&CB);
4548     bool MayCheckCall = MS.EagerChecks;
4549     if (Function *Func = CB.getCalledFunction()) {
4550       // __sanitizer_unaligned_{load,store} functions may be called by users
4551       // and always expects shadows in the TLS. So don't check them.
4552       MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
4553     }
4554 
4555     unsigned ArgOffset = 0;
4556     LLVM_DEBUG(dbgs() << "  CallSite: " << CB << "\n");
4557     for (const auto &[i, A] : llvm::enumerate(CB.args())) {
4558       if (!A->getType()->isSized()) {
4559         LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
4560         continue;
4561       }
4562 
4563       if (A->getType()->isScalableTy()) {
4564         LLVM_DEBUG(dbgs() << "Arg  " << i << " is vscale: " << CB << "\n");
4565         // Handle as noundef, but don't reserve tls slots.
4566         insertShadowCheck(A, &CB);
4567         continue;
4568       }
4569 
4570       unsigned Size = 0;
4571       const DataLayout &DL = F.getDataLayout();
4572 
4573       bool ByVal = CB.paramHasAttr(i, Attribute::ByVal);
4574       bool NoUndef = CB.paramHasAttr(i, Attribute::NoUndef);
4575       bool EagerCheck = MayCheckCall && !ByVal && NoUndef;
4576 
4577       if (EagerCheck) {
4578         insertShadowCheck(A, &CB);
4579         Size = DL.getTypeAllocSize(A->getType());
4580       } else {
4581         Value *Store = nullptr;
4582         // Compute the Shadow for arg even if it is ByVal, because
4583         // in that case getShadow() will copy the actual arg shadow to
4584         // __msan_param_tls.
4585         Value *ArgShadow = getShadow(A);
4586         Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
4587         LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
4588                           << " Shadow: " << *ArgShadow << "\n");
4589         if (ByVal) {
4590           // ByVal requires some special handling as it's too big for a single
4591           // load
4592           assert(A->getType()->isPointerTy() &&
4593                  "ByVal argument is not a pointer!");
4594           Size = DL.getTypeAllocSize(CB.getParamByValType(i));
4595           if (ArgOffset + Size > kParamTLSSize)
4596             break;
4597           const MaybeAlign ParamAlignment(CB.getParamAlign(i));
4598           MaybeAlign Alignment = std::nullopt;
4599           if (ParamAlignment)
4600             Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
4601           Value *AShadowPtr, *AOriginPtr;
4602           std::tie(AShadowPtr, AOriginPtr) =
4603               getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
4604                                  /*isStore*/ false);
4605           if (!PropagateShadow) {
4606             Store = IRB.CreateMemSet(ArgShadowBase,
4607                                      Constant::getNullValue(IRB.getInt8Ty()),
4608                                      Size, Alignment);
4609           } else {
4610             Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
4611                                      Alignment, Size);
4612             if (MS.TrackOrigins) {
4613               Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
4614               // FIXME: OriginSize should be:
4615               // alignTo(A % kMinOriginAlignment + Size, kMinOriginAlignment)
4616               unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
4617               IRB.CreateMemCpy(
4618                   ArgOriginBase,
4619                   /* by origin_tls[ArgOffset] */ kMinOriginAlignment,
4620                   AOriginPtr,
4621                   /* by getShadowOriginPtr */ kMinOriginAlignment, OriginSize);
4622             }
4623           }
4624         } else {
4625           // Any other parameters mean we need bit-grained tracking of uninit
4626           // data
4627           Size = DL.getTypeAllocSize(A->getType());
4628           if (ArgOffset + Size > kParamTLSSize)
4629             break;
4630           Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
4631                                          kShadowTLSAlignment);
4632           Constant *Cst = dyn_cast<Constant>(ArgShadow);
4633           if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
4634             IRB.CreateStore(getOrigin(A),
4635                             getOriginPtrForArgument(IRB, ArgOffset));
4636           }
4637         }
4638         (void)Store;
4639         assert(Store != nullptr);
4640         LLVM_DEBUG(dbgs() << "  Param:" << *Store << "\n");
4641       }
4642       assert(Size != 0);
4643       ArgOffset += alignTo(Size, kShadowTLSAlignment);
4644     }
4645     LLVM_DEBUG(dbgs() << "  done with call args\n");
4646 
4647     FunctionType *FT = CB.getFunctionType();
4648     if (FT->isVarArg()) {
4649       VAHelper->visitCallBase(CB, IRB);
4650     }
4651 
4652     // Now, get the shadow for the RetVal.
4653     if (!CB.getType()->isSized())
4654       return;
4655     // Don't emit the epilogue for musttail call returns.
4656     if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
4657       return;
4658 
4659     if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
4660       setShadow(&CB, getCleanShadow(&CB));
4661       setOrigin(&CB, getCleanOrigin());
4662       return;
4663     }
4664 
4665     IRBuilder<> IRBBefore(&CB);
4666     // Until we have full dynamic coverage, make sure the retval shadow is 0.
4667     Value *Base = getShadowPtrForRetval(IRBBefore);
4668     IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
4669                                  kShadowTLSAlignment);
4670     BasicBlock::iterator NextInsn;
4671     if (isa<CallInst>(CB)) {
4672       NextInsn = ++CB.getIterator();
4673       assert(NextInsn != CB.getParent()->end());
4674     } else {
4675       BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
4676       if (!NormalDest->getSinglePredecessor()) {
4677         // FIXME: this case is tricky, so we are just conservative here.
4678         // Perhaps we need to split the edge between this BB and NormalDest,
4679         // but a naive attempt to use SplitEdge leads to a crash.
4680         setShadow(&CB, getCleanShadow(&CB));
4681         setOrigin(&CB, getCleanOrigin());
4682         return;
4683       }
4684       // FIXME: NextInsn is likely in a basic block that has not been visited
4685       // yet. Anything inserted there will be instrumented by MSan later!
4686       NextInsn = NormalDest->getFirstInsertionPt();
4687       assert(NextInsn != NormalDest->end() &&
4688              "Could not find insertion point for retval shadow load");
4689     }
4690     IRBuilder<> IRBAfter(&*NextInsn);
4691     Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
4692         getShadowTy(&CB), getShadowPtrForRetval(IRBAfter), kShadowTLSAlignment,
4693         "_msret");
4694     setShadow(&CB, RetvalShadow);
4695     if (MS.TrackOrigins)
4696       setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
4697   }
4698 
4699   bool isAMustTailRetVal(Value *RetVal) {
4700     if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
4701       RetVal = I->getOperand(0);
4702     }
4703     if (auto *I = dyn_cast<CallInst>(RetVal)) {
4704       return I->isMustTailCall();
4705     }
4706     return false;
4707   }
4708 
4709   void visitReturnInst(ReturnInst &I) {
4710     IRBuilder<> IRB(&I);
4711     Value *RetVal = I.getReturnValue();
4712     if (!RetVal)
4713       return;
4714     // Don't emit the epilogue for musttail call returns.
4715     if (isAMustTailRetVal(RetVal))
4716       return;
4717     Value *ShadowPtr = getShadowPtrForRetval(IRB);
4718     bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
4719     bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
4720     // FIXME: Consider using SpecialCaseList to specify a list of functions that
4721     // must always return fully initialized values. For now, we hardcode "main".
4722     bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");
4723 
4724     Value *Shadow = getShadow(RetVal);
4725     bool StoreOrigin = true;
4726     if (EagerCheck) {
4727       insertShadowCheck(RetVal, &I);
4728       Shadow = getCleanShadow(RetVal);
4729       StoreOrigin = false;
4730     }
4731 
4732     // The caller may still expect information passed over TLS if we pass our
4733     // check
4734     if (StoreShadow) {
4735       IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
4736       if (MS.TrackOrigins && StoreOrigin)
4737         IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
4738     }
4739   }
4740 
4741   void visitPHINode(PHINode &I) {
4742     IRBuilder<> IRB(&I);
4743     if (!PropagateShadow) {
4744       setShadow(&I, getCleanShadow(&I));
4745       setOrigin(&I, getCleanOrigin());
4746       return;
4747     }
4748 
4749     ShadowPHINodes.push_back(&I);
4750     setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
4751                                 "_msphi_s"));
4752     if (MS.TrackOrigins)
4753       setOrigin(
4754           &I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), "_msphi_o"));
4755   }
4756 
4757   Value *getLocalVarIdptr(AllocaInst &I) {
4758     ConstantInt *IntConst =
4759         ConstantInt::get(Type::getInt32Ty((*F.getParent()).getContext()), 0);
4760     return new GlobalVariable(*F.getParent(), IntConst->getType(),
4761                               /*isConstant=*/false, GlobalValue::PrivateLinkage,
4762                               IntConst);
4763   }
4764 
4765   Value *getLocalVarDescription(AllocaInst &I) {
4766     return createPrivateConstGlobalForString(*F.getParent(), I.getName());
4767   }
4768 
4769   void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
4770     if (PoisonStack && ClPoisonStackWithCall) {
4771       IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
4772     } else {
4773       Value *ShadowBase, *OriginBase;
4774       std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
4775           &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);
4776 
4777       Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
4778       IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
4779     }
4780 
4781     if (PoisonStack && MS.TrackOrigins) {
4782       Value *Idptr = getLocalVarIdptr(I);
4783       if (ClPrintStackNames) {
4784         Value *Descr = getLocalVarDescription(I);
4785         IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
4786                        {&I, Len, Idptr, Descr});
4787       } else {
4788         IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
4789       }
4790     }
4791   }
4792 
4793   void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
4794     Value *Descr = getLocalVarDescription(I);
4795     if (PoisonStack) {
4796       IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
4797     } else {
4798       IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
4799     }
4800   }
4801 
4802   void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
4803     if (!InsPoint)
4804       InsPoint = &I;
4805     NextNodeIRBuilder IRB(InsPoint);
4806     const DataLayout &DL = F.getDataLayout();
4807     TypeSize TS = DL.getTypeAllocSize(I.getAllocatedType());
4808     Value *Len = IRB.CreateTypeSize(MS.IntptrTy, TS);
4809     if (I.isArrayAllocation())
4810       Len = IRB.CreateMul(Len,
4811                           IRB.CreateZExtOrTrunc(I.getArraySize(), MS.IntptrTy));
4812 
4813     if (MS.CompileKernel)
4814       poisonAllocaKmsan(I, IRB, Len);
4815     else
4816       poisonAllocaUserspace(I, IRB, Len);
4817   }
4818 
4819   void visitAllocaInst(AllocaInst &I) {
4820     setShadow(&I, getCleanShadow(&I));
4821     setOrigin(&I, getCleanOrigin());
4822     // We'll get to this alloca later unless it's poisoned at the corresponding
4823     // llvm.lifetime.start.
4824     AllocaSet.insert(&I);
4825   }
4826 
4827   void visitSelectInst(SelectInst &I) {
4828     // a = select b, c, d
4829     Value *B = I.getCondition();
4830     Value *C = I.getTrueValue();
4831     Value *D = I.getFalseValue();
4832 
4833     handleSelectLikeInst(I, B, C, D);
4834   }
4835 
4836   void handleSelectLikeInst(Instruction &I, Value *B, Value *C, Value *D) {
4837     IRBuilder<> IRB(&I);
4838 
4839     Value *Sb = getShadow(B);
4840     Value *Sc = getShadow(C);
4841     Value *Sd = getShadow(D);
4842 
4843     Value *Ob = MS.TrackOrigins ? getOrigin(B) : nullptr;
4844     Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
4845     Value *Od = MS.TrackOrigins ? getOrigin(D) : nullptr;
4846 
4847     // Result shadow if condition shadow is 0.
4848     Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
4849     Value *Sa1;
4850     if (I.getType()->isAggregateType()) {
4851       // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
4852       // an extra "select". This results in much more compact IR.
4853       // Sa = select Sb, poisoned, (select b, Sc, Sd)
4854       Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
4855     } else {
4856       // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
4857       // If Sb (condition is poisoned), look for bits in c and d that are equal
4858       // and both unpoisoned.
4859       // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
4860 
4861       // Cast arguments to shadow-compatible type.
4862       C = CreateAppToShadowCast(IRB, C);
4863       D = CreateAppToShadowCast(IRB, D);
4864 
4865       // Result shadow if condition shadow is 1.
4866       Sa1 = IRB.CreateOr({IRB.CreateXor(C, D), Sc, Sd});
4867     }
4868     Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
4869     setShadow(&I, Sa);
4870     if (MS.TrackOrigins) {
4871       // Origins are always i32, so any vector conditions must be flattened.
4872       // FIXME: consider tracking vector origins for app vectors?
4873       if (B->getType()->isVectorTy()) {
4874         B = convertToBool(B, IRB);
4875         Sb = convertToBool(Sb, IRB);
4876       }
4877       // a = select b, c, d
4878       // Oa = Sb ? Ob : (b ? Oc : Od)
4879       setOrigin(&I, IRB.CreateSelect(Sb, Ob, IRB.CreateSelect(B, Oc, Od)));
4880     }
4881   }
4882 
4883   void visitLandingPadInst(LandingPadInst &I) {
4884     // Do nothing.
4885     // See https://github.com/google/sanitizers/issues/504
4886     setShadow(&I, getCleanShadow(&I));
4887     setOrigin(&I, getCleanOrigin());
4888   }
4889 
4890   void visitCatchSwitchInst(CatchSwitchInst &I) {
4891     setShadow(&I, getCleanShadow(&I));
4892     setOrigin(&I, getCleanOrigin());
4893   }
4894 
4895   void visitFuncletPadInst(FuncletPadInst &I) {
4896     setShadow(&I, getCleanShadow(&I));
4897     setOrigin(&I, getCleanOrigin());
4898   }
4899 
4900   void visitGetElementPtrInst(GetElementPtrInst &I) { handleShadowOr(I); }
4901 
4902   void visitExtractValueInst(ExtractValueInst &I) {
4903     IRBuilder<> IRB(&I);
4904     Value *Agg = I.getAggregateOperand();
4905     LLVM_DEBUG(dbgs() << "ExtractValue:  " << I << "\n");
4906     Value *AggShadow = getShadow(Agg);
4907     LLVM_DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
4908     Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
4909     LLVM_DEBUG(dbgs() << "   ResShadow:  " << *ResShadow << "\n");
4910     setShadow(&I, ResShadow);
4911     setOriginForNaryOp(I);
4912   }
4913 
4914   void visitInsertValueInst(InsertValueInst &I) {
4915     IRBuilder<> IRB(&I);
4916     LLVM_DEBUG(dbgs() << "InsertValue:  " << I << "\n");
4917     Value *AggShadow = getShadow(I.getAggregateOperand());
4918     Value *InsShadow = getShadow(I.getInsertedValueOperand());
4919     LLVM_DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
4920     LLVM_DEBUG(dbgs() << "   InsShadow:  " << *InsShadow << "\n");
4921     Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
4922     LLVM_DEBUG(dbgs() << "   Res:        " << *Res << "\n");
4923     setShadow(&I, Res);
4924     setOriginForNaryOp(I);
4925   }
4926 
4927   void dumpInst(Instruction &I) {
4928     if (CallInst *CI = dyn_cast<CallInst>(&I)) {
4929       errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
4930     } else {
4931       errs() << "ZZZ " << I.getOpcodeName() << "\n";
4932     }
4933     errs() << "QQQ " << I << "\n";
4934   }
4935 
4936   void visitResumeInst(ResumeInst &I) {
4937     LLVM_DEBUG(dbgs() << "Resume: " << I << "\n");
4938     // Nothing to do here.
4939   }
4940 
4941   void visitCleanupReturnInst(CleanupReturnInst &CRI) {
4942     LLVM_DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
4943     // Nothing to do here.
4944   }
4945 
4946   void visitCatchReturnInst(CatchReturnInst &CRI) {
4947     LLVM_DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
4948     // Nothing to do here.
4949   }
4950 
4951   void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
4952                              IRBuilder<> &IRB, const DataLayout &DL,
4953                              bool isOutput) {
4954     // For each assembly argument, we check its value for being initialized.
4955     // If the argument is a pointer, we assume it points to a single element
4956     // of the corresponding type (or to a 8-byte word, if the type is unsized).
4957     // Each such pointer is instrumented with a call to the runtime library.
4958     Type *OpType = Operand->getType();
4959     // Check the operand value itself.
4960     insertShadowCheck(Operand, &I);
4961     if (!OpType->isPointerTy() || !isOutput) {
4962       assert(!isOutput);
4963       return;
4964     }
4965     if (!ElemTy->isSized())
4966       return;
4967     auto Size = DL.getTypeStoreSize(ElemTy);
4968     Value *SizeVal = IRB.CreateTypeSize(MS.IntptrTy, Size);
4969     if (MS.CompileKernel) {
4970       IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
4971     } else {
4972       // ElemTy, derived from elementtype(), does not encode the alignment of
4973       // the pointer. Conservatively assume that the shadow memory is unaligned.
4974       // When Size is large, avoid StoreInst as it would expand to many
4975       // instructions.
4976       auto [ShadowPtr, _] =
4977           getShadowOriginPtrUserspace(Operand, IRB, IRB.getInt8Ty(), Align(1));
4978       if (Size <= 32)
4979         IRB.CreateAlignedStore(getCleanShadow(ElemTy), ShadowPtr, Align(1));
4980       else
4981         IRB.CreateMemSet(ShadowPtr, ConstantInt::getNullValue(IRB.getInt8Ty()),
4982                          SizeVal, Align(1));
4983     }
4984   }
4985 
4986   /// Get the number of output arguments returned by pointers.
4987   int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
4988     int NumRetOutputs = 0;
4989     int NumOutputs = 0;
4990     Type *RetTy = cast<Value>(CB)->getType();
4991     if (!RetTy->isVoidTy()) {
4992       // Register outputs are returned via the CallInst return value.
4993       auto *ST = dyn_cast<StructType>(RetTy);
4994       if (ST)
4995         NumRetOutputs = ST->getNumElements();
4996       else
4997         NumRetOutputs = 1;
4998     }
4999     InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
5000     for (const InlineAsm::ConstraintInfo &Info : Constraints) {
5001       switch (Info.Type) {
5002       case InlineAsm::isOutput:
5003         NumOutputs++;
5004         break;
5005       default:
5006         break;
5007       }
5008     }
5009     return NumOutputs - NumRetOutputs;
5010   }
5011 
5012   void visitAsmInstruction(Instruction &I) {
5013     // Conservative inline assembly handling: check for poisoned shadow of
5014     // asm() arguments, then unpoison the result and all the memory locations
5015     // pointed to by those arguments.
5016     // An inline asm() statement in C++ contains lists of input and output
5017     // arguments used by the assembly code. These are mapped to operands of the
5018     // CallInst as follows:
5019     //  - nR register outputs ("=r) are returned by value in a single structure
5020     //  (SSA value of the CallInst);
5021     //  - nO other outputs ("=m" and others) are returned by pointer as first
5022     // nO operands of the CallInst;
5023     //  - nI inputs ("r", "m" and others) are passed to CallInst as the
5024     // remaining nI operands.
5025     // The total number of asm() arguments in the source is nR+nO+nI, and the
5026     // corresponding CallInst has nO+nI+1 operands (the last operand is the
5027     // function to be called).
5028     const DataLayout &DL = F.getDataLayout();
5029     CallBase *CB = cast<CallBase>(&I);
5030     IRBuilder<> IRB(&I);
5031     InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
5032     int OutputArgs = getNumOutputArgs(IA, CB);
5033     // The last operand of a CallInst is the function itself.
5034     int NumOperands = CB->getNumOperands() - 1;
5035 
5036     // Check input arguments. Doing so before unpoisoning output arguments, so
5037     // that we won't overwrite uninit values before checking them.
5038     for (int i = OutputArgs; i < NumOperands; i++) {
5039       Value *Operand = CB->getOperand(i);
5040       instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
5041                             /*isOutput*/ false);
5042     }
5043     // Unpoison output arguments. This must happen before the actual InlineAsm
5044     // call, so that the shadow for memory published in the asm() statement
5045     // remains valid.
5046     for (int i = 0; i < OutputArgs; i++) {
5047       Value *Operand = CB->getOperand(i);
5048       instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
5049                             /*isOutput*/ true);
5050     }
5051 
5052     setShadow(&I, getCleanShadow(&I));
5053     setOrigin(&I, getCleanOrigin());
5054   }
5055 
5056   void visitFreezeInst(FreezeInst &I) {
5057     // Freeze always returns a fully defined value.
5058     setShadow(&I, getCleanShadow(&I));
5059     setOrigin(&I, getCleanOrigin());
5060   }
5061 
5062   void visitInstruction(Instruction &I) {
5063     // Everything else: stop propagating and check for poisoned shadow.
5064     if (ClDumpStrictInstructions)
5065       dumpInst(I);
5066     LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
5067     for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
5068       Value *Operand = I.getOperand(i);
5069       if (Operand->getType()->isSized())
5070         insertShadowCheck(Operand, &I);
5071     }
5072     setShadow(&I, getCleanShadow(&I));
5073     setOrigin(&I, getCleanOrigin());
5074   }
5075 };
5076 
5077 struct VarArgHelperBase : public VarArgHelper {
5078   Function &F;
5079   MemorySanitizer &MS;
5080   MemorySanitizerVisitor &MSV;
5081   SmallVector<CallInst *, 16> VAStartInstrumentationList;
5082   const unsigned VAListTagSize;
5083 
5084   VarArgHelperBase(Function &F, MemorySanitizer &MS,
5085                    MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
5086       : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}
5087 
5088   Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
5089     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
5090     return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
5091   }
5092 
5093   /// Compute the shadow address for a given va_arg.
5094   Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
5095     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
5096     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
5097     return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_s");
5098   }
5099 
5100   /// Compute the shadow address for a given va_arg.
5101   Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset,
5102                                    unsigned ArgSize) {
5103     // Make sure we don't overflow __msan_va_arg_tls.
5104     if (ArgOffset + ArgSize > kParamTLSSize)
5105       return nullptr;
5106     return getShadowPtrForVAArgument(IRB, ArgOffset);
5107   }
5108 
5109   /// Compute the origin address for a given va_arg.
5110   Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
5111     Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
5112     // getOriginPtrForVAArgument() is always called after
5113     // getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
5114     // overflow.
5115     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
5116     return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_o");
5117   }
5118 
5119   void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
5120                       unsigned BaseOffset) {
5121     // The tails of __msan_va_arg_tls is not large enough to fit full
5122     // value shadow, but it will be copied to backup anyway. Make it
5123     // clean.
5124     if (BaseOffset >= kParamTLSSize)
5125       return;
5126     Value *TailSize =
5127         ConstantInt::getSigned(IRB.getInt32Ty(), kParamTLSSize - BaseOffset);
5128     IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
5129                      TailSize, Align(8));
5130   }
5131 
5132   void unpoisonVAListTagForInst(IntrinsicInst &I) {
5133     IRBuilder<> IRB(&I);
5134     Value *VAListTag = I.getArgOperand(0);
5135     const Align Alignment = Align(8);
5136     auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
5137         VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
5138     // Unpoison the whole __va_list_tag.
5139     IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
5140                      VAListTagSize, Alignment, false);
5141   }
5142 
5143   void visitVAStartInst(VAStartInst &I) override {
5144     if (F.getCallingConv() == CallingConv::Win64)
5145       return;
5146     VAStartInstrumentationList.push_back(&I);
5147     unpoisonVAListTagForInst(I);
5148   }
5149 
5150   void visitVACopyInst(VACopyInst &I) override {
5151     if (F.getCallingConv() == CallingConv::Win64)
5152       return;
5153     unpoisonVAListTagForInst(I);
5154   }
5155 };
5156 
5157 /// AMD64-specific implementation of VarArgHelper.
5158 struct VarArgAMD64Helper : public VarArgHelperBase {
5159   // An unfortunate workaround for asymmetric lowering of va_arg stuff.
5160   // See a comment in visitCallBase for more details.
5161   static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
5162   static const unsigned AMD64FpEndOffsetSSE = 176;
5163   // If SSE is disabled, fp_offset in va_list is zero.
5164   static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
5165 
5166   unsigned AMD64FpEndOffset;
5167   AllocaInst *VAArgTLSCopy = nullptr;
5168   AllocaInst *VAArgTLSOriginCopy = nullptr;
5169   Value *VAArgOverflowSize = nullptr;
5170 
5171   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
5172 
5173   VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
5174                     MemorySanitizerVisitor &MSV)
5175       : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/24) {
5176     AMD64FpEndOffset = AMD64FpEndOffsetSSE;
5177     for (const auto &Attr : F.getAttributes().getFnAttrs()) {
5178       if (Attr.isStringAttribute() &&
5179           (Attr.getKindAsString() == "target-features")) {
5180         if (Attr.getValueAsString().contains("-sse"))
5181           AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
5182         break;
5183       }
5184     }
5185   }
5186 
5187   ArgKind classifyArgument(Value *arg) {
5188     // A very rough approximation of X86_64 argument classification rules.
5189     Type *T = arg->getType();
5190     if (T->isX86_FP80Ty())
5191       return AK_Memory;
5192     if (T->isFPOrFPVectorTy())
5193       return AK_FloatingPoint;
5194     if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
5195       return AK_GeneralPurpose;
5196     if (T->isPointerTy())
5197       return AK_GeneralPurpose;
5198     return AK_Memory;
5199   }
5200 
5201   // For VarArg functions, store the argument shadow in an ABI-specific format
5202   // that corresponds to va_list layout.
5203   // We do this because Clang lowers va_arg in the frontend, and this pass
5204   // only sees the low level code that deals with va_list internals.
5205   // A much easier alternative (provided that Clang emits va_arg instructions)
5206   // would have been to associate each live instance of va_list with a copy of
5207   // MSanParamTLS, and extract shadow on va_arg() call in the argument list
5208   // order.
5209   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
5210     unsigned GpOffset = 0;
5211     unsigned FpOffset = AMD64GpEndOffset;
5212     unsigned OverflowOffset = AMD64FpEndOffset;
5213     const DataLayout &DL = F.getDataLayout();
5214 
5215     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
5216       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
5217       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
5218       if (IsByVal) {
5219         // ByVal arguments always go to the overflow area.
5220         // Fixed arguments passed through the overflow area will be stepped
5221         // over by va_start, so don't count them towards the offset.
5222         if (IsFixed)
5223           continue;
5224         assert(A->getType()->isPointerTy());
5225         Type *RealTy = CB.getParamByValType(ArgNo);
5226         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
5227         uint64_t AlignedSize = alignTo(ArgSize, 8);
5228         unsigned BaseOffset = OverflowOffset;
5229         Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
5230         Value *OriginBase = nullptr;
5231         if (MS.TrackOrigins)
5232           OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
5233         OverflowOffset += AlignedSize;
5234 
5235         if (OverflowOffset > kParamTLSSize) {
5236           CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
5237           continue; // We have no space to copy shadow there.
5238         }
5239 
5240         Value *ShadowPtr, *OriginPtr;
5241         std::tie(ShadowPtr, OriginPtr) =
5242             MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
5243                                    /*isStore*/ false);
5244         IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
5245                          kShadowTLSAlignment, ArgSize);
5246         if (MS.TrackOrigins)
5247           IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
5248                            kShadowTLSAlignment, ArgSize);
5249       } else {
5250         ArgKind AK = classifyArgument(A);
5251         if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
5252           AK = AK_Memory;
5253         if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
5254           AK = AK_Memory;
5255         Value *ShadowBase, *OriginBase = nullptr;
5256         switch (AK) {
5257         case AK_GeneralPurpose:
5258           ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
5259           if (MS.TrackOrigins)
5260             OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
5261           GpOffset += 8;
5262           assert(GpOffset <= kParamTLSSize);
5263           break;
5264         case AK_FloatingPoint:
5265           ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
5266           if (MS.TrackOrigins)
5267             OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
5268           FpOffset += 16;
5269           assert(FpOffset <= kParamTLSSize);
5270           break;
5271         case AK_Memory:
5272           if (IsFixed)
5273             continue;
5274           uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
5275           uint64_t AlignedSize = alignTo(ArgSize, 8);
5276           unsigned BaseOffset = OverflowOffset;
5277           ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
5278           if (MS.TrackOrigins) {
5279             OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
5280           }
5281           OverflowOffset += AlignedSize;
5282           if (OverflowOffset > kParamTLSSize) {
5283             // We have no space to copy shadow there.
5284             CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
5285             continue;
5286           }
5287         }
5288         // Take fixed arguments into account for GpOffset and FpOffset,
5289         // but don't actually store shadows for them.
5290         // TODO(glider): don't call get*PtrForVAArgument() for them.
5291         if (IsFixed)
5292           continue;
5293         Value *Shadow = MSV.getShadow(A);
5294         IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
5295         if (MS.TrackOrigins) {
5296           Value *Origin = MSV.getOrigin(A);
5297           TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
5298           MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
5299                           std::max(kShadowTLSAlignment, kMinOriginAlignment));
5300         }
5301       }
5302     }
5303     Constant *OverflowSize =
5304         ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
5305     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
5306   }
5307 
5308   void finalizeInstrumentation() override {
5309     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
5310            "finalizeInstrumentation called twice");
5311     if (!VAStartInstrumentationList.empty()) {
5312       // If there is a va_start in this function, make a backup copy of
5313       // va_arg_tls somewhere in the function entry block.
5314       IRBuilder<> IRB(MSV.FnPrologueEnd);
5315       VAArgOverflowSize =
5316           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
5317       Value *CopySize = IRB.CreateAdd(
5318           ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
5319       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
5320       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
5321       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
5322                        CopySize, kShadowTLSAlignment, false);
5323 
5324       Value *SrcSize = IRB.CreateBinaryIntrinsic(
5325           Intrinsic::umin, CopySize,
5326           ConstantInt::get(MS.IntptrTy, kParamTLSSize));
5327       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
5328                        kShadowTLSAlignment, SrcSize);
5329       if (MS.TrackOrigins) {
5330         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
5331         VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
5332         IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
5333                          MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
5334       }
5335     }
5336 
5337     // Instrument va_start.
5338     // Copy va_list shadow from the backup copy of the TLS contents.
5339     for (CallInst *OrigInst : VAStartInstrumentationList) {
5340       NextNodeIRBuilder IRB(OrigInst);
5341       Value *VAListTag = OrigInst->getArgOperand(0);
5342 
5343       Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
5344           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5345                         ConstantInt::get(MS.IntptrTy, 16)),
5346           MS.PtrTy);
5347       Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
5348       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5349       const Align Alignment = Align(16);
5350       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5351           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
5352                                  Alignment, /*isStore*/ true);
5353       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
5354                        AMD64FpEndOffset);
5355       if (MS.TrackOrigins)
5356         IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
5357                          Alignment, AMD64FpEndOffset);
5358       Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
5359           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5360                         ConstantInt::get(MS.IntptrTy, 8)),
5361           MS.PtrTy);
5362       Value *OverflowArgAreaPtr =
5363           IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
5364       Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
5365       std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
5366           MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
5367                                  Alignment, /*isStore*/ true);
5368       Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
5369                                              AMD64FpEndOffset);
5370       IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
5371                        VAArgOverflowSize);
5372       if (MS.TrackOrigins) {
5373         SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
5374                                         AMD64FpEndOffset);
5375         IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
5376                          VAArgOverflowSize);
5377       }
5378     }
5379   }
5380 };
5381 
5382 /// AArch64-specific implementation of VarArgHelper.
5383 struct VarArgAArch64Helper : public VarArgHelperBase {
5384   static const unsigned kAArch64GrArgSize = 64;
5385   static const unsigned kAArch64VrArgSize = 128;
5386 
5387   static const unsigned AArch64GrBegOffset = 0;
5388   static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
5389   // Make VR space aligned to 16 bytes.
5390   static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
5391   static const unsigned AArch64VrEndOffset =
5392       AArch64VrBegOffset + kAArch64VrArgSize;
5393   static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
5394 
5395   AllocaInst *VAArgTLSCopy = nullptr;
5396   Value *VAArgOverflowSize = nullptr;
5397 
5398   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
5399 
5400   VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
5401                       MemorySanitizerVisitor &MSV)
5402       : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/32) {}
5403 
5404   // A very rough approximation of aarch64 argument classification rules.
5405   std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
5406     if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
5407       return {AK_GeneralPurpose, 1};
5408     if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
5409       return {AK_FloatingPoint, 1};
5410 
5411     if (T->isArrayTy()) {
5412       auto R = classifyArgument(T->getArrayElementType());
5413       R.second *= T->getScalarType()->getArrayNumElements();
5414       return R;
5415     }
5416 
5417     if (const FixedVectorType *FV = dyn_cast<FixedVectorType>(T)) {
5418       auto R = classifyArgument(FV->getScalarType());
5419       R.second *= FV->getNumElements();
5420       return R;
5421     }
5422 
5423     LLVM_DEBUG(errs() << "Unknown vararg type: " << *T << "\n");
5424     return {AK_Memory, 0};
5425   }
5426 
5427   // The instrumentation stores the argument shadow in a non ABI-specific
5428   // format because it does not know which argument is named (since Clang,
5429   // like x86_64 case, lowers the va_args in the frontend and this pass only
5430   // sees the low level code that deals with va_list internals).
5431   // The first seven GR registers are saved in the first 56 bytes of the
5432   // va_arg tls arra, followed by the first 8 FP/SIMD registers, and then
5433   // the remaining arguments.
5434   // Using constant offset within the va_arg TLS array allows fast copy
5435   // in the finalize instrumentation.
5436   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
5437     unsigned GrOffset = AArch64GrBegOffset;
5438     unsigned VrOffset = AArch64VrBegOffset;
5439     unsigned OverflowOffset = AArch64VAEndOffset;
5440 
5441     const DataLayout &DL = F.getDataLayout();
5442     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
5443       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
5444       auto [AK, RegNum] = classifyArgument(A->getType());
5445       if (AK == AK_GeneralPurpose &&
5446           (GrOffset + RegNum * 8) > AArch64GrEndOffset)
5447         AK = AK_Memory;
5448       if (AK == AK_FloatingPoint &&
5449           (VrOffset + RegNum * 16) > AArch64VrEndOffset)
5450         AK = AK_Memory;
5451       Value *Base;
5452       switch (AK) {
5453       case AK_GeneralPurpose:
5454         Base = getShadowPtrForVAArgument(IRB, GrOffset);
5455         GrOffset += 8 * RegNum;
5456         break;
5457       case AK_FloatingPoint:
5458         Base = getShadowPtrForVAArgument(IRB, VrOffset);
5459         VrOffset += 16 * RegNum;
5460         break;
5461       case AK_Memory:
5462         // Don't count fixed arguments in the overflow area - va_start will
5463         // skip right over them.
5464         if (IsFixed)
5465           continue;
5466         uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
5467         uint64_t AlignedSize = alignTo(ArgSize, 8);
5468         unsigned BaseOffset = OverflowOffset;
5469         Base = getShadowPtrForVAArgument(IRB, BaseOffset);
5470         OverflowOffset += AlignedSize;
5471         if (OverflowOffset > kParamTLSSize) {
5472           // We have no space to copy shadow there.
5473           CleanUnusedTLS(IRB, Base, BaseOffset);
5474           continue;
5475         }
5476         break;
5477       }
5478       // Count Gp/Vr fixed arguments to their respective offsets, but don't
5479       // bother to actually store a shadow.
5480       if (IsFixed)
5481         continue;
5482       IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
5483     }
5484     Constant *OverflowSize =
5485         ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
5486     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
5487   }
5488 
5489   // Retrieve a va_list field of 'void*' size.
5490   Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
5491     Value *SaveAreaPtrPtr = IRB.CreateIntToPtr(
5492         IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5493                       ConstantInt::get(MS.IntptrTy, offset)),
5494         MS.PtrTy);
5495     return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
5496   }
5497 
5498   // Retrieve a va_list field of 'int' size.
5499   Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
5500     Value *SaveAreaPtr = IRB.CreateIntToPtr(
5501         IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5502                       ConstantInt::get(MS.IntptrTy, offset)),
5503         MS.PtrTy);
5504     Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
5505     return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
5506   }
5507 
5508   void finalizeInstrumentation() override {
5509     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
5510            "finalizeInstrumentation called twice");
5511     if (!VAStartInstrumentationList.empty()) {
5512       // If there is a va_start in this function, make a backup copy of
5513       // va_arg_tls somewhere in the function entry block.
5514       IRBuilder<> IRB(MSV.FnPrologueEnd);
5515       VAArgOverflowSize =
5516           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
5517       Value *CopySize = IRB.CreateAdd(
5518           ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
5519       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
5520       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
5521       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
5522                        CopySize, kShadowTLSAlignment, false);
5523 
5524       Value *SrcSize = IRB.CreateBinaryIntrinsic(
5525           Intrinsic::umin, CopySize,
5526           ConstantInt::get(MS.IntptrTy, kParamTLSSize));
5527       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
5528                        kShadowTLSAlignment, SrcSize);
5529     }
5530 
5531     Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
5532     Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
5533 
5534     // Instrument va_start, copy va_list shadow from the backup copy of
5535     // the TLS contents.
5536     for (CallInst *OrigInst : VAStartInstrumentationList) {
5537       NextNodeIRBuilder IRB(OrigInst);
5538 
5539       Value *VAListTag = OrigInst->getArgOperand(0);
5540 
5541       // The variadic ABI for AArch64 creates two areas to save the incoming
5542       // argument registers (one for 64-bit general register xn-x7 and another
5543       // for 128-bit FP/SIMD vn-v7).
5544       // We need then to propagate the shadow arguments on both regions
5545       // 'va::__gr_top + va::__gr_offs' and 'va::__vr_top + va::__vr_offs'.
5546       // The remaining arguments are saved on shadow for 'va::stack'.
5547       // One caveat is it requires only to propagate the non-named arguments,
5548       // however on the call site instrumentation 'all' the arguments are
5549       // saved. So to copy the shadow values from the va_arg TLS array
5550       // we need to adjust the offset for both GR and VR fields based on
5551       // the __{gr,vr}_offs value (since they are stores based on incoming
5552       // named arguments).
5553       Type *RegSaveAreaPtrTy = IRB.getPtrTy();
5554 
5555       // Read the stack pointer from the va_list.
5556       Value *StackSaveAreaPtr =
5557           IRB.CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);
5558 
5559       // Read both the __gr_top and __gr_off and add them up.
5560       Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
5561       Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
5562 
5563       Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
5564           IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);
5565 
5566       // Read both the __vr_top and __vr_off and add them up.
5567       Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
5568       Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
5569 
5570       Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
5571           IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);
5572 
5573       // It does not know how many named arguments is being used and, on the
5574       // callsite all the arguments were saved.  Since __gr_off is defined as
5575       // '0 - ((8 - named_gr) * 8)', the idea is to just propagate the variadic
5576       // argument by ignoring the bytes of shadow from named arguments.
5577       Value *GrRegSaveAreaShadowPtrOff =
5578           IRB.CreateAdd(GrArgSize, GrOffSaveArea);
5579 
5580       Value *GrRegSaveAreaShadowPtr =
5581           MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
5582                                  Align(8), /*isStore*/ true)
5583               .first;
5584 
5585       Value *GrSrcPtr =
5586           IRB.CreateInBoundsPtrAdd(VAArgTLSCopy, GrRegSaveAreaShadowPtrOff);
5587       Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
5588 
5589       IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
5590                        GrCopySize);
5591 
5592       // Again, but for FP/SIMD values.
5593       Value *VrRegSaveAreaShadowPtrOff =
5594           IRB.CreateAdd(VrArgSize, VrOffSaveArea);
5595 
5596       Value *VrRegSaveAreaShadowPtr =
5597           MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
5598                                  Align(8), /*isStore*/ true)
5599               .first;
5600 
5601       Value *VrSrcPtr = IRB.CreateInBoundsPtrAdd(
5602           IRB.CreateInBoundsPtrAdd(VAArgTLSCopy,
5603                                    IRB.getInt32(AArch64VrBegOffset)),
5604           VrRegSaveAreaShadowPtrOff);
5605       Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
5606 
5607       IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
5608                        VrCopySize);
5609 
5610       // And finally for remaining arguments.
5611       Value *StackSaveAreaShadowPtr =
5612           MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
5613                                  Align(16), /*isStore*/ true)
5614               .first;
5615 
5616       Value *StackSrcPtr = IRB.CreateInBoundsPtrAdd(
5617           VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
5618 
5619       IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
5620                        Align(16), VAArgOverflowSize);
5621     }
5622   }
5623 };
5624 
5625 /// PowerPC-specific implementation of VarArgHelper.
5626 struct VarArgPowerPCHelper : public VarArgHelperBase {
5627   AllocaInst *VAArgTLSCopy = nullptr;
5628   Value *VAArgSize = nullptr;
5629 
5630   VarArgPowerPCHelper(Function &F, MemorySanitizer &MS,
5631                       MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
5632       : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}
5633 
5634   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
5635     // For PowerPC, we need to deal with alignment of stack arguments -
5636     // they are mostly aligned to 8 bytes, but vectors and i128 arrays
5637     // are aligned to 16 bytes, byvals can be aligned to 8 or 16 bytes,
5638     // For that reason, we compute current offset from stack pointer (which is
5639     // always properly aligned), and offset for the first vararg, then subtract
5640     // them.
5641     unsigned VAArgBase;
5642     Triple TargetTriple(F.getParent()->getTargetTriple());
5643     // Parameter save area starts at 48 bytes from frame pointer for ABIv1,
5644     // and 32 bytes for ABIv2.  This is usually determined by target
5645     // endianness, but in theory could be overridden by function attribute.
5646     if (TargetTriple.isPPC64()) {
5647       if (TargetTriple.isPPC64ELFv2ABI())
5648         VAArgBase = 32;
5649       else
5650         VAArgBase = 48;
5651     } else {
5652       // Parameter save area is 8 bytes from frame pointer in PPC32
5653       VAArgBase = 8;
5654     }
5655     unsigned VAArgOffset = VAArgBase;
5656     const DataLayout &DL = F.getDataLayout();
5657     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
5658       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
5659       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
5660       if (IsByVal) {
5661         assert(A->getType()->isPointerTy());
5662         Type *RealTy = CB.getParamByValType(ArgNo);
5663         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
5664         Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(8));
5665         if (ArgAlign < 8)
5666           ArgAlign = Align(8);
5667         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
5668         if (!IsFixed) {
5669           Value *Base =
5670               getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
5671           if (Base) {
5672             Value *AShadowPtr, *AOriginPtr;
5673             std::tie(AShadowPtr, AOriginPtr) =
5674                 MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
5675                                        kShadowTLSAlignment, /*isStore*/ false);
5676 
5677             IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
5678                              kShadowTLSAlignment, ArgSize);
5679           }
5680         }
5681         VAArgOffset += alignTo(ArgSize, Align(8));
5682       } else {
5683         Value *Base;
5684         uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
5685         Align ArgAlign = Align(8);
5686         if (A->getType()->isArrayTy()) {
5687           // Arrays are aligned to element size, except for long double
5688           // arrays, which are aligned to 8 bytes.
5689           Type *ElementTy = A->getType()->getArrayElementType();
5690           if (!ElementTy->isPPC_FP128Ty())
5691             ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
5692         } else if (A->getType()->isVectorTy()) {
5693           // Vectors are naturally aligned.
5694           ArgAlign = Align(ArgSize);
5695         }
5696         if (ArgAlign < 8)
5697           ArgAlign = Align(8);
5698         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
5699         if (DL.isBigEndian()) {
5700           // Adjusting the shadow for argument with size < 8 to match the
5701           // placement of bits in big endian system
5702           if (ArgSize < 8)
5703             VAArgOffset += (8 - ArgSize);
5704         }
5705         if (!IsFixed) {
5706           Base =
5707               getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
5708           if (Base)
5709             IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
5710         }
5711         VAArgOffset += ArgSize;
5712         VAArgOffset = alignTo(VAArgOffset, Align(8));
5713       }
5714       if (IsFixed)
5715         VAArgBase = VAArgOffset;
5716     }
5717 
5718     Constant *TotalVAArgSize =
5719         ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
5720     // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
5721     // a new class member i.e. it is the total size of all VarArgs.
5722     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
5723   }
5724 
5725   void finalizeInstrumentation() override {
5726     assert(!VAArgSize && !VAArgTLSCopy &&
5727            "finalizeInstrumentation called twice");
5728     IRBuilder<> IRB(MSV.FnPrologueEnd);
5729     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
5730     Value *CopySize = VAArgSize;
5731 
5732     if (!VAStartInstrumentationList.empty()) {
5733       // If there is a va_start in this function, make a backup copy of
5734       // va_arg_tls somewhere in the function entry block.
5735 
5736       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
5737       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
5738       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
5739                        CopySize, kShadowTLSAlignment, false);
5740 
5741       Value *SrcSize = IRB.CreateBinaryIntrinsic(
5742           Intrinsic::umin, CopySize,
5743           ConstantInt::get(IRB.getInt64Ty(), kParamTLSSize));
5744       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
5745                        kShadowTLSAlignment, SrcSize);
5746     }
5747 
5748     // Instrument va_start.
5749     // Copy va_list shadow from the backup copy of the TLS contents.
5750     Triple TargetTriple(F.getParent()->getTargetTriple());
5751     for (CallInst *OrigInst : VAStartInstrumentationList) {
5752       NextNodeIRBuilder IRB(OrigInst);
5753       Value *VAListTag = OrigInst->getArgOperand(0);
5754       Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
5755 
5756       // In PPC32 va_list_tag is a struct, whereas in PPC64 it's a pointer
5757       if (!TargetTriple.isPPC64()) {
5758         RegSaveAreaPtrPtr =
5759             IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
5760       }
5761       RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
5762 
5763       Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
5764       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5765       const DataLayout &DL = F.getDataLayout();
5766       unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
5767       const Align Alignment = Align(IntptrSize);
5768       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5769           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
5770                                  Alignment, /*isStore*/ true);
5771       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
5772                        CopySize);
5773     }
5774   }
5775 };
5776 
5777 /// SystemZ-specific implementation of VarArgHelper.
5778 struct VarArgSystemZHelper : public VarArgHelperBase {
5779   static const unsigned SystemZGpOffset = 16;
5780   static const unsigned SystemZGpEndOffset = 56;
5781   static const unsigned SystemZFpOffset = 128;
5782   static const unsigned SystemZFpEndOffset = 160;
5783   static const unsigned SystemZMaxVrArgs = 8;
5784   static const unsigned SystemZRegSaveAreaSize = 160;
5785   static const unsigned SystemZOverflowOffset = 160;
5786   static const unsigned SystemZVAListTagSize = 32;
5787   static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
5788   static const unsigned SystemZRegSaveAreaPtrOffset = 24;
5789 
5790   bool IsSoftFloatABI;
5791   AllocaInst *VAArgTLSCopy = nullptr;
5792   AllocaInst *VAArgTLSOriginCopy = nullptr;
5793   Value *VAArgOverflowSize = nullptr;
5794 
5795   enum class ArgKind {
5796     GeneralPurpose,
5797     FloatingPoint,
5798     Vector,
5799     Memory,
5800     Indirect,
5801   };
5802 
5803   enum class ShadowExtension { None, Zero, Sign };
5804 
5805   VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
5806                       MemorySanitizerVisitor &MSV)
5807       : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
5808         IsSoftFloatABI(F.getFnAttribute("use-soft-float").getValueAsBool()) {}
5809 
5810   ArgKind classifyArgument(Type *T) {
5811     // T is a SystemZABIInfo::classifyArgumentType() output, and there are
5812     // only a few possibilities of what it can be. In particular, enums, single
5813     // element structs and large types have already been taken care of.
5814 
5815     // Some i128 and fp128 arguments are converted to pointers only in the
5816     // back end.
5817     if (T->isIntegerTy(128) || T->isFP128Ty())
5818       return ArgKind::Indirect;
5819     if (T->isFloatingPointTy())
5820       return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
5821     if (T->isIntegerTy() || T->isPointerTy())
5822       return ArgKind::GeneralPurpose;
5823     if (T->isVectorTy())
5824       return ArgKind::Vector;
5825     return ArgKind::Memory;
5826   }
5827 
5828   ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
5829     // ABI says: "One of the simple integer types no more than 64 bits wide.
5830     // ... If such an argument is shorter than 64 bits, replace it by a full
5831     // 64-bit integer representing the same number, using sign or zero
5832     // extension". Shadow for an integer argument has the same type as the
5833     // argument itself, so it can be sign or zero extended as well.
5834     bool ZExt = CB.paramHasAttr(ArgNo, Attribute::ZExt);
5835     bool SExt = CB.paramHasAttr(ArgNo, Attribute::SExt);
5836     if (ZExt) {
5837       assert(!SExt);
5838       return ShadowExtension::Zero;
5839     }
5840     if (SExt) {
5841       assert(!ZExt);
5842       return ShadowExtension::Sign;
5843     }
5844     return ShadowExtension::None;
5845   }
5846 
5847   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
5848     unsigned GpOffset = SystemZGpOffset;
5849     unsigned FpOffset = SystemZFpOffset;
5850     unsigned VrIndex = 0;
5851     unsigned OverflowOffset = SystemZOverflowOffset;
5852     const DataLayout &DL = F.getDataLayout();
5853     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
5854       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
5855       // SystemZABIInfo does not produce ByVal parameters.
5856       assert(!CB.paramHasAttr(ArgNo, Attribute::ByVal));
5857       Type *T = A->getType();
5858       ArgKind AK = classifyArgument(T);
5859       if (AK == ArgKind::Indirect) {
5860         T = MS.PtrTy;
5861         AK = ArgKind::GeneralPurpose;
5862       }
5863       if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
5864         AK = ArgKind::Memory;
5865       if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
5866         AK = ArgKind::Memory;
5867       if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
5868         AK = ArgKind::Memory;
5869       Value *ShadowBase = nullptr;
5870       Value *OriginBase = nullptr;
5871       ShadowExtension SE = ShadowExtension::None;
5872       switch (AK) {
5873       case ArgKind::GeneralPurpose: {
5874         // Always keep track of GpOffset, but store shadow only for varargs.
5875         uint64_t ArgSize = 8;
5876         if (GpOffset + ArgSize <= kParamTLSSize) {
5877           if (!IsFixed) {
5878             SE = getShadowExtension(CB, ArgNo);
5879             uint64_t GapSize = 0;
5880             if (SE == ShadowExtension::None) {
5881               uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
5882               assert(ArgAllocSize <= ArgSize);
5883               GapSize = ArgSize - ArgAllocSize;
5884             }
5885             ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
5886             if (MS.TrackOrigins)
5887               OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
5888           }
5889           GpOffset += ArgSize;
5890         } else {
5891           GpOffset = kParamTLSSize;
5892         }
5893         break;
5894       }
5895       case ArgKind::FloatingPoint: {
5896         // Always keep track of FpOffset, but store shadow only for varargs.
5897         uint64_t ArgSize = 8;
5898         if (FpOffset + ArgSize <= kParamTLSSize) {
5899           if (!IsFixed) {
5900             // PoP says: "A short floating-point datum requires only the
5901             // left-most 32 bit positions of a floating-point register".
5902             // Therefore, in contrast to AK_GeneralPurpose and AK_Memory,
5903             // don't extend shadow and don't mind the gap.
5904             ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
5905             if (MS.TrackOrigins)
5906               OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
5907           }
5908           FpOffset += ArgSize;
5909         } else {
5910           FpOffset = kParamTLSSize;
5911         }
5912         break;
5913       }
5914       case ArgKind::Vector: {
5915         // Keep track of VrIndex. No need to store shadow, since vector varargs
5916         // go through AK_Memory.
5917         assert(IsFixed);
5918         VrIndex++;
5919         break;
5920       }
5921       case ArgKind::Memory: {
5922         // Keep track of OverflowOffset and store shadow only for varargs.
5923         // Ignore fixed args, since we need to copy only the vararg portion of
5924         // the overflow area shadow.
5925         if (!IsFixed) {
5926           uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
5927           uint64_t ArgSize = alignTo(ArgAllocSize, 8);
5928           if (OverflowOffset + ArgSize <= kParamTLSSize) {
5929             SE = getShadowExtension(CB, ArgNo);
5930             uint64_t GapSize =
5931                 SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
5932             ShadowBase =
5933                 getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
5934             if (MS.TrackOrigins)
5935               OriginBase =
5936                   getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
5937             OverflowOffset += ArgSize;
5938           } else {
5939             OverflowOffset = kParamTLSSize;
5940           }
5941         }
5942         break;
5943       }
5944       case ArgKind::Indirect:
5945         llvm_unreachable("Indirect must be converted to GeneralPurpose");
5946       }
5947       if (ShadowBase == nullptr)
5948         continue;
5949       Value *Shadow = MSV.getShadow(A);
5950       if (SE != ShadowExtension::None)
5951         Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
5952                                       /*Signed*/ SE == ShadowExtension::Sign);
5953       ShadowBase = IRB.CreateIntToPtr(ShadowBase, MS.PtrTy, "_msarg_va_s");
5954       IRB.CreateStore(Shadow, ShadowBase);
5955       if (MS.TrackOrigins) {
5956         Value *Origin = MSV.getOrigin(A);
5957         TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
5958         MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
5959                         kMinOriginAlignment);
5960       }
5961     }
5962     Constant *OverflowSize = ConstantInt::get(
5963         IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
5964     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
5965   }
5966 
5967   void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
5968     Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
5969         IRB.CreateAdd(
5970             IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5971             ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
5972         MS.PtrTy);
5973     Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
5974     Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5975     const Align Alignment = Align(8);
5976     std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5977         MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
5978                                /*isStore*/ true);
5979     // TODO(iii): copy only fragments filled by visitCallBase()
5980     // TODO(iii): support packed-stack && !use-soft-float
5981     // For use-soft-float functions, it is enough to copy just the GPRs.
5982     unsigned RegSaveAreaSize =
5983         IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
5984     IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
5985                      RegSaveAreaSize);
5986     if (MS.TrackOrigins)
5987       IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
5988                        Alignment, RegSaveAreaSize);
5989   }
5990 
5991   // FIXME: This implementation limits OverflowOffset to kParamTLSSize, so we
5992   // don't know real overflow size and can't clear shadow beyond kParamTLSSize.
5993   void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
5994     Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
5995         IRB.CreateAdd(
5996             IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
5997             ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
5998         MS.PtrTy);
5999     Value *OverflowArgAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
6000     Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
6001     const Align Alignment = Align(8);
6002     std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
6003         MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
6004                                Alignment, /*isStore*/ true);
6005     Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
6006                                            SystemZOverflowOffset);
6007     IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
6008                      VAArgOverflowSize);
6009     if (MS.TrackOrigins) {
6010       SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
6011                                       SystemZOverflowOffset);
6012       IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
6013                        VAArgOverflowSize);
6014     }
6015   }
6016 
6017   void finalizeInstrumentation() override {
6018     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
6019            "finalizeInstrumentation called twice");
6020     if (!VAStartInstrumentationList.empty()) {
6021       // If there is a va_start in this function, make a backup copy of
6022       // va_arg_tls somewhere in the function entry block.
6023       IRBuilder<> IRB(MSV.FnPrologueEnd);
6024       VAArgOverflowSize =
6025           IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
6026       Value *CopySize =
6027           IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
6028                         VAArgOverflowSize);
6029       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
6030       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
6031       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
6032                        CopySize, kShadowTLSAlignment, false);
6033 
6034       Value *SrcSize = IRB.CreateBinaryIntrinsic(
6035           Intrinsic::umin, CopySize,
6036           ConstantInt::get(MS.IntptrTy, kParamTLSSize));
6037       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
6038                        kShadowTLSAlignment, SrcSize);
6039       if (MS.TrackOrigins) {
6040         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
6041         VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
6042         IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
6043                          MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
6044       }
6045     }
6046 
6047     // Instrument va_start.
6048     // Copy va_list shadow from the backup copy of the TLS contents.
6049     for (CallInst *OrigInst : VAStartInstrumentationList) {
6050       NextNodeIRBuilder IRB(OrigInst);
6051       Value *VAListTag = OrigInst->getArgOperand(0);
6052       copyRegSaveArea(IRB, VAListTag);
6053       copyOverflowArea(IRB, VAListTag);
6054     }
6055   }
6056 };
6057 
6058 /// i386-specific implementation of VarArgHelper.
6059 struct VarArgI386Helper : public VarArgHelperBase {
6060   AllocaInst *VAArgTLSCopy = nullptr;
6061   Value *VAArgSize = nullptr;
6062 
6063   VarArgI386Helper(Function &F, MemorySanitizer &MS,
6064                    MemorySanitizerVisitor &MSV)
6065       : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/4) {}
6066 
6067   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
6068     const DataLayout &DL = F.getDataLayout();
6069     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
6070     unsigned VAArgOffset = 0;
6071     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
6072       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
6073       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
6074       if (IsByVal) {
6075         assert(A->getType()->isPointerTy());
6076         Type *RealTy = CB.getParamByValType(ArgNo);
6077         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
6078         Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
6079         if (ArgAlign < IntptrSize)
6080           ArgAlign = Align(IntptrSize);
6081         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
6082         if (!IsFixed) {
6083           Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
6084           if (Base) {
6085             Value *AShadowPtr, *AOriginPtr;
6086             std::tie(AShadowPtr, AOriginPtr) =
6087                 MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
6088                                        kShadowTLSAlignment, /*isStore*/ false);
6089 
6090             IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
6091                              kShadowTLSAlignment, ArgSize);
6092           }
6093           VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
6094         }
6095       } else {
6096         Value *Base;
6097         uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
6098         Align ArgAlign = Align(IntptrSize);
6099         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
6100         if (DL.isBigEndian()) {
6101           // Adjusting the shadow for argument with size < IntptrSize to match
6102           // the placement of bits in big endian system
6103           if (ArgSize < IntptrSize)
6104             VAArgOffset += (IntptrSize - ArgSize);
6105         }
6106         if (!IsFixed) {
6107           Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
6108           if (Base)
6109             IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
6110           VAArgOffset += ArgSize;
6111           VAArgOffset = alignTo(VAArgOffset, Align(IntptrSize));
6112         }
6113       }
6114     }
6115 
6116     Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
6117     // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
6118     // a new class member i.e. it is the total size of all VarArgs.
6119     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
6120   }
6121 
6122   void finalizeInstrumentation() override {
6123     assert(!VAArgSize && !VAArgTLSCopy &&
6124            "finalizeInstrumentation called twice");
6125     IRBuilder<> IRB(MSV.FnPrologueEnd);
6126     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
6127     Value *CopySize = VAArgSize;
6128 
6129     if (!VAStartInstrumentationList.empty()) {
6130       // If there is a va_start in this function, make a backup copy of
6131       // va_arg_tls somewhere in the function entry block.
6132       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
6133       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
6134       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
6135                        CopySize, kShadowTLSAlignment, false);
6136 
6137       Value *SrcSize = IRB.CreateBinaryIntrinsic(
6138           Intrinsic::umin, CopySize,
6139           ConstantInt::get(IRB.getInt64Ty(), kParamTLSSize));
6140       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
6141                        kShadowTLSAlignment, SrcSize);
6142     }
6143 
6144     // Instrument va_start.
6145     // Copy va_list shadow from the backup copy of the TLS contents.
6146     for (CallInst *OrigInst : VAStartInstrumentationList) {
6147       NextNodeIRBuilder IRB(OrigInst);
6148       Value *VAListTag = OrigInst->getArgOperand(0);
6149       Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
6150       Value *RegSaveAreaPtrPtr =
6151           IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
6152                              PointerType::get(RegSaveAreaPtrTy, 0));
6153       Value *RegSaveAreaPtr =
6154           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
6155       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
6156       const DataLayout &DL = F.getDataLayout();
6157       unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
6158       const Align Alignment = Align(IntptrSize);
6159       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
6160           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
6161                                  Alignment, /*isStore*/ true);
6162       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
6163                        CopySize);
6164     }
6165   }
6166 };
6167 
6168 /// Implementation of VarArgHelper that is used for ARM32, MIPS, RISCV,
6169 /// LoongArch64.
6170 struct VarArgGenericHelper : public VarArgHelperBase {
6171   AllocaInst *VAArgTLSCopy = nullptr;
6172   Value *VAArgSize = nullptr;
6173 
6174   VarArgGenericHelper(Function &F, MemorySanitizer &MS,
6175                       MemorySanitizerVisitor &MSV, const unsigned VAListTagSize)
6176       : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}
6177 
6178   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
6179     unsigned VAArgOffset = 0;
6180     const DataLayout &DL = F.getDataLayout();
6181     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
6182     for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
6183       bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
6184       if (IsFixed)
6185         continue;
6186       uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
6187       if (DL.isBigEndian()) {
6188         // Adjusting the shadow for argument with size < IntptrSize to match the
6189         // placement of bits in big endian system
6190         if (ArgSize < IntptrSize)
6191           VAArgOffset += (IntptrSize - ArgSize);
6192       }
6193       Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
6194       VAArgOffset += ArgSize;
6195       VAArgOffset = alignTo(VAArgOffset, IntptrSize);
6196       if (!Base)
6197         continue;
6198       IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
6199     }
6200 
6201     Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
6202     // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
6203     // a new class member i.e. it is the total size of all VarArgs.
6204     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
6205   }
6206 
6207   void finalizeInstrumentation() override {
6208     assert(!VAArgSize && !VAArgTLSCopy &&
6209            "finalizeInstrumentation called twice");
6210     IRBuilder<> IRB(MSV.FnPrologueEnd);
6211     VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
6212     Value *CopySize = VAArgSize;
6213 
6214     if (!VAStartInstrumentationList.empty()) {
6215       // If there is a va_start in this function, make a backup copy of
6216       // va_arg_tls somewhere in the function entry block.
6217       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
6218       VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
6219       IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
6220                        CopySize, kShadowTLSAlignment, false);
6221 
6222       Value *SrcSize = IRB.CreateBinaryIntrinsic(
6223           Intrinsic::umin, CopySize,
6224           ConstantInt::get(IRB.getInt64Ty(), kParamTLSSize));
6225       IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
6226                        kShadowTLSAlignment, SrcSize);
6227     }
6228 
6229     // Instrument va_start.
6230     // Copy va_list shadow from the backup copy of the TLS contents.
6231     for (CallInst *OrigInst : VAStartInstrumentationList) {
6232       NextNodeIRBuilder IRB(OrigInst);
6233       Value *VAListTag = OrigInst->getArgOperand(0);
6234       Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
6235       Value *RegSaveAreaPtrPtr =
6236           IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
6237                              PointerType::get(RegSaveAreaPtrTy, 0));
6238       Value *RegSaveAreaPtr =
6239           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
6240       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
6241       const DataLayout &DL = F.getDataLayout();
6242       unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
6243       const Align Alignment = Align(IntptrSize);
6244       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
6245           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
6246                                  Alignment, /*isStore*/ true);
6247       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
6248                        CopySize);
6249     }
6250   }
6251 };
6252 
6253 // ARM32, Loongarch64, MIPS and RISCV share the same calling conventions
6254 // regarding VAArgs.
6255 using VarArgARM32Helper = VarArgGenericHelper;
6256 using VarArgRISCVHelper = VarArgGenericHelper;
6257 using VarArgMIPSHelper = VarArgGenericHelper;
6258 using VarArgLoongArch64Helper = VarArgGenericHelper;
6259 
6260 /// A no-op implementation of VarArgHelper.
6261 struct VarArgNoOpHelper : public VarArgHelper {
6262   VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
6263                    MemorySanitizerVisitor &MSV) {}
6264 
6265   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}
6266 
6267   void visitVAStartInst(VAStartInst &I) override {}
6268 
6269   void visitVACopyInst(VACopyInst &I) override {}
6270 
6271   void finalizeInstrumentation() override {}
6272 };
6273 
6274 } // end anonymous namespace
6275 
6276 static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
6277                                         MemorySanitizerVisitor &Visitor) {
6278   // VarArg handling is only implemented on AMD64. False positives are possible
6279   // on other platforms.
6280   Triple TargetTriple(Func.getParent()->getTargetTriple());
6281 
6282   if (TargetTriple.getArch() == Triple::x86)
6283     return new VarArgI386Helper(Func, Msan, Visitor);
6284 
6285   if (TargetTriple.getArch() == Triple::x86_64)
6286     return new VarArgAMD64Helper(Func, Msan, Visitor);
6287 
6288   if (TargetTriple.isARM())
6289     return new VarArgARM32Helper(Func, Msan, Visitor, /*VAListTagSize=*/4);
6290 
6291   if (TargetTriple.isAArch64())
6292     return new VarArgAArch64Helper(Func, Msan, Visitor);
6293 
6294   if (TargetTriple.isSystemZ())
6295     return new VarArgSystemZHelper(Func, Msan, Visitor);
6296 
6297   // On PowerPC32 VAListTag is a struct
6298   // {char, char, i16 padding, char *, char *}
6299   if (TargetTriple.isPPC32())
6300     return new VarArgPowerPCHelper(Func, Msan, Visitor, /*VAListTagSize=*/12);
6301 
6302   if (TargetTriple.isPPC64())
6303     return new VarArgPowerPCHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
6304 
6305   if (TargetTriple.isRISCV32())
6306     return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
6307 
6308   if (TargetTriple.isRISCV64())
6309     return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
6310 
6311   if (TargetTriple.isMIPS32())
6312     return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
6313 
6314   if (TargetTriple.isMIPS64())
6315     return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
6316 
6317   if (TargetTriple.isLoongArch64())
6318     return new VarArgLoongArch64Helper(Func, Msan, Visitor,
6319                                        /*VAListTagSize=*/8);
6320 
6321   return new VarArgNoOpHelper(Func, Msan, Visitor);
6322 }
6323 
6324 bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
6325   if (!CompileKernel && F.getName() == kMsanModuleCtorName)
6326     return false;
6327 
6328   if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
6329     return false;
6330 
6331   MemorySanitizerVisitor Visitor(F, *this, TLI);
6332 
6333   // Clear out memory attributes.
6334   AttributeMask B;
6335   B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
6336   F.removeFnAttrs(B);
6337 
6338   return Visitor.runOnFunction();
6339 }
6340