//===- MemorySSATest.cpp - Unit tests for MemorySSA ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"

using namespace llvm;

const static char DLString[] = "e-i64:64-f80:128-n8:16:32:64-S128";

/// There's a lot of common setup between these tests. This fixture helps reduce
/// that. Tests should mock up a function, store it in F, and then call
/// setupAnalyses().
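///
/// A typical test body therefore looks roughly like this (a sketch only; see
/// the individual tests below for the real patterns):
///   F = Function::Create(...);        // build some IR with the IRBuilder B
///   setupAnalyses();                   // construct DT/AA/MemorySSA for F
///   MemorySSA &MSSA = *Analyses->MSSA; // then query or update MSSA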
class MemorySSATest : public testing::Test {
protected:
  // N.B. Many of these members depend on each other (e.g. the Module depends on
  // the Context, etc.). So, order matters here (and in TestAnalyses).
  LLVMContext C;
  Module M;
  IRBuilder<> B;
  DataLayout DL;
  TargetLibraryInfoImpl TLII;
  TargetLibraryInfo TLI;
  Function *F;

  // Things that we need to build after the function is created.
  struct TestAnalyses {
    DominatorTree DT;
    AssumptionCache AC;
    AAResults AA;
    BasicAAResult BAA;
    // We need to defer MSSA construction until AA is *entirely* set up, which
    // requires calling addAAResult. Hence, we just use a pointer here.
    std::unique_ptr<MemorySSA> MSSA;
    MemorySSAWalker *Walker;

    TestAnalyses(MemorySSATest &Test)
        : DT(*Test.F), AC(*Test.F), AA(Test.TLI),
          BAA(Test.DL, *Test.F, Test.TLI, AC, &DT) {
      AA.addAAResult(BAA);
      MSSA = std::make_unique<MemorySSA>(*Test.F, &AA, &DT);
      Walker = MSSA->getWalker();
    }
  };

  std::unique_ptr<TestAnalyses> Analyses;

  void setupAnalyses() {
    assert(F);
    Analyses.reset(new TestAnalyses(*this));
  }

public:
  MemorySSATest()
      : M("MemorySSATest", C), B(C), DL(DLString), TLI(TLII), F(nullptr) {}
};

TEST_F(MemorySSATest, CreateALoad) {
  // We create a diamond where there is a store on one side, and then after
  // building MemorySSA, create a load after the merge point, and use it to test
  // updating by creating an access for the load.
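  //
  // The IR built below is roughly (value names are illustrative; the blocks
  // are created unnamed):
  //   define void @F(ptr %p) {
  //   entry:
  //     br i1 true, label %left, label %right
  //   left:
  //     store i8 16, ptr %p
  //     br label %merge
  //   right:
  //     br label %merge
  //   merge:
  //     ; the load is created here after MemorySSA has been built
  //   }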
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  Argument *PointerArg = &*F->arg_begin();
  B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Add the load
  B.SetInsertPoint(Merge);
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // MemoryPHI should already exist.
  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
  EXPECT_NE(MP, nullptr);

  // Create the load memory access
  MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LoadInst, MP, Merge, MemorySSA::Beginning));
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
  // We create a diamond, then build MemorySSA with no memory accesses, and
  // incrementally update it by inserting a store in the entry, a load in the
  // merge point, then a store in the branch, another load in the merge point,
  // and then a store in the entry.
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left->begin());
  Argument *PointerArg = &*F->arg_begin();
  B.SetInsertPoint(Left);
  B.CreateBr(Merge);
  B.SetInsertPoint(Right);
  B.CreateBr(Merge);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Add the store
  B.SetInsertPoint(Entry->begin());
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB(
      EntryStore, nullptr, Entry, MemorySSA::Beginning);
  Updater.insertDef(cast<MemoryDef>(EntryStoreAccess));

  // Add the load
  B.SetInsertPoint(Merge->begin());
  LoadInst *FirstLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // MemoryPHI should not already exist.
  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
  EXPECT_EQ(MP, nullptr);

  // Create the load memory access
  MemoryUse *FirstLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      FirstLoad, nullptr, Merge, MemorySSA::Beginning));
  Updater.insertUse(FirstLoadAccess);
  // Should just have a load using the entry access, because it should discover
  // the phi is trivial
  EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), EntryStoreAccess);

  // Create a store on the left
  // Add the store
  B.SetInsertPoint(Left->begin());
  StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB(
      LeftStore, nullptr, Left, MemorySSA::Beginning);
  Updater.insertDef(cast<MemoryDef>(LeftStoreAccess), false);

  // MemoryPHI should exist after adding LeftStore.
  MP = MSSA.getMemoryAccess(Merge);
  EXPECT_NE(MP, nullptr);

  // Add the second load
  B.SetInsertPoint(Merge->begin());
  LoadInst *SecondLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // Create the load memory access
  MemoryUse *SecondLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      SecondLoad, nullptr, Merge, MemorySSA::Beginning));
  Updater.insertUse(SecondLoadAccess);
  // Now the load should be defined by a phi of the entry store and the left
  // store.
  MemoryPhi *MergePhi =
      dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
  EXPECT_NE(MergePhi, nullptr);
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
  // Now create a store below the existing one in the entry
  B.SetInsertPoint(--Entry->end());
  StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB(
      SecondEntryStore, nullptr, Entry, MemorySSA::End);
  // Insert it twice just to test renaming
  Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), false);
  EXPECT_NE(FirstLoadAccess->getDefiningAccess(), MergePhi);
  Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), true);
  EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), MergePhi);
  // And make sure the phi below it got updated, despite being blocks away
  MergePhi = dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
  EXPECT_NE(MergePhi, nullptr);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SecondEntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, CreateALoadUpdater) {
  // We create a diamond, then build MemorySSA with no memory accesses, and
  // incrementally update it by inserting a store in one of the branches, and a
  // load in the merge point.
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left->begin());
  Argument *PointerArg = &*F->arg_begin();
  B.SetInsertPoint(Left);
  B.CreateBr(Merge);
  B.SetInsertPoint(Right);
  B.CreateBr(Merge);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  B.SetInsertPoint(Left->begin());
  // Add the store
  StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *StoreAccess =
      Updater.createMemoryAccessInBB(SI, nullptr, Left, MemorySSA::Beginning);
  Updater.insertDef(cast<MemoryDef>(StoreAccess));

  // MemoryPHI should be created when inserting the def
  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
  EXPECT_NE(MP, nullptr);

  // Add the load
  B.SetInsertPoint(Merge->begin());
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // Create the load memory access
  MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LoadInst, nullptr, Merge, MemorySSA::Beginning));
  Updater.insertUse(LoadAccess);
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, SinkLoad) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left->begin());
  Argument *PointerArg = &*F->arg_begin();
  B.SetInsertPoint(Left);
  B.CreateBr(Merge);
  B.SetInsertPoint(Right);
  B.CreateBr(Merge);

  // Load in left block
  B.SetInsertPoint(Left->begin());
  LoadInst *LoadInst1 = B.CreateLoad(B.getInt8Ty(), PointerArg);
  // Store in merge block
  B.SetInsertPoint(Merge->begin());
  B.CreateStore(B.getInt8(16), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Mimic sinking of a load:
  // - clone load
  // - insert in "exit" block
  // - insert in mssa
  // - remove from original block
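  //
  // Before the sink, the relevant accesses are roughly (numbering
  // illustrative):
  //   left:
  //     %l = load i8, ptr %p       ; MemoryUse(liveOnEntry)
  //     br label %merge
  //   merge:
  //     store i8 16, ptr %p        ; 1 = MemoryDef(liveOnEntry)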

  LoadInst *LoadInstClone = cast<LoadInst>(LoadInst1->clone());
  LoadInstClone->insertInto(Merge, Merge->begin());
  MemoryAccess *NewLoadAccess =
      Updater.createMemoryAccessInBB(LoadInstClone, nullptr,
                                     LoadInstClone->getParent(),
                                     MemorySSA::Beginning);
  Updater.insertUse(cast<MemoryUse>(NewLoadAccess));
  MSSA.verifyMemorySSA();
  Updater.removeMemoryAccess(MSSA.getMemoryAccess(LoadInst1));
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveAStore) {
  // We create a diamond where there is a store in the entry, a store on one
  // side, and a load at the end.  After building MemorySSA, we test updating
  // by moving the store from the side block to the entry block. This destroys
  // the old access.
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  StoreInst *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  B.CreateLoad(B.getInt8Ty(), PointerArg);
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Move the store
  SideStore->moveBefore(Entry->getTerminator());
  MemoryAccess *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  MemoryAccess *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  MemoryAccess *NewStoreAccess = Updater.createMemoryAccessAfter(
      SideStore, EntryStoreAccess, EntryStoreAccess);
  EntryStoreAccess->replaceAllUsesWith(NewStoreAccess);
  Updater.removeMemoryAccess(SideStoreAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveAStoreUpdater) {
  // We create a diamond where there is a store in the entry, a store on one
  // side, and a load at the end.  After building MemorySSA, we test updating
  // by moving the store from the side block to the entry block.  This destroys
  // the old access.
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Move the store
  SideStore->moveBefore(Entry->getTerminator());
  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  auto *NewStoreAccess = Updater.createMemoryAccessAfter(
      SideStore, EntryStoreAccess, EntryStoreAccess);
  // Before, the load will point to a phi of the EntryStore and SideStore.
  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  Updater.removeMemoryAccess(SideStoreAccess);
  Updater.insertDef(cast<MemoryDef>(NewStoreAccess));
  // After, it's a phi of the new side store access.
  EXPECT_EQ(MergePhi->getIncomingValue(0), NewStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), NewStoreAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveAStoreUpdaterMove) {
  // We create a diamond where there is a store in the entry, a store on one
  // side, and a load at the end.  After building MemorySSA, we test updating
  // by moving the store from the side block to the entry block.  This does not
  // destroy the old access.
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Move the store
  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  // Before, the load will point to a phi of the EntryStore and SideStore.
  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  SideStore->moveBefore(*EntryStore->getParent(), ++EntryStore->getIterator());
  Updater.moveAfter(SideStoreAccess, EntryStoreAccess);
  // After, it's a phi of the side store.
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);

  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveAStoreAllAround) {
  // We create a diamond where there is a store in the entry, a store on one
  // side, and a load at the end.  After building MemorySSA, we test updating
  // by moving the store from the side block to the entry block, then to the
  // other side block, then to before the load.  This does not destroy the old
  // access.
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Move the store
  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  // Before, the load will point to a phi of the EntryStore and SideStore.
  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  // Move the store before the entry store
  SideStore->moveBefore(*EntryStore->getParent(), EntryStore->getIterator());
  Updater.moveBefore(SideStoreAccess, EntryStoreAccess);
  // After, it's a phi of the entry store.
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  MSSA.verifyMemorySSA();
  // Now move the store to the right branch
  SideStore->moveBefore(*Right, Right->begin());
  Updater.moveToPlace(SideStoreAccess, Right, MemorySSA::Beginning);
  MSSA.verifyMemorySSA();
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
  // Now move it before the load
  SideStore->moveBefore(MergeLoad);
  Updater.moveBefore(SideStoreAccess, LoadAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, RemoveAPhi) {
  // We create a diamond where there is a store on one side, and then a load
  // after the merge point.  This enables us to test a bunch of different
  // removal cases.
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Before, the load will be a use of a phi<store, liveonentry>.
  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
  MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  // Kill the store
  Updater.removeMemoryAccess(StoreAccess);
  MemoryPhi *MP = cast<MemoryPhi>(DefiningAccess);
  // Verify the phi ended up as liveonentry, liveonentry
  for (auto &Op : MP->incoming_values())
    EXPECT_TRUE(MSSA.isLiveOnEntryDef(cast<MemoryAccess>(Op.get())));
  // Replace the phi uses with the live on entry def
  MP->replaceAllUsesWith(MSSA.getLiveOnEntryDef());
  // Verify the load is now defined by liveOnEntryDef
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
  // Remove the PHI
  Updater.removeMemoryAccess(MP);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, RemoveMemoryAccess) {
  // We create a diamond where there is a store on one side, and then a load
  // after the merge point.  This enables us to test a bunch of different
  // removal cases.
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  MemorySSAUpdater Updater(&MSSA);

  // Before, the load will be a use of a phi<store, liveonentry>. It should be
  // the same after.
  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
  MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  // The load is currently clobbered by one of the phi arguments, so the walker
  // should determine the clobbering access as the phi.
  EXPECT_EQ(DefiningAccess, Walker->getClobberingMemoryAccess(LoadInst));
  Updater.removeMemoryAccess(StoreAccess);
  MSSA.verifyMemorySSA();
  // After removing the access, let's see if we got the right accesses.
  // The load should still point to the phi ...
  EXPECT_EQ(DefiningAccess, LoadAccess->getDefiningAccess());
  // but we should now get live on entry for the clobbering definition of the
  // load, since the walker will walk past the phi node because every argument
  // is the same.
  // XXX: This currently requires either removing the phi or resetting optimized
  // on the load

  EXPECT_FALSE(
      MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
  // If we reset optimized, we get live on entry.
  LoadAccess->resetOptimized();
  EXPECT_TRUE(
      MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
  // The phi should now be a two entry phi with two live on entry defs.
  for (const auto &Op : DefiningAccess->operands()) {
    MemoryAccess *Operand = cast<MemoryAccess>(&*Op);
    EXPECT_TRUE(MSSA.isLiveOnEntryDef(Operand));
  }

  // Now we try to remove the single valued phi
  Updater.removeMemoryAccess(DefiningAccess);
  MSSA.verifyMemorySSA();
  // Now the load should be a load of live on entry.
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
}

// We had a bug with caching where the walker would report MemoryDef#3's clobber
// (below) was MemoryDef#1.
//
// define void @F() {
//   %A = alloca i8, i8 1
// ; 1 = MemoryDef(liveOnEntry)
//   store i8 0, i8* %A
// ; 2 = MemoryDef(1)
//   store i8 1, i8* %A
// ; 3 = MemoryDef(2)
//   store i8 2, i8* %A
// }
TEST_F(MemorySSATest, TestTripleStore) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  StoreInst *S1 = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
  StoreInst *S2 = B.CreateStore(ConstantInt::get(Int8, 1), Alloca);
  StoreInst *S3 = B.CreateStore(ConstantInt::get(Int8, 2), Alloca);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  unsigned I = 0;
  for (StoreInst *V : {S1, S2, S3}) {
    // Everything should be clobbered by its defining access
    MemoryAccess *DefiningAccess = MSSA.getMemoryAccess(V)->getDefiningAccess();
    MemoryAccess *WalkerClobber = Walker->getClobberingMemoryAccess(V);
    EXPECT_EQ(DefiningAccess, WalkerClobber)
        << "Store " << I << " doesn't have the correct clobbering access";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}

// ...And fixing the above bug made it obvious that, when walking, MemorySSA's
// walker was caching the initial node it walked. This was fine (albeit
// mostly redundant) unless the initial node being walked is a clobber for the
// query. In that case, we'd cache that the node clobbered itself.
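//
// The test IR is a single block, roughly:
//   %A = alloca i8
//   ; 1 = MemoryDef(liveOnEntry)
//   store i8 0, ptr %A
//   ; MemoryUse(1)
//   %l = load i8, ptr %A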
TEST_F(MemorySSATest, TestStoreAndLoad) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Instruction *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
  Instruction *LI = B.CreateLoad(Int8, Alloca);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LI);
  EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SI));
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SI)));
}

// Another bug (related to the above two fixes): It was noted that, given the
// following code:
// ; 1 = MemoryDef(liveOnEntry)
// store i8 0, i8* %1
//
// ...A query to getClobberingMemoryAccess(MemoryAccess*, MemoryLocation) would
// hand back the store (correctly). A later call to
// getClobberingMemoryAccess(const Instruction*) would also hand back the store
// (incorrectly; it should return liveOnEntry).
//
// This test checks that repeated calls to either function return what they're
// meant to.
TEST_F(MemorySSATest, TestStoreDoubleQuery) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  StoreInst *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  MemoryAccess *StoreAccess = MSSA.getMemoryAccess(SI);
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  MemoryAccess *Clobber =
      Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
  MemoryAccess *LiveOnEntry = Walker->getClobberingMemoryAccess(SI);

  EXPECT_EQ(Clobber, StoreAccess);
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));

  // Try again (with entries in the cache already) for good measure...
  Clobber = Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
  LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
  EXPECT_EQ(Clobber, StoreAccess);
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
}

// Bug: During phi optimization, the walker wouldn't cache to the proper result
// in the farthest-walked BB.
//
// Specifically, it would assume that whatever we walked to was a clobber.
// "Whatever we walked to" isn't a clobber if we hit a cache entry.
//
// ...So, we need a test case that looks like:
//    A
//   / \
//  B   |
//   \ /
//    C
//
// Where, when we try to optimize a thing in 'C', a blocker is found in 'B'.
// The walk must determine that the blocker exists by using cache entries *while
// walking* 'B'.
TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "A", F));
  Type *Int8 = Type::getInt8Ty(C);
  Constant *One = ConstantInt::get(Int8, 1);
  Constant *Zero = ConstantInt::get(Int8, 0);
  Value *AllocA = B.CreateAlloca(Int8, One, "a");
  Value *AllocB = B.CreateAlloca(Int8, One, "b");
  BasicBlock *IfThen = BasicBlock::Create(C, "B", F);
  BasicBlock *IfEnd = BasicBlock::Create(C, "C", F);

  B.CreateCondBr(UndefValue::get(Type::getInt1Ty(C)), IfThen, IfEnd);

  B.SetInsertPoint(IfThen);
  Instruction *FirstStore = B.CreateStore(Zero, AllocA);
  B.CreateStore(Zero, AllocB);
  Instruction *ALoad0 = B.CreateLoad(Int8, AllocA, "");
  Instruction *BStore = B.CreateStore(Zero, AllocB);
  // Due to use optimization/etc. we make a store to A, which is removed after
  // we build MSSA. This helps keep the test case simple-ish.
  Instruction *KillStore = B.CreateStore(Zero, AllocA);
  Instruction *ALoad = B.CreateLoad(Int8, AllocA, "");
  B.CreateBr(IfEnd);

  B.SetInsertPoint(IfEnd);
  Instruction *BelowPhi = B.CreateStore(Zero, AllocA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  MemorySSAUpdater Updater(&MSSA);

  // Kill `KillStore`; it exists solely so that the load after it won't be
  // optimized to FirstStore.
  Updater.removeMemoryAccess(MSSA.getMemoryAccess(KillStore));
  KillStore->eraseFromParent();
  auto *ALoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(ALoad));
  EXPECT_EQ(ALoadMA->getDefiningAccess(), MSSA.getMemoryAccess(BStore));

  // Populate the cache for the store to AllocB directly after FirstStore. It
  // should point to something in block B (so something in C can't be optimized
  // to it).
  MemoryAccess *Load0Clobber = Walker->getClobberingMemoryAccess(ALoad0);
  EXPECT_EQ(MSSA.getMemoryAccess(FirstStore), Load0Clobber);

  // If the bug exists, this will introduce a bad cache entry for %a on BStore.
  // It will point to the store to %b after FirstStore. This only happens during
  // phi optimization.
  MemoryAccess *BottomClobber = Walker->getClobberingMemoryAccess(BelowPhi);
  MemoryAccess *Phi = MSSA.getMemoryAccess(IfEnd);
  EXPECT_EQ(BottomClobber, Phi);

  // This query will first check the cache for {%a, BStore}. It should point to
  // FirstStore, not to the store after FirstStore.
  MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(ALoad);
  EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore));
}

// Test that our walker properly handles loads with !invariant.load metadata.
// It's a bit hacky, since we add the metadata *after* building MSSA.
// Otherwise, the use optimizer will optimize it for us, which isn't what we
// want.
// FIXME: It may be easier/cleaner to just add an 'optimize uses?' flag to MSSA.
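//
// The test IR is roughly (value names illustrative):
//   %a = alloca i8
//   ; 1 = MemoryDef(liveOnEntry)
//   store i8 1, ptr %a
//   ; MemoryUse(1)
//   %l = load i8, ptr %a   ; later tagged with !invariant.load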
TEST_F(MemorySSATest, WalkerInvariantLoadOpt) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Constant *One = ConstantInt::get(Int8, 1);
  Value *AllocA = B.CreateAlloca(Int8, One, "");

  Instruction *Store = B.CreateStore(One, AllocA);
  Instruction *Load = B.CreateLoad(Int8, AllocA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  auto *LoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(Load));
  auto *StoreMA = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
  EXPECT_EQ(LoadMA->getDefiningAccess(), StoreMA);

  // ...At the time of writing, no cache should exist for LoadMA. Be a bit
  // flexible to future changes.
  Walker->invalidateInfo(LoadMA);
  Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(C, {}));

  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LoadMA);
  EXPECT_EQ(LoadClobber, MSSA.getLiveOnEntryDef());
}

// Test loads get reoptimized properly by the walker.
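//
// The test IR is roughly (value names illustrative):
//   %A = alloca i8
//   ; 1 = MemoryDef(liveOnEntry)
//   store i8 0, ptr %A
//   %B = alloca i8
//   ; 2 = MemoryDef(1)
//   store i8 0, ptr %B
//   ; MemoryUse(1)
//   %l = load i8, ptr %A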
TEST_F(MemorySSATest, WalkerReopt) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Instruction *SIA = B.CreateStore(ConstantInt::get(Int8, 0), AllocaA);
  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
  Instruction *SIB = B.CreateStore(ConstantInt::get(Int8, 0), AllocaB);
  Instruction *LIA = B.CreateLoad(Int8, AllocaA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  MemorySSAUpdater Updater(&MSSA);

  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LIA);
  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LIA));
  EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SIA));
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SIA)));
  Updater.removeMemoryAccess(LoadAccess);

  // Create the load memory access pointing to an unoptimized place.
  MemoryUse *NewLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LIA, MSSA.getMemoryAccess(SIB), LIA->getParent(), MemorySSA::End));
  // This should cause it to be optimized
  EXPECT_EQ(Walker->getClobberingMemoryAccess(NewLoadAccess), LoadClobber);
  EXPECT_EQ(NewLoadAccess->getDefiningAccess(), LoadClobber);
}

// Test out MemorySSAUpdater::moveBefore
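//
// The test builds a single block of stores and loads to three allocas and then
// hoists the store to C above the store to B, i.e. (sketch, values elided):
//   store A; store C (moved up); store B; load B; store A; store A; load C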
TEST_F(MemorySSATest, MoveAboveMemoryDef) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));

  Type *Int8 = Type::getInt8Ty(C);
  Value *A = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *B_ = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
  Value *C = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");

  StoreInst *StoreA0 = B.CreateStore(ConstantInt::get(Int8, 0), A);
  StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 0), B_);
  LoadInst *LoadB = B.CreateLoad(Int8, B_);
  StoreInst *StoreA1 = B.CreateStore(ConstantInt::get(Int8, 4), A);
  StoreInst *StoreC = B.CreateStore(ConstantInt::get(Int8, 4), C);
  StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 4), A);
  LoadInst *LoadC = B.CreateLoad(Int8, C);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker &Walker = *Analyses->Walker;

  MemorySSAUpdater Updater(&MSSA);
  StoreC->moveBefore(StoreB);
  Updater.moveBefore(cast<MemoryDef>(MSSA.getMemoryAccess(StoreC)),
                     cast<MemoryDef>(MSSA.getMemoryAccess(StoreB)));

  MSSA.verifyMemorySSA();

  EXPECT_EQ(MSSA.getMemoryAccess(StoreB)->getDefiningAccess(),
            MSSA.getMemoryAccess(StoreC));
  EXPECT_EQ(MSSA.getMemoryAccess(StoreC)->getDefiningAccess(),
            MSSA.getMemoryAccess(StoreA0));
  EXPECT_EQ(MSSA.getMemoryAccess(StoreA2)->getDefiningAccess(),
            MSSA.getMemoryAccess(StoreA1));
  EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadB),
            MSSA.getMemoryAccess(StoreB));
  EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadC),
            MSSA.getMemoryAccess(StoreC));

  // Exercise block numbering.
  EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreC),
                                    MSSA.getMemoryAccess(StoreB)));
  EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreA1),
                                    MSSA.getMemoryAccess(StoreA2)));
}

TEST_F(MemorySSATest, Irreducible) {
  // Create the equivalent of
  // x = something
  // if (...)
  //    goto second_loop_entry
  // while (...) {
  // second_loop_entry:
  // }
  // use(x)

  SmallVector<PHINode *, 8> Inserted;
  IRBuilder<> B(C);
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);

  // Make blocks
  BasicBlock *IfBB = BasicBlock::Create(C, "if", F);
  BasicBlock *LoopStartBB = BasicBlock::Create(C, "loopstart", F);
  BasicBlock *LoopMainBB = BasicBlock::Create(C, "loopmain", F);
  BasicBlock *AfterLoopBB = BasicBlock::Create(C, "afterloop", F);
  B.SetInsertPoint(IfBB);
  B.CreateCondBr(B.getTrue(), LoopMainBB, LoopStartBB);
  B.SetInsertPoint(LoopStartBB);
  B.CreateBr(LoopMainBB);
  B.SetInsertPoint(LoopMainBB);
  B.CreateCondBr(B.getTrue(), LoopStartBB, AfterLoopBB);
  B.SetInsertPoint(AfterLoopBB);
  Argument *FirstArg = &*F->arg_begin();
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Create the load memory access
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), FirstArg);
  MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LoadInst, nullptr, AfterLoopBB, MemorySSA::Beginning));
  Updater.insertUse(LoadAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveToBeforeLiveOnEntryInvalidatesCache) {
  // Create:
  //   %1 = alloca i8
  //   ; 1 = MemoryDef(liveOnEntry)
  //   store i8 0, i8* %1
  //   ; 2 = MemoryDef(1)
  //   store i8 0, i8* %1
  //
  // ...And be sure that MSSA's caching doesn't give us `1` for the clobber of
  // `2` after `1` is removed.
  IRBuilder<> B(C);
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);

  BasicBlock *Entry = BasicBlock::Create(C, "if", F);
  B.SetInsertPoint(Entry);

  Value *A = B.CreateAlloca(B.getInt8Ty());
  StoreInst *StoreA = B.CreateStore(B.getInt8(0), A);
  StoreInst *StoreB = B.CreateStore(B.getInt8(0), A);

  setupAnalyses();

  MemorySSA &MSSA = *Analyses->MSSA;

  auto *DefA = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
  auto *DefB = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));

  MemoryAccess *BClobber = MSSA.getWalker()->getClobberingMemoryAccess(DefB);
  ASSERT_EQ(DefA, BClobber);

  MemorySSAUpdater(&MSSA).removeMemoryAccess(DefA);
  StoreA->eraseFromParent();

  EXPECT_EQ(DefB->getDefiningAccess(), MSSA.getLiveOnEntryDef());

  EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefB),
            MSSA.getLiveOnEntryDef())
      << "(DefA = " << DefA << ")";
}

TEST_F(MemorySSATest, RemovingDefInvalidatesCache) {
  // Create:
  //   %x = alloca i8
  //   %y = alloca i8
  //   ; 1 = MemoryDef(liveOnEntry)
  //   store i8 0, i8* %x
  //   ; 2 = MemoryDef(1)
  //   store i8 0, i8* %y
  //   ; 3 = MemoryDef(2)
  //   store i8 0, i8* %x
  //
  // And be sure that MSSA's caching handles the removal of def `1`
  // appropriately.
  IRBuilder<> B(C);
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);

  BasicBlock *Entry = BasicBlock::Create(C, "if", F);
  B.SetInsertPoint(Entry);

  Value *X = B.CreateAlloca(B.getInt8Ty());
  Value *Y = B.CreateAlloca(B.getInt8Ty());
  StoreInst *StoreX1 = B.CreateStore(B.getInt8(0), X);
  StoreInst *StoreY = B.CreateStore(B.getInt8(0), Y);
  StoreInst *StoreX2 = B.CreateStore(B.getInt8(0), X);

  setupAnalyses();

  MemorySSA &MSSA = *Analyses->MSSA;

  auto *DefX1 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX1));
  auto *DefY = cast<MemoryDef>(MSSA.getMemoryAccess(StoreY));
  auto *DefX2 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX2));

  EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
  MemoryAccess *X2Clobber = MSSA.getWalker()->getClobberingMemoryAccess(DefX2);
  ASSERT_EQ(DefX1, X2Clobber);

  MemorySSAUpdater(&MSSA).removeMemoryAccess(DefX1);
  StoreX1->eraseFromParent();

  EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
  EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefX2),
            MSSA.getLiveOnEntryDef())
      << "(DefX1 = " << DefX1 << ")";
}

// Test Must alias for optimized defs.
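//
// The IR is a straight line of stores alternating between two allocas, so each
// store's clobber is the previous store to the same alloca (or liveOnEntry for
// the first one), e.g. (sketch):
//   store i8 1, ptr %A   ; clobbered by liveOnEntry
//   store i8 1, ptr %B   ; clobbered by liveOnEntry
//   store i8 2, ptr %A   ; clobbered by the first store to %A
//   ...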
TEST_F(MemorySSATest, TestStoreMustAlias) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
  StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
  StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
  StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaA);
  StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaB);
  StoreInst *SA3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaA);
  StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaB);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  unsigned I = 0;
  for (StoreInst *V : {SA1, SB1, SA2, SB2, SA3, SB3}) {
    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemDef->isOptimized(), false)
        << "Store " << I << " is optimized from the start?";
    if (V == SA1)
      Walker->getClobberingMemoryAccess(V);
    else {
      MemoryAccess *Def = MemDef->getDefiningAccess();
      MemoryAccess *Clob = Walker->getClobberingMemoryAccess(V);
      EXPECT_NE(Def, Clob) << "Store " << I
                           << " has Defining Access equal to Clobbering Access";
    }
    EXPECT_EQ(MemDef->isOptimized(), true)
        << "Store " << I << " was not optimized";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}

// Test May alias for optimized defs.
TEST_F(MemorySSATest, TestStoreMayAlias) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getPtrTy(), B.getPtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  auto *ArgIt = F->arg_begin();
  Argument *PointerA = &*ArgIt;
  Argument *PointerB = &*(++ArgIt);
  Value *AllocaC = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
  // Store into arg1, must alias because it's LOE => must
  StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
  // Store into arg2, may alias store to arg1 => may
  StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
  // Store into alloca, no alias with args, so must alias LOE => must
  StoreInst *SC1 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaC);
  // Store into arg1, may alias store to arg2 => may
  StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 3), PointerA);
  // Store into arg2, may alias store to arg1 => may
  StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 4), PointerB);
  // Store into alloca, no alias with args, so must alias SC1 => must
  StoreInst *SC2 = B.CreateStore(ConstantInt::get(Int8, 5), AllocaC);
  // Store into arg2, must alias store to arg2 => must
  StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 6), PointerB);
  std::initializer_list<StoreInst *> Sts = {SA1, SB1, SC1, SA2, SB2, SC2, SB3};

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  unsigned I = 0;
  for (StoreInst *V : Sts) {
    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemDef->isOptimized(), false)
        << "Store " << I << " is optimized from the start?";
    ++I;
  }

  for (StoreInst *V : Sts)
    Walker->getClobberingMemoryAccess(V);

  I = 0;
  for (StoreInst *V : Sts) {
    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemDef->isOptimized(), true)
        << "Store " << I << " was not optimized";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}

TEST_F(MemorySSATest, LifetimeMarkersAreClobbers) {
  // Example code:
  // define void @a(i8* %foo) {
  //   %bar = getelementptr i8, i8* %foo, i64 1
  //   %baz = getelementptr i8, i8* %foo, i64 2
  //   store i8 0, i8* %foo
  //   store i8 0, i8* %bar
  //   call void @llvm.lifetime.end.p0i8(i64 3, i8* %foo)
  //   call void @llvm.lifetime.start.p0i8(i64 3, i8* %foo)
  //   store i8 0, i8* %foo
  //   store i8 0, i8* %bar
  //   call void @llvm.memset.p0i8(i8* %baz, i8 0, i64 1)
  //   ret void
  // }
  //
  // Patterns like this are possible after inlining; the stores to %foo and %bar
  // should both be clobbered by the lifetime.start call if they're dominated by
  // it.

  IRBuilder<> B(C);
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);

  // Make blocks
  BasicBlock *Entry = BasicBlock::Create(C, "entry", F);

  B.SetInsertPoint(Entry);
  Value *Foo = &*F->arg_begin();

  Value *Bar = B.CreatePtrAdd(Foo, B.getInt64(1), "bar");
  Value *Baz = B.CreatePtrAdd(Foo, B.getInt64(2), "baz");

  B.CreateStore(B.getInt8(0), Foo);
  B.CreateStore(B.getInt8(0), Bar);

  auto GetLifetimeIntrinsic = [&](Intrinsic::ID ID) {
    return Intrinsic::getDeclaration(&M, ID, {Foo->getType()});
  };

  B.CreateCall(GetLifetimeIntrinsic(Intrinsic::lifetime_end),
               {B.getInt64(3), Foo});
  Instruction *LifetimeStart = B.CreateCall(
      GetLifetimeIntrinsic(Intrinsic::lifetime_start), {B.getInt64(3), Foo});

  Instruction *FooStore = B.CreateStore(B.getInt8(0), Foo);
  Instruction *BarStore = B.CreateStore(B.getInt8(0), Bar);
  Instruction *BazMemSet = B.CreateMemSet(Baz, B.getInt8(0), 1, Align(1));

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;

  MemoryAccess *LifetimeStartAccess = MSSA.getMemoryAccess(LifetimeStart);
  ASSERT_NE(LifetimeStartAccess, nullptr);

  MemoryAccess *FooAccess = MSSA.getMemoryAccess(FooStore);
  ASSERT_NE(FooAccess, nullptr);

  MemoryAccess *BarAccess = MSSA.getMemoryAccess(BarStore);
  ASSERT_NE(BarAccess, nullptr);

  MemoryAccess *BazAccess = MSSA.getMemoryAccess(BazMemSet);
  ASSERT_NE(BazAccess, nullptr);

  MemoryAccess *FooClobber =
      MSSA.getWalker()->getClobberingMemoryAccess(FooAccess);
  EXPECT_EQ(FooClobber, LifetimeStartAccess);

  MemoryAccess *BarClobber =
      MSSA.getWalker()->getClobberingMemoryAccess(BarAccess);
  EXPECT_EQ(BarClobber, LifetimeStartAccess);

  MemoryAccess *BazClobber =
      MSSA.getWalker()->getClobberingMemoryAccess(BazAccess);
  EXPECT_EQ(BazClobber, LifetimeStartAccess);

  MemoryAccess *LifetimeStartClobber =
      MSSA.getWalker()->getClobberingMemoryAccess(
          LifetimeStartAccess, MemoryLocation::getAfter(Foo));
  EXPECT_EQ(LifetimeStartClobber, LifetimeStartAccess);
}

TEST_F(MemorySSATest, DefOptimizationsAreInvalidatedOnMoving) {
  IRBuilder<> B(C);
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getInt1Ty()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);

  // Make a CFG like
  //     entry
  //      / \
  //     a   b
  //      \ /
  //       c
  //
  // Put a def in A and a def in B, move the def from A -> B, and observe that
  // the optimization is invalidated.
  BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
  BasicBlock *BlockA = BasicBlock::Create(C, "a", F);
  BasicBlock *BlockB = BasicBlock::Create(C, "b", F);
  BasicBlock *BlockC = BasicBlock::Create(C, "c", F);

  B.SetInsertPoint(Entry);
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "alloc");
  StoreInst *StoreEntry = B.CreateStore(B.getInt8(0), Alloca);
  B.CreateCondBr(B.getTrue(), BlockA, BlockB);

  B.SetInsertPoint(BlockA);
  StoreInst *StoreA = B.CreateStore(B.getInt8(1), Alloca);
  B.CreateBr(BlockC);

  B.SetInsertPoint(BlockB);
  StoreInst *StoreB = B.CreateStore(B.getInt8(2), Alloca);
  B.CreateBr(BlockC);

  B.SetInsertPoint(BlockC);
  B.CreateUnreachable();

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;

  auto *AccessEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreEntry));
  auto *StoreAEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
  auto *StoreBEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));

  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
            AccessEntry);
  ASSERT_TRUE(StoreAEntry->isOptimized());

  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreBEntry),
            AccessEntry);
  ASSERT_TRUE(StoreBEntry->isOptimized());

  // Note that if we did InsertionPlace::Beginning, we don't go out of our way
  // to invalidate the cache for StoreBEntry. If the user wants to actually do
  // moves like these, it's up to them to ensure that nearby cache entries are
  // correctly invalidated (which, in general, requires walking all instructions
  // that the moved instruction dominates). So we probably shouldn't be doing
  // moves like this in general. Still, it works as a test case. ;)
  MemorySSAUpdater(&MSSA).moveToPlace(StoreAEntry, BlockB,
                                      MemorySSA::InsertionPlace::End);
  ASSERT_FALSE(StoreAEntry->isOptimized());
  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
            StoreBEntry);
}

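// Check that when a MemoryDef is optimized, the optimized access is recorded
// as a real use: the def must appear in the user lists of both its defining
// access and its optimized access.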
1231 TEST_F(MemorySSATest, TestOptimizedDefsAreProperUses) {
1232   F = Function::Create(
1233       FunctionType::get(B.getVoidTy(), {B.getPtrTy(), B.getPtrTy()}, false),
1234       GlobalValue::ExternalLinkage, "F", &M);
1235   B.SetInsertPoint(BasicBlock::Create(C, "", F));
1236   Type *Int8 = Type::getInt8Ty(C);
1237   Value *AllocA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
1238   Value *AllocB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
1239 
1240   StoreInst *StoreA = B.CreateStore(ConstantInt::get(Int8, 0), AllocA);
1241   StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 1), AllocB);
1242   StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocA);
1243 
1244   setupAnalyses();
1245   MemorySSA &MSSA = *Analyses->MSSA;
1246   MemorySSAWalker *Walker = Analyses->Walker;
1247 
1248   // If these don't hold, there's no chance of the test result being useful.
1249   ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA),
1250             MSSA.getLiveOnEntryDef());
1251   ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreB),
1252             MSSA.getLiveOnEntryDef());
1253   auto *StoreAAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
1254   auto *StoreA2Access = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA2));
1255   ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA2), StoreAAccess);
1256   ASSERT_EQ(StoreA2Access->getOptimized(), StoreAAccess);
1257 
1258   auto *StoreBAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
1259   ASSERT_LT(StoreAAccess->getID(), StoreBAccess->getID());
1260   ASSERT_LT(StoreBAccess->getID(), StoreA2Access->getID());
1261 
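  // The IDs are in program order (asserted above), so sorting user lists by ID
  // gives a deterministic order for the checks below.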
1262   auto SortVecByID = [](std::vector<const MemoryDef *> &Defs) {
1263     llvm::sort(Defs, [](const MemoryDef *LHS, const MemoryDef *RHS) {
1264       return LHS->getID() < RHS->getID();
1265     });
1266   };
1267 
1268   auto SortedUserList = [&](const MemoryDef *MD) {
1269     std::vector<const MemoryDef *> Result;
1270     transform(MD->users(), std::back_inserter(Result),
1271               [](const User *U) { return cast<MemoryDef>(U); });
1272     SortVecByID(Result);
1273     return Result;
1274   };
1275 
1276   // Use std::vectors, since they have nice pretty-printing if the test fails.
1277   // Parens are necessary because EXPECT_EQ is a macro, and we have commas in
1278   // our init lists...
1279   EXPECT_EQ(SortedUserList(StoreAAccess),
1280             (std::vector<const MemoryDef *>{StoreBAccess, StoreA2Access}));
1281 
1282   EXPECT_EQ(SortedUserList(StoreBAccess),
1283             std::vector<const MemoryDef *>{StoreA2Access});
1284 
1285   // StoreAAccess should be present twice, since it uses liveOnEntry for both
1286   // its defining and optimized accesses. This is a bit awkward, and is not
1287   // relied upon anywhere at the moment. If this is painful, we can fix it.
1288   EXPECT_EQ(SortedUserList(cast<MemoryDef>(MSSA.getLiveOnEntryDef())),
1289             (std::vector<const MemoryDef *>{StoreAAccess, StoreAAccess,
1290                                             StoreBAccess}));
1291 }
1292 
1293 //   entry
1294 //     |
1295 //   header
1296 //    / \
1297 // body  |
1298 //    \ /
1299 //    exit
1300 // header:
1301 //  ; 1 = MemoryDef(liveOnEntry)
1302 // body:
1303 //  ; 2 = MemoryDef(1)
1304 // exit:
1305 //  ; 3 = MemoryPhi({body, 2}, {header, 1})
1306 //  ; 4 = MemoryDef(3); optimized to 3, cannot optimize through phi.
1307 //  Insert edge: entry -> exit, check that the MemorySSA update is correct.
1308 TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiNotOpt) {
1309   F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
1310                        GlobalValue::ExternalLinkage, "F", &M);
1311   Argument *PointerArg = &*F->arg_begin();
1312   BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
1313   BasicBlock *Header(BasicBlock::Create(C, "header", F));
1314   BasicBlock *Body(BasicBlock::Create(C, "body", F));
1315   BasicBlock *Exit(BasicBlock::Create(C, "exit", F));
1316   B.SetInsertPoint(Entry);
1317   BranchInst::Create(Header, Entry);
1318   B.SetInsertPoint(Header);
1319   B.CreateStore(B.getInt8(16), PointerArg);
1320   B.CreateCondBr(B.getTrue(), Exit, Body);
1321   B.SetInsertPoint(Body);
1322   B.CreateStore(B.getInt8(16), PointerArg);
1323   BranchInst::Create(Exit, Body);
1324   B.SetInsertPoint(Exit);
1325   StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);
1326 
1327   setupAnalyses();
1328   MemorySSA &MSSA = *Analyses->MSSA;
1329   MemorySSAWalker *Walker = Analyses->Walker;
1330   std::unique_ptr<MemorySSAUpdater> MSSAU =
1331       std::make_unique<MemorySSAUpdater>(&MSSA);
1332 
1333   MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
1334   EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
1335 
1336   // Alter CFG, add edge: entry -> exit
1337   Entry->getTerminator()->eraseFromParent();
1338   B.SetInsertPoint(Entry);
1339   B.CreateCondBr(B.getTrue(), Header, Exit);
1340   SmallVector<CFGUpdate, 1> Updates;
1341   Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
1342   Analyses->DT.applyUpdates(Updates);
1343   MSSAU->applyInsertUpdates(Updates, Analyses->DT);
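  // S1 is clobbered by the phi both before and after the update; the walker
  // cannot optimize through the phi.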
1344   EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
1345 }
1346 
1347 //   entry
1348 //     |
1349 //   header
1350 //    / \
1351 // body  |
1352 //    \ /
1353 //    exit
1354 // header:
1355 //  ; 1 = MemoryDef(liveOnEntry)
1356 // body:
1357 //  ; 2 = MemoryDef(1)
1358 // exit:
1359 //  ; 3 = MemoryPhi({body, 2}, {header, 1})
1360 //  ; 4 = MemoryDef(3); optimize this to 1 now; the added edge should
1361 //  invalidate the optimized access.
1362 //  Insert edge: entry -> exit, check that the MemorySSA update is correct.
1363 TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiOpt) {
1364   F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
1365                        GlobalValue::ExternalLinkage, "F", &M);
1366   Argument *PointerArg = &*F->arg_begin();
1367   Type *Int8 = Type::getInt8Ty(C);
1368   BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
1369   BasicBlock *Header(BasicBlock::Create(C, "header", F));
1370   BasicBlock *Body(BasicBlock::Create(C, "body", F));
1371   BasicBlock *Exit(BasicBlock::Create(C, "exit", F));
1372 
1373   B.SetInsertPoint(Entry);
1374   Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
1375   BranchInst::Create(Header, Entry);
1376 
1377   B.SetInsertPoint(Header);
1378   StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);
1379   B.CreateCondBr(B.getTrue(), Exit, Body);
1380 
1381   B.SetInsertPoint(Body);
1382   B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
1383   BranchInst::Create(Exit, Body);
1384 
1385   B.SetInsertPoint(Exit);
1386   StoreInst *S2 = B.CreateStore(B.getInt8(16), PointerArg);
1387 
1388   setupAnalyses();
1389   MemorySSA &MSSA = *Analyses->MSSA;
1390   MemorySSAWalker *Walker = Analyses->Walker;
1391   std::unique_ptr<MemorySSAUpdater> MSSAU =
1392       std::make_unique<MemorySSAUpdater>(&MSSA);
1393 
1394   MemoryDef *DefS1 = cast<MemoryDef>(MSSA.getMemoryAccess(S1));
1395   EXPECT_EQ(DefS1, Walker->getClobberingMemoryAccess(S2));
1396 
1397   // Alter CFG, add edge: entry -> exit
1398   Entry->getTerminator()->eraseFromParent();
1399   B.SetInsertPoint(Entry);
1400   B.CreateCondBr(B.getTrue(), Header, Exit);
1401   SmallVector<CFGUpdate, 1> Updates;
1402   Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
1403   Analyses->DT.applyUpdates(Updates);
1404   MSSAU->applyInsertUpdates(Updates, Analyses->DT);
1405 
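  // The new entry -> exit edge gives exit another incoming path, so S2's
  // optimized access is invalidated and its clobber is now the MemoryPhi.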
1406   MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
1407   EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S2));
1408 }
1409 
1410 //   entry
1411 //    /  |
1412 //   a   |
1413 //  / \  |
1414 //  b c  f
1415 //  \ /  |
1416 //   d   |
1417 //    \ /
1418 //     e
1419 // f:
1420 //  ; 1 = MemoryDef(liveOnEntry)
1421 // e:
1422 //  ; 2 = MemoryPhi({d, liveOnEntry}, {f, 1})
1423 //
1424 // Insert edge: f -> c, check that the update is correct.
1425 // After update:
1426 // f:
1427 //  ; 1 = MemoryDef(liveOnEntry)
1428 // c:
1429 //  ; 3 = MemoryPhi({a, liveOnEntry}, {f, 1})
1430 // d:
1431 //  ; 4 = MemoryPhi({b, liveOnEntry}, {c, 3})
1432 // e:
1433 //  ; 2 = MemoryPhi({d, 4}, {f, 1})
1434 TEST_F(MemorySSATest, TestAddedEdgeToBlockWithNoPhiAddNewPhis) {
1435   F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
1436                        GlobalValue::ExternalLinkage, "F", &M);
1437   Argument *PointerArg = &*F->arg_begin();
1438   BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
1439   BasicBlock *ABlock(BasicBlock::Create(C, "a", F));
1440   BasicBlock *BBlock(BasicBlock::Create(C, "b", F));
1441   BasicBlock *CBlock(BasicBlock::Create(C, "c", F));
1442   BasicBlock *DBlock(BasicBlock::Create(C, "d", F));
1443   BasicBlock *EBlock(BasicBlock::Create(C, "e", F));
1444   BasicBlock *FBlock(BasicBlock::Create(C, "f", F));
1445 
1446   B.SetInsertPoint(Entry);
1447   B.CreateCondBr(B.getTrue(), ABlock, FBlock);
1448   B.SetInsertPoint(ABlock);
1449   B.CreateCondBr(B.getTrue(), BBlock, CBlock);
1450   B.SetInsertPoint(BBlock);
1451   BranchInst::Create(DBlock, BBlock);
1452   B.SetInsertPoint(CBlock);
1453   BranchInst::Create(DBlock, CBlock);
1454   B.SetInsertPoint(DBlock);
1455   BranchInst::Create(EBlock, DBlock);
1456   B.SetInsertPoint(FBlock);
1457   B.CreateStore(B.getInt8(16), PointerArg);
1458   BranchInst::Create(EBlock, FBlock);
1459 
1460   setupAnalyses();
1461   MemorySSA &MSSA = *Analyses->MSSA;
1462   std::unique_ptr<MemorySSAUpdater> MSSAU =
1463       std::make_unique<MemorySSAUpdater>(&MSSA);
1464 
1465   // Alter CFG, add edge: f -> c
1466   FBlock->getTerminator()->eraseFromParent();
1467   B.SetInsertPoint(FBlock);
1468   B.CreateCondBr(B.getTrue(), CBlock, EBlock);
1469   SmallVector<CFGUpdate, 1> Updates;
1470   Updates.push_back({cfg::UpdateKind::Insert, FBlock, CBlock});
1471   Analyses->DT.applyUpdates(Updates);
1472   MSSAU->applyInsertUpdates(Updates, Analyses->DT);
1473 
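  // With the new f -> c edge, MemoryPhis should have been created in c and d,
  // and e's phi should now take d's phi as its incoming value from d.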
1474   MemoryPhi *MPC = MSSA.getMemoryAccess(CBlock);
1475   EXPECT_NE(MPC, nullptr);
1476   MemoryPhi *MPD = MSSA.getMemoryAccess(DBlock);
1477   EXPECT_NE(MPD, nullptr);
1478   MemoryPhi *MPE = MSSA.getMemoryAccess(EBlock);
1479   EXPECT_EQ(MPD, MPE->getIncomingValueForBlock(DBlock));
1480 }
1481 
1482 TEST_F(MemorySSATest, TestCallClobber) {
1483   F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
1484                        GlobalValue::ExternalLinkage, "F", &M);
1485 
1486   Value *Pointer1 = &*F->arg_begin();
1487   BasicBlock *Entry(BasicBlock::Create(C, "", F));
1488   B.SetInsertPoint(Entry);
1489   Value *Pointer2 = B.CreatePtrAdd(Pointer1, B.getInt64(1));
1490   Instruction *StorePointer1 = B.CreateStore(B.getInt8(0), Pointer1);
1491   Instruction *StorePointer2 = B.CreateStore(B.getInt8(0), Pointer2);
1492   Instruction *MemSet = B.CreateMemSet(Pointer2, B.getInt8(0), 1, Align(1));
1493 
1494   setupAnalyses();
1495   MemorySSA &MSSA = *Analyses->MSSA;
1496   MemorySSAWalker *Walker = Analyses->Walker;
1497 
1498   MemoryUseOrDef *Store1Access = MSSA.getMemoryAccess(StorePointer1);
1499   MemoryUseOrDef *Store2Access = MSSA.getMemoryAccess(StorePointer2);
1500   MemoryUseOrDef *MemSetAccess = MSSA.getMemoryAccess(MemSet);
1501 
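  // The memset writes only Pointer2. For a query at the memset with an
  // explicit location: Pointer1's clobber is the store to Pointer1, and
  // Pointer2's clobber is the memset itself. Without a location, the query
  // returns the memset's own clobber, the store to Pointer2.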
1502   MemoryAccess *Pointer1Clobber = Walker->getClobberingMemoryAccess(
1503       MemSetAccess, MemoryLocation(Pointer1, LocationSize::precise(1)));
1504   EXPECT_EQ(Pointer1Clobber, Store1Access);
1505 
1506   MemoryAccess *Pointer2Clobber = Walker->getClobberingMemoryAccess(
1507       MemSetAccess, MemoryLocation(Pointer2, LocationSize::precise(1)));
1508   EXPECT_EQ(Pointer2Clobber, MemSetAccess);
1509 
1510   MemoryAccess *MemSetClobber = Walker->getClobberingMemoryAccess(MemSetAccess);
1511   EXPECT_EQ(MemSetClobber, Store2Access);
1512 }
1513 
1514 TEST_F(MemorySSATest, TestLoadClobber) {
1515   F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getPtrTy()}, false),
1516                        GlobalValue::ExternalLinkage, "F", &M);
1517 
1518   Value *Pointer1 = &*F->arg_begin();
1519   BasicBlock *Entry(BasicBlock::Create(C, "", F));
1520   B.SetInsertPoint(Entry);
1521   Value *Pointer2 = B.CreatePtrAdd(Pointer1, B.getInt64(1));
1522   Instruction *LoadPointer1 =
1523       B.CreateLoad(B.getInt8Ty(), Pointer1, /* Volatile */ true);
1524   Instruction *LoadPointer2 =
1525       B.CreateLoad(B.getInt8Ty(), Pointer2, /* Volatile */ true);
1526 
1527   setupAnalyses();
1528   MemorySSA &MSSA = *Analyses->MSSA;
1529   MemorySSAWalker *Walker = Analyses->Walker;
1530 
1531   MemoryUseOrDef *Load1Access = MSSA.getMemoryAccess(LoadPointer1);
1532   MemoryUseOrDef *Load2Access = MSSA.getMemoryAccess(LoadPointer2);
1533 
1534   // When providing a memory location, we should never return a load as the
1535   // clobber.
1536   MemoryAccess *Pointer1Clobber = Walker->getClobberingMemoryAccess(
1537       Load2Access, MemoryLocation(Pointer1, LocationSize::precise(1)));
1538   EXPECT_TRUE(MSSA.isLiveOnEntryDef(Pointer1Clobber));
1539 
1540   MemoryAccess *Pointer2Clobber = Walker->getClobberingMemoryAccess(
1541       Load2Access, MemoryLocation(Pointer2, LocationSize::precise(1)));
1542   EXPECT_TRUE(MSSA.isLiveOnEntryDef(Pointer2Clobber));
1543 
1544   MemoryAccess *Load2Clobber = Walker->getClobberingMemoryAccess(Load2Access);
1545   EXPECT_EQ(Load2Clobber, Load1Access);
1546 }
1547 
1548 // We want to test whether the location information is retained
1549 // when the IsGuaranteedLoopInvariant function handles a
1550 // memory access referring to a pointer defined in the entry
1551 // block, and hence automatically guaranteed to be loop invariant.
1552 TEST_F(MemorySSATest, TestLoopInvariantEntryBlockPointer) {
1553   SMDiagnostic E;
1554   auto LocalM =
1555       parseAssemblyString("define void @test(i64 %a0, i8* %a1, i1* %a2) {\n"
1556                           "entry:\n"
1557                           "%v0 = getelementptr i8, i8* %a1, i64 %a0\n"
1558                           "%v1 = bitcast i8* %v0 to i64*\n"
1559                           "%v2 = bitcast i8* %v0 to i32*\n"
1560                           "%v3 = load i1, i1* %a2\n"
1561                           "br i1 %v3, label %body, label %exit\n"
1562                           "body:\n"
1563                           "store i32 1, i32* %v2\n"
1564                           "br label %exit\n"
1565                           "exit:\n"
1566                           "store i64 0, i64* %v1\n"
1567                           "ret void\n"
1568                           "}",
1569                           E, C);
1570   ASSERT_TRUE(LocalM);
1571   F = LocalM->getFunction("test");
1572   ASSERT_TRUE(F);
1573   // Setup the analysis
1574   setupAnalyses();
1575   MemorySSA &MSSA = *Analyses->MSSA;
1576   // Find the exit block
1577   for (auto &BB : *F) {
1578     if (BB.getName() == "exit") {
1579       // Get the store instruction
1580       auto *SI = BB.getFirstNonPHI();
1581       // Get the memory access and location
1582       MemoryAccess *MA = MSSA.getMemoryAccess(SI);
1583       MemoryLocation ML = MemoryLocation::get(SI);
1584       // Use the 'upward_defs_iterator' which internally calls
1585       // IsGuaranteedLoopInvariant
1586       auto ItA = upward_defs_begin({MA, ML}, MSSA.getDomTree());
1587       auto ItB =
1588           upward_defs_begin({ItA->first, ItA->second}, MSSA.getDomTree());
1589       // Check that the location information has been retained
1590       EXPECT_TRUE(ItB->second.Size.isPrecise());
1591       EXPECT_TRUE(ItB->second.Size.hasValue());
1592       EXPECT_TRUE(ItB->second.Size.getValue() == 8);
1593     }
1594   }
1595 }
1596 
1597 TEST_F(MemorySSATest, TestInvariantGroup) {
1598   SMDiagnostic E;
1599   auto M = parseAssemblyString("declare void @f(i8*)\n"
1600                                "define i8 @test(i8* %p) {\n"
1601                                "entry:\n"
1602                                "  store i8 42, i8* %p, !invariant.group !0\n"
1603                                "  call void @f(i8* %p)\n"
1604                                "  %v = load i8, i8* %p, !invariant.group !0\n"
1605                                "  ret i8 %v\n"
1606                                "}\n"
1607                                "!0 = !{}",
1608                                E, C);
1609   ASSERT_TRUE(M);
1610   F = M->getFunction("test");
1611   ASSERT_TRUE(F);
1612   setupAnalyses();
1613   MemorySSA &MSSA = *Analyses->MSSA;
1614   MemorySSAWalker *Walker = Analyses->Walker;
1615 
1616   auto &BB = F->getEntryBlock();
1617   auto &SI = cast<StoreInst>(*BB.begin());
1618   auto &Call = cast<CallBase>(*std::next(BB.begin()));
1619   auto &LI = cast<LoadInst>(*std::next(std::next(BB.begin())));
1620 
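  // Thanks to !invariant.group on both the store and the load, the load's
  // clobber should be the store, skipping the intervening call.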
1621   {
1622     MemoryAccess *SAccess = MSSA.getMemoryAccess(&SI);
1623     MemoryAccess *LAccess = MSSA.getMemoryAccess(&LI);
1624     MemoryAccess *SClobber = Walker->getClobberingMemoryAccess(SAccess);
1625     EXPECT_TRUE(MSSA.isLiveOnEntryDef(SClobber));
1626     MemoryAccess *LClobber = Walker->getClobberingMemoryAccess(LAccess);
1627     EXPECT_EQ(SAccess, LClobber);
1628   }
1629 
1630   // Remove the store and verify that the memory accesses still make sense.
1631   MemorySSAUpdater Updater(&MSSA);
1632   Updater.removeMemoryAccess(&SI);
1633   SI.eraseFromParent();
1634 
1635   {
1636     MemoryAccess *CallAccess = MSSA.getMemoryAccess(&Call);
1637     MemoryAccess *LAccess = MSSA.getMemoryAccess(&LI);
1638     MemoryAccess *LClobber = Walker->getClobberingMemoryAccess(LAccess);
1639     EXPECT_EQ(CallAccess, LClobber);
1640   }
1641 }
1642 
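// Finds the basic block in F with the given name; it must exist.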
1643 static BasicBlock *getBasicBlockByName(Function &F, StringRef Name) {
1644   for (BasicBlock &BB : F)
1645     if (BB.getName() == Name)
1646       return &BB;
1647   llvm_unreachable("Expected to find basic block!");
1648 }
1649 
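// Finds the instruction in F with the given value name; it must exist.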
1650 static Instruction *getInstructionByName(Function &F, StringRef Name) {
1651   for (BasicBlock &BB : F)
1652     for (Instruction &I : BB)
1653       if (I.getName() == Name)
1654         return &I;
1655   llvm_unreachable("Expected to find instruction!");
1656 }
1657 
1658 TEST_F(MemorySSATest, TestVisitedBlocks) {
1659   SMDiagnostic E;
1660   auto M = parseAssemblyString(
1661       "define void @test(i64* noalias %P, i64 %N) {\n"
1662       "preheader.n:\n"
1663       "  br label %header.n\n"
1664       "header.n:\n"
1665       "  %n = phi i64 [ 0, %preheader.n ], [ %inc.n, %latch.n ]\n"
1666       "  %guard.cond.i = icmp slt i64 0, %N\n"
1667       "  br i1 %guard.cond.i, label %header.i.check, label %other.i\n"
1668       "header.i.check:\n"
1669       "  br label %preheader.i\n"
1670       "preheader.i:\n"
1671       "  br label %header.i\n"
1672       "header.i:\n"
1673       "  %i = phi i64 [ 0, %preheader.i ], [ %inc.i, %header.i ]\n"
1674       "  %v1 = load i64, i64* %P, align 8\n"
1675       "  %v2 = load i64, i64* %P, align 8\n"
1676       "  %inc.i = add nsw i64 %i, 1\n"
1677       "  %cmp.i = icmp slt i64 %inc.i, %N\n"
1678       "  br i1 %cmp.i, label %header.i, label %exit.i\n"
1679       "exit.i:\n"
1680       "  br label %commonexit\n"
1681       "other.i:\n"
1682       "  br label %commonexit\n"
1683       "commonexit:\n"
1684       "  br label %latch.n\n"
1685       "latch.n:\n"
1686       "  %inc.n = add nsw i64 %n, 1\n"
1687       "  %cmp.n = icmp slt i64 %inc.n, %N\n"
1688       "  br i1 %cmp.n, label %header.n, label %exit.n\n"
1689       "exit.n:\n"
1690       "  ret void\n"
1691       "}\n",
1692       E, C);
1693   ASSERT_TRUE(M);
1694   F = M->getFunction("test");
1695   ASSERT_TRUE(F);
1696   setupAnalyses();
1697   MemorySSA &MSSA = *Analyses->MSSA;
1698   MemorySSAUpdater Updater(&MSSA);
1699 
1700   {
1701     // Move %v1 before the terminator of %header.i.check
1702     BasicBlock *BB = getBasicBlockByName(*F, "header.i.check");
1703     Instruction *LI = getInstructionByName(*F, "v1");
1704     LI->moveBefore(BB->getTerminator());
1705     if (MemoryUseOrDef *MUD = MSSA.getMemoryAccess(LI))
1706       Updater.moveToPlace(MUD, BB, MemorySSA::BeforeTerminator);
1707 
1708     // Change the terminator of %header.i.check to `br i1 true, label
1709     // %preheader.i, label %other.i`
1710     BB->getTerminator()->eraseFromParent();
1711     ConstantInt *BoolTrue = ConstantInt::getTrue(F->getContext());
1712     BranchInst::Create(getBasicBlockByName(*F, "preheader.i"),
1713                        getBasicBlockByName(*F, "other.i"), BoolTrue, BB);
1714     SmallVector<DominatorTree::UpdateType, 4> DTUpdates;
1715     DTUpdates.push_back(DominatorTree::UpdateType(
1716         DominatorTree::Insert, BB, getBasicBlockByName(*F, "other.i")));
1717     Updater.applyUpdates(DTUpdates, Analyses->DT, true);
1718   }
1719 
1720   // After the first moveToPlace(), %other.i is in VisitedBlocks, and it stays
1721   // there even after a new edge to %other.i is added, which makes the second
1722   // moveToPlace() traverse incorrectly.
1723   {
1724     // Move %v2 before the terminator of %preheader.i
1725     BasicBlock *BB = getBasicBlockByName(*F, "preheader.i");
1726     Instruction *LI = getInstructionByName(*F, "v2");
1727     LI->moveBefore(BB->getTerminator());
1728     // Check that we do not hit the "Incomplete phi during partial rename"
1729     // assertion.
1730     if (MemoryUseOrDef *MUD = MSSA.getMemoryAccess(LI))
1731       Updater.moveToPlace(MUD, BB, MemorySSA::BeforeTerminator);
1732   }
1733 }
1734 
1735 TEST_F(MemorySSATest, TestNoDbgInsts) {
1736   SMDiagnostic E;
1737   auto M = parseAssemblyString(R"(
1738       define void @test() presplitcoroutine {
1739       entry:
1740         %i = alloca i32
1741         call void @llvm.dbg.declare(metadata ptr %i, metadata !6, metadata !DIExpression()), !dbg !10
1742         call void @llvm.dbg.value(metadata ptr %i, metadata !6, metadata !DIExpression()), !dbg !10
1743         ret void
1744       }
1745 
1746       declare void @llvm.dbg.declare(metadata, metadata, metadata) nocallback nofree nosync nounwind readnone speculatable willreturn
1747       declare void @llvm.dbg.value(metadata, metadata, metadata) nocallback nofree nosync nounwind readnone speculatable willreturn
1748 
1749       !llvm.dbg.cu = !{!0}
1750 
1751       !0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, producer: "clang version 15.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !2, splitDebugInlining: false, nameTableKind: None)
1752       !1 = !DIFile(filename: "repro.cpp", directory: ".")
1753       !2 = !{}
1754       !3 = !{i32 7, !"Dwarf Version", i32 4}
1755       !4 = !{i32 2, !"Debug Info Version", i32 3}
1756       !5 = !{!"clang version 15.0.0"}
1757       !6 = !DILocalVariable(name: "i", scope: !7, file: !1, line: 24, type: !10)
1758       !7 = distinct !DILexicalBlock(scope: !8, file: !1, line: 23, column: 12)
1759       !8 = distinct !DISubprogram(name: "foo", linkageName: "_Z3foov", scope: !1, file: !1, line: 23, type: !9, scopeLine: 23, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !2)
1760       !9 = !DISubroutineType(types: !2)
1761       !10 = !DILocation(line: 24, column: 7, scope: !7)
1762     )",
1763     E, C);
1764   ASSERT_TRUE(M);
1765   F = M->getFunction("test");
1766   ASSERT_TRUE(F);
1767   setupAnalyses();
1768   MemorySSA &MSSA = *Analyses->MSSA;
1769   MemorySSAUpdater Updater(&MSSA);
1770 
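  // Debug intrinsics do not access memory, so MemorySSA should not create
  // memory accesses for them.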
1771   BasicBlock &Entry = F->front();
1772   auto I = Entry.begin();
1773   Instruction *DbgDeclare = cast<Instruction>(I++);
1774   Instruction *DbgValue = cast<Instruction>(I++);
1775   ASSERT_EQ(MSSA.getMemoryAccess(DbgDeclare), nullptr);
1776   ASSERT_EQ(MSSA.getMemoryAccess(DbgValue), nullptr);
1777 }
1778