//===- MemorySSA.cpp - Unit tests for MemorySSA ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "gtest/gtest.h"

using namespace llvm;

const static char DLString[] = "e-i64:64-f80:128-n8:16:32:64-S128";

/// There's a lot of common setup between these tests. This fixture helps reduce
/// that. Tests should mock up a function, store it in F, and then call
/// setupAnalyses().
class MemorySSATest : public testing::Test {
protected:
  // N.B. Many of these members depend on each other (e.g. the Module depends
  // on the Context, etc.). So, order matters here (and in TestAnalyses).
  LLVMContext C;
  Module M;
  IRBuilder<> B;
  DataLayout DL;
  TargetLibraryInfoImpl TLII;
  TargetLibraryInfo TLI;
  Function *F;

  // Things that we need to build after the function is created.
  struct TestAnalyses {
    DominatorTree DT;
    AssumptionCache AC;
    AAResults AA;
    BasicAAResult BAA;
    // We need to defer MSSA construction until AA is *entirely* set up, which
    // requires calling addAAResult. Hence, we just use a pointer here.
    std::unique_ptr<MemorySSA> MSSA;
    MemorySSAWalker *Walker;

    TestAnalyses(MemorySSATest &Test)
        : DT(*Test.F), AC(*Test.F), AA(Test.TLI),
          BAA(Test.DL, *Test.F, Test.TLI, AC, &DT) {
      AA.addAAResult(BAA);
      MSSA = std::make_unique<MemorySSA>(*Test.F, &AA, &DT);
      Walker = MSSA->getWalker();
    }
  };

  std::unique_ptr<TestAnalyses> Analyses;

  void setupAnalyses() {
    assert(F);
    Analyses.reset(new TestAnalyses(*this));
  }

public:
  MemorySSATest()
      : M("MemorySSATest", C), B(C), DL(DLString), TLI(TLII), F(nullptr) {}
};

TEST_F(MemorySSATest, CreateALoad) {
  // We create a diamond where there is a store on one side, and then, after
  // building MemorySSA, create a load after the merge point, and use it to
  // test updating by creating an access for the load.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  Argument *PointerArg = &*F->arg_begin();
  B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Add the load
  B.SetInsertPoint(Merge);
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // MemoryPHI should already exist.
  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
  EXPECT_NE(MP, nullptr);

  // Create the load memory access
  MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LoadInst, MP, Merge, MemorySSA::Beginning));
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
  // We create a diamond, then build MemorySSA with no memory accesses, and
  // incrementally update it by inserting a store in the entry, a load in the
  // merge point, then a store in the left branch, another load in the merge
  // point, and then another store in the entry.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left, Left->begin());
  Argument *PointerArg = &*F->arg_begin();
  B.SetInsertPoint(Left);
  B.CreateBr(Merge);
  B.SetInsertPoint(Right);
  B.CreateBr(Merge);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Add the store
  B.SetInsertPoint(Entry, Entry->begin());
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB(
      EntryStore, nullptr, Entry, MemorySSA::Beginning);
  Updater.insertDef(cast<MemoryDef>(EntryStoreAccess));

  // Add the load
  B.SetInsertPoint(Merge, Merge->begin());
  LoadInst *FirstLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // MemoryPHI should not already exist.
  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
  EXPECT_EQ(MP, nullptr);

  // Create the load memory access
  MemoryUse *FirstLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      FirstLoad, nullptr, Merge, MemorySSA::Beginning));
  Updater.insertUse(FirstLoadAccess);
  // Should just have a load using the entry access, because it should discover
  // that the phi is trivial.
  EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), EntryStoreAccess);

  // Create a store on the left
  B.SetInsertPoint(Left, Left->begin());
  StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB(
      LeftStore, nullptr, Left, MemorySSA::Beginning);
  Updater.insertDef(cast<MemoryDef>(LeftStoreAccess), false);

  // MemoryPHI should exist after adding LeftStore.
  MP = MSSA.getMemoryAccess(Merge);
  EXPECT_NE(MP, nullptr);

  // Add the second load
  B.SetInsertPoint(Merge, Merge->begin());
  LoadInst *SecondLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // Create the load memory access
  MemoryUse *SecondLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      SecondLoad, nullptr, Merge, MemorySSA::Beginning));
  Updater.insertUse(SecondLoadAccess);
  // Now the load should be a phi of the entry store and the left store
  MemoryPhi *MergePhi =
      dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
  EXPECT_NE(MergePhi, nullptr);
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
  // Now create a store below the existing one in the entry
  B.SetInsertPoint(Entry, --Entry->end());
  StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB(
      SecondEntryStore, nullptr, Entry, MemorySSA::End);
  // Insert it twice just to test renaming
  Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), false);
  EXPECT_NE(FirstLoadAccess->getDefiningAccess(), MergePhi);
  Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), true);
  EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), MergePhi);
  // Also make sure the phi below it got updated, despite being blocks away
  MergePhi = dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
  EXPECT_NE(MergePhi, nullptr);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SecondEntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, CreateALoadUpdater) {
  // We create a diamond, then build MemorySSA with no memory accesses, and
  // incrementally update it by inserting a store in one of the branches and a
  // load in the merge point.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left, Left->begin());
  Argument *PointerArg = &*F->arg_begin();
  B.SetInsertPoint(Left);
  B.CreateBr(Merge);
  B.SetInsertPoint(Right);
  B.CreateBr(Merge);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  B.SetInsertPoint(Left, Left->begin());
  // Add the store
  StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *StoreAccess =
      Updater.createMemoryAccessInBB(SI, nullptr, Left, MemorySSA::Beginning);
  Updater.insertDef(cast<MemoryDef>(StoreAccess));

  // MemoryPHI should be created when inserting the def
  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
  EXPECT_NE(MP, nullptr);

  // Add the load
  B.SetInsertPoint(Merge, Merge->begin());
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // Create the load memory access
  MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LoadInst, nullptr, Merge, MemorySSA::Beginning));
  Updater.insertUse(LoadAccess);
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, SinkLoad) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left, Left->begin());
  Argument *PointerArg = &*F->arg_begin();
  B.SetInsertPoint(Left);
  B.CreateBr(Merge);
  B.SetInsertPoint(Right);
  B.CreateBr(Merge);

  // Load in left block
  B.SetInsertPoint(Left, Left->begin());
  LoadInst *LoadInst1 = B.CreateLoad(B.getInt8Ty(), PointerArg);
  // Store in merge block
  B.SetInsertPoint(Merge, Merge->begin());
  B.CreateStore(B.getInt8(16), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Mimic sinking of a load:
  // - clone load
  // - insert in "exit" block
  // - insert in mssa
  // - remove from original block

  LoadInst *LoadInstClone = cast<LoadInst>(LoadInst1->clone());
  Merge->getInstList().insert(Merge->begin(), LoadInstClone);
  MemoryAccess *NewLoadAccess =
      Updater.createMemoryAccessInBB(LoadInstClone, nullptr,
                                     LoadInstClone->getParent(),
                                     MemorySSA::Beginning);
  Updater.insertUse(cast<MemoryUse>(NewLoadAccess));
  MSSA.verifyMemorySSA();
  Updater.removeMemoryAccess(MSSA.getMemoryAccess(LoadInst1));
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveAStore) {
  // We create a diamond where there is a store in the entry, a store on one
  // side, and a load at the end. After building MemorySSA, we test updating by
  // moving the store from the side block to the entry block. This destroys the
  // old access.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  StoreInst *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  B.CreateLoad(B.getInt8Ty(), PointerArg);
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Move the store
  SideStore->moveBefore(Entry->getTerminator());
  MemoryAccess *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  MemoryAccess *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  MemoryAccess *NewStoreAccess = Updater.createMemoryAccessAfter(
      SideStore, EntryStoreAccess, EntryStoreAccess);
  EntryStoreAccess->replaceAllUsesWith(NewStoreAccess);
  Updater.removeMemoryAccess(SideStoreAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveAStoreUpdater) {
  // We create a diamond where there is a store in the entry, a store on one
  // side, and a load at the end.
  // After building MemorySSA, we test updating by moving the store from the
  // side block to the entry block. This destroys the old access.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Move the store
  SideStore->moveBefore(Entry->getTerminator());
  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  auto *NewStoreAccess = Updater.createMemoryAccessAfter(
      SideStore, EntryStoreAccess, EntryStoreAccess);
  // Before, the load will point to a phi of the EntryStore and SideStore.
  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  Updater.removeMemoryAccess(SideStoreAccess);
  Updater.insertDef(cast<MemoryDef>(NewStoreAccess));
  // After, it's a phi of the new side store access.
  EXPECT_EQ(MergePhi->getIncomingValue(0), NewStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), NewStoreAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveAStoreUpdaterMove) {
  // We create a diamond where there is a store in the entry, a store on one
  // side, and a load at the end. After building MemorySSA, we test updating by
  // moving the store from the side block to the entry block. This does not
  // destroy the old access.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Move the store
  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  // Before, the load will point to a phi of the EntryStore and SideStore.
  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  SideStore->moveBefore(*EntryStore->getParent(), ++EntryStore->getIterator());
  Updater.moveAfter(SideStoreAccess, EntryStoreAccess);
  // After, it's a phi of the side store.
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);

  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveAStoreAllAround) {
  // We create a diamond where there is a store in the entry, a store on one
  // side, and a load at the end. After building MemorySSA, we test updating by
  // moving the store from the side block to the entry block, then to the other
  // side block, then to before the load. This does not destroy the old access.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Move the store
  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  // Before, the load will point to a phi of the EntryStore and SideStore.
  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  // Move the store before the entry store
  SideStore->moveBefore(*EntryStore->getParent(), EntryStore->getIterator());
  Updater.moveBefore(SideStoreAccess, EntryStoreAccess);
  // After, it's a phi of the entry store.
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  MSSA.verifyMemorySSA();
  // Now move the store to the right branch
  SideStore->moveBefore(*Right, Right->begin());
  Updater.moveToPlace(SideStoreAccess, Right, MemorySSA::Beginning);
  MSSA.verifyMemorySSA();
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
  // Now move it before the load
  SideStore->moveBefore(MergeLoad);
  Updater.moveBefore(SideStoreAccess, LoadAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, RemoveAPhi) {
  // We create a diamond where there is a store on one side, and then a load
  // after the merge point. This enables us to test a bunch of different
  // removal cases.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Before, the load will be a use of a phi<store, liveonentry>.
  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
  MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  // Kill the store
  Updater.removeMemoryAccess(StoreAccess);
  MemoryPhi *MP = cast<MemoryPhi>(DefiningAccess);
  // Verify the phi ended up as liveonentry, liveonentry
  for (auto &Op : MP->incoming_values())
    EXPECT_TRUE(MSSA.isLiveOnEntryDef(cast<MemoryAccess>(Op.get())));
  // Replace the phi uses with the live on entry def
  MP->replaceAllUsesWith(MSSA.getLiveOnEntryDef());
  // Verify the load is now defined by liveOnEntryDef
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
  // Remove the PHI
  Updater.removeMemoryAccess(MP);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, RemoveMemoryAccess) {
  // We create a diamond where there is a store on one side, and then a load
  // after the merge point. This enables us to test a bunch of different
  // removal cases.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  MemorySSAUpdater Updater(&MSSA);

  // Before, the load will be a use of a phi<store, liveonentry>. It should be
  // the same after.
  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
  MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  // The load is currently clobbered by one of the phi arguments, so the walker
  // should determine the clobbering access as the phi.
  EXPECT_EQ(DefiningAccess, Walker->getClobberingMemoryAccess(LoadInst));
  Updater.removeMemoryAccess(StoreAccess);
  MSSA.verifyMemorySSA();
  // After the removal, let's see if we got the right accesses.
  // The load should still point to the phi ...
  EXPECT_EQ(DefiningAccess, LoadAccess->getDefiningAccess());
  // ... but we should now get live on entry for the clobbering definition of
  // the load, since the walk will go past the phi node, as every argument is
  // the same.
  // XXX: This currently requires either removing the phi or resetting
  // optimized on the load.

  EXPECT_FALSE(
      MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
  // If we reset optimized, we get live on entry.
  LoadAccess->resetOptimized();
  EXPECT_TRUE(
      MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
  // The phi should now be a two entry phi with two live on entry defs.
  for (const auto &Op : DefiningAccess->operands()) {
    MemoryAccess *Operand = cast<MemoryAccess>(&*Op);
    EXPECT_TRUE(MSSA.isLiveOnEntryDef(Operand));
  }

  // Now we try to remove the single valued phi
  Updater.removeMemoryAccess(DefiningAccess);
  MSSA.verifyMemorySSA();
  // Now the load should be a load of live on entry.
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
}

// We had a bug with caching where the walker would report MemoryDef#3's clobber
// (below) was MemoryDef#1.
//
// define void @F(i8*) {
//   %A = alloca i8, i8 1
//   ; 1 = MemoryDef(liveOnEntry)
//   store i8 0, i8* %A
//   ; 2 = MemoryDef(1)
//   store i8 1, i8* %A
//   ; 3 = MemoryDef(2)
//   store i8 2, i8* %A
// }
TEST_F(MemorySSATest, TestTripleStore) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  StoreInst *S1 = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
  StoreInst *S2 = B.CreateStore(ConstantInt::get(Int8, 1), Alloca);
  StoreInst *S3 = B.CreateStore(ConstantInt::get(Int8, 2), Alloca);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  unsigned I = 0;
  for (StoreInst *V : {S1, S2, S3}) {
    // Everything should be clobbered by its defining access
    MemoryAccess *DefiningAccess = MSSA.getMemoryAccess(V)->getDefiningAccess();
    MemoryAccess *WalkerClobber = Walker->getClobberingMemoryAccess(V);
    EXPECT_EQ(DefiningAccess, WalkerClobber)
        << "Store " << I << " doesn't have the correct clobbering access";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}

// ...And fixing the above bug made it obvious that, when walking, MemorySSA's
// walker was caching the initial node it walked. This was fine (albeit
// mostly redundant) unless the initial node being walked is a clobber for the
// query. In that case, we'd cache that the node clobbered itself.
TEST_F(MemorySSATest, TestStoreAndLoad) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Instruction *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
  Instruction *LI = B.CreateLoad(Int8, Alloca);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LI);
  EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SI));
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SI)));
}

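// NOTE: The following test is an additional illustrative sketch, not part of
// the original MemorySSA unit tests. It shows the location-based
// getClobberingMemoryAccess overload (also used later in this file): when we
// query a load's clobber with respect to an *unrelated* alloca's location, the
// walk should skip the store and reach liveOnEntry. The test name is made up;
// only APIs already exercised elsewhere in this file are used.
TEST_F(MemorySSATest, SketchLocationBasedWalkSkipsUnrelatedStore) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
  // Store to A, then load A. The load is clobbered by the store...
  Instruction *SI = B.CreateStore(ConstantInt::get(Int8, 0), AllocaA);
  LoadInst *LI = B.CreateLoad(Int8, AllocaA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  MemoryAccess *LoadAccess = MSSA.getMemoryAccess(LI);
  EXPECT_EQ(Walker->getClobberingMemoryAccess(LI), MSSA.getMemoryAccess(SI));
  // ...but with respect to B's location nothing clobbers it, so the walk
  // should reach liveOnEntry.
  MemoryAccess *ClobberForB = Walker->getClobberingMemoryAccess(
      LoadAccess, MemoryLocation::getAfter(AllocaB));
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(ClobberForB));
}
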
// Another bug (related to the above two fixes): It was noted that, given the
// following code:
//   ; 1 = MemoryDef(liveOnEntry)
//   store i8 0, i8* %1
//
// ...A query to getClobberingMemoryAccess(MemoryAccess*, MemoryLocation) would
// hand back the store (correctly). A later call to
// getClobberingMemoryAccess(const Instruction*) would also hand back the store
// (incorrectly; it should return liveOnEntry).
//
// This test checks that repeated calls to either function return what they're
// meant to.
TEST_F(MemorySSATest, TestStoreDoubleQuery) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  StoreInst *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  MemoryAccess *StoreAccess = MSSA.getMemoryAccess(SI);
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  MemoryAccess *Clobber =
      Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
  MemoryAccess *LiveOnEntry = Walker->getClobberingMemoryAccess(SI);

  EXPECT_EQ(Clobber, StoreAccess);
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));

  // Try again (with entries in the cache already) for good measure...
  Clobber = Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
  LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
  EXPECT_EQ(Clobber, StoreAccess);
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
}

// Bug: During phi optimization, the walker wouldn't cache to the proper result
// in the farthest-walked BB.
//
// Specifically, it would assume that whatever we walked to was a clobber.
// "Whatever we walked to" isn't a clobber if we hit a cache entry.
//
// ...So, we need a test case that looks like:
//    A
//   / \
//  B   |
//   \ /
//    C
//
// Where, when we try to optimize a thing in 'C', a blocker is found in 'B'.
// The walk must determine that the blocker exists by using cache entries
// *while walking* 'B'.
TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "A", F));
  Type *Int8 = Type::getInt8Ty(C);
  Constant *One = ConstantInt::get(Int8, 1);
  Constant *Zero = ConstantInt::get(Int8, 0);
  Value *AllocA = B.CreateAlloca(Int8, One, "a");
  Value *AllocB = B.CreateAlloca(Int8, One, "b");
  BasicBlock *IfThen = BasicBlock::Create(C, "B", F);
  BasicBlock *IfEnd = BasicBlock::Create(C, "C", F);

  B.CreateCondBr(UndefValue::get(Type::getInt1Ty(C)), IfThen, IfEnd);

  B.SetInsertPoint(IfThen);
  Instruction *FirstStore = B.CreateStore(Zero, AllocA);
  B.CreateStore(Zero, AllocB);
  Instruction *ALoad0 = B.CreateLoad(Int8, AllocA, "");
  Instruction *BStore = B.CreateStore(Zero, AllocB);
  // Due to use optimization/etc. we make a store to A, which is removed after
  // we build MSSA. This helps keep the test case simple-ish.
  Instruction *KillStore = B.CreateStore(Zero, AllocA);
  Instruction *ALoad = B.CreateLoad(Int8, AllocA, "");
  B.CreateBr(IfEnd);

  B.SetInsertPoint(IfEnd);
  Instruction *BelowPhi = B.CreateStore(Zero, AllocA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  MemorySSAUpdater Updater(&MSSA);

  // Kill `KillStore`; it exists solely so that the load after it won't be
  // optimized to FirstStore.
  Updater.removeMemoryAccess(MSSA.getMemoryAccess(KillStore));
  KillStore->eraseFromParent();
  auto *ALoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(ALoad));
  EXPECT_EQ(ALoadMA->getDefiningAccess(), MSSA.getMemoryAccess(BStore));

  // Populate the cache for the store to AllocB directly after FirstStore. It
  // should point to something in block B (so something in C can't be optimized
  // to it).
  MemoryAccess *Load0Clobber = Walker->getClobberingMemoryAccess(ALoad0);
  EXPECT_EQ(MSSA.getMemoryAccess(FirstStore), Load0Clobber);

  // If the bug exists, this will introduce a bad cache entry for %a on BStore.
  // It will point to the store to %b after FirstStore. This only happens
  // during phi optimization.
  MemoryAccess *BottomClobber = Walker->getClobberingMemoryAccess(BelowPhi);
  MemoryAccess *Phi = MSSA.getMemoryAccess(IfEnd);
  EXPECT_EQ(BottomClobber, Phi);

  // This query will first check the cache for {%a, BStore}. It should point to
  // FirstStore, not to the store after FirstStore.
  MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(ALoad);
  EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore));
}

// Test that our walker properly handles loads with !invariant.load metadata.
// It's a bit hacky, since we add the metadata *after* building MSSA.
// Otherwise, the use optimizer will optimize the load for us, which isn't what
// we want.
// FIXME: It may be easier/cleaner to just add an 'optimize uses?' flag to MSSA.
TEST_F(MemorySSATest, WalkerInvariantLoadOpt) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Constant *One = ConstantInt::get(Int8, 1);
  Value *AllocA = B.CreateAlloca(Int8, One, "");

  Instruction *Store = B.CreateStore(One, AllocA);
  Instruction *Load = B.CreateLoad(Int8, AllocA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  auto *LoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(Load));
  auto *StoreMA = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
  EXPECT_EQ(LoadMA->getDefiningAccess(), StoreMA);

  // ...At the time of writing, no cache should exist for LoadMA. Be a bit
  // flexible to future changes.
  Walker->invalidateInfo(LoadMA);
  Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(C, {}));

  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LoadMA);
  EXPECT_EQ(LoadClobber, MSSA.getLiveOnEntryDef());
}

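// NOTE: Another additional sketch, not from the original test suite. It spells
// out the remove-then-erase pattern used repeatedly in this file (call
// MemorySSAUpdater::removeMemoryAccess before eraseFromParent) and checks that
// a dependent MemoryUse is re-pointed at the removed def's defining access,
// which is liveOnEntry here. The test name is invented for illustration.
TEST_F(MemorySSATest, SketchRemoveDefRepointsUse) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  StoreInst *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
  LoadInst *LI = B.CreateLoad(Int8, Alloca);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LI));
  MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(SI));
  EXPECT_EQ(LoadAccess->getDefiningAccess(), StoreAccess);

  // Remove the store's access first, then the instruction itself.
  Updater.removeMemoryAccess(StoreAccess);
  SI->eraseFromParent();

  // The load should now be defined by liveOnEntry.
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
  MSSA.verifyMemorySSA();
}
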
// Test loads get reoptimized properly by the walker.
TEST_F(MemorySSATest, WalkerReopt) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Instruction *SIA = B.CreateStore(ConstantInt::get(Int8, 0), AllocaA);
  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
  Instruction *SIB = B.CreateStore(ConstantInt::get(Int8, 0), AllocaB);
  Instruction *LIA = B.CreateLoad(Int8, AllocaA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  MemorySSAUpdater Updater(&MSSA);

  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LIA);
  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LIA));
  EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SIA));
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SIA)));
  Updater.removeMemoryAccess(LoadAccess);

  // Create the load memory access pointing to an unoptimized place.
  MemoryUse *NewLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LIA, MSSA.getMemoryAccess(SIB), LIA->getParent(), MemorySSA::End));
  // This should cause it to be optimized
  EXPECT_EQ(Walker->getClobberingMemoryAccess(NewLoadAccess), LoadClobber);
  EXPECT_EQ(NewLoadAccess->getDefiningAccess(), LoadClobber);
}

// Test out MemorySSAUpdater::moveBefore
TEST_F(MemorySSATest, MoveAboveMemoryDef) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));

  Type *Int8 = Type::getInt8Ty(C);
  Value *A = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *B_ = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
  Value *C = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");

  StoreInst *StoreA0 = B.CreateStore(ConstantInt::get(Int8, 0), A);
  StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 0), B_);
  LoadInst *LoadB = B.CreateLoad(Int8, B_);
  StoreInst *StoreA1 = B.CreateStore(ConstantInt::get(Int8, 4), A);
  StoreInst *StoreC = B.CreateStore(ConstantInt::get(Int8, 4), C);
  StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 4), A);
  LoadInst *LoadC = B.CreateLoad(Int8, C);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker &Walker = *Analyses->Walker;

  MemorySSAUpdater Updater(&MSSA);
  StoreC->moveBefore(StoreB);
  Updater.moveBefore(cast<MemoryDef>(MSSA.getMemoryAccess(StoreC)),
                     cast<MemoryDef>(MSSA.getMemoryAccess(StoreB)));

  MSSA.verifyMemorySSA();

  EXPECT_EQ(MSSA.getMemoryAccess(StoreB)->getDefiningAccess(),
            MSSA.getMemoryAccess(StoreC));
  EXPECT_EQ(MSSA.getMemoryAccess(StoreC)->getDefiningAccess(),
            MSSA.getMemoryAccess(StoreA0));
  EXPECT_EQ(MSSA.getMemoryAccess(StoreA2)->getDefiningAccess(),
            MSSA.getMemoryAccess(StoreA1));
  EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadB),
            MSSA.getMemoryAccess(StoreB));
  EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadC),
            MSSA.getMemoryAccess(StoreC));

  // exercise block numbering
  EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreC),
                                    MSSA.getMemoryAccess(StoreB)));
  EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreA1),
                                    MSSA.getMemoryAccess(StoreA2)));
}

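// NOTE: A companion sketch for MemorySSAUpdater::moveAfter within a single
// block, mirroring the moveBefore exercise above. It is not part of the
// original test suite; the test name is made up, and it only relies on APIs
// already used in this file (Instruction::moveAfter, Updater.moveAfter, the
// walker, and verifyMemorySSA).
TEST_F(MemorySSATest, SketchMoveAfterMemoryDef) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));

  Type *Int8 = Type::getInt8Ty(C);
  Value *A = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *B_ = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");

  StoreInst *StoreA = B.CreateStore(ConstantInt::get(Int8, 0), A);
  StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 0), B_);
  LoadInst *LoadA = B.CreateLoad(Int8, A);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker &Walker = *Analyses->Walker;
  MemorySSAUpdater Updater(&MSSA);

  // Move the store to A after the store to B, in both the IR and MSSA.
  StoreA->moveAfter(StoreB);
  Updater.moveAfter(cast<MemoryDef>(MSSA.getMemoryAccess(StoreA)),
                    cast<MemoryDef>(MSSA.getMemoryAccess(StoreB)));

  MSSA.verifyMemorySSA();
  // The load of A should still be clobbered by the store to A, and the moved
  // def should now be defined by the store to B.
  EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadA),
            MSSA.getMemoryAccess(StoreA));
  EXPECT_EQ(MSSA.getMemoryAccess(StoreA)->getDefiningAccess(),
            MSSA.getMemoryAccess(StoreB));
}
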
TEST_F(MemorySSATest, Irreducible) {
  // Create the equivalent of
  //   x = something
  //   if (...)
  //     goto second_loop_entry
  //   while (...) {
  //   second_loop_entry:
  //   }
  //   use(x)

  SmallVector<PHINode *, 8> Inserted;
  IRBuilder<> B(C);
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  // Make blocks
  BasicBlock *IfBB = BasicBlock::Create(C, "if", F);
  BasicBlock *LoopStartBB = BasicBlock::Create(C, "loopstart", F);
  BasicBlock *LoopMainBB = BasicBlock::Create(C, "loopmain", F);
  BasicBlock *AfterLoopBB = BasicBlock::Create(C, "afterloop", F);
  B.SetInsertPoint(IfBB);
  B.CreateCondBr(B.getTrue(), LoopMainBB, LoopStartBB);
  B.SetInsertPoint(LoopStartBB);
  B.CreateBr(LoopMainBB);
  B.SetInsertPoint(LoopMainBB);
  B.CreateCondBr(B.getTrue(), LoopStartBB, AfterLoopBB);
  B.SetInsertPoint(AfterLoopBB);
  Argument *FirstArg = &*F->arg_begin();
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Create the load memory access
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), FirstArg);
  MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LoadInst, nullptr, AfterLoopBB, MemorySSA::Beginning));
  Updater.insertUse(LoadAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveToBeforeLiveOnEntryInvalidatesCache) {
  // Create:
  //   %1 = alloca i8
  //   ; 1 = MemoryDef(liveOnEntry)
  //   store i8 0, i8* %1
  //   ; 2 = MemoryDef(1)
  //   store i8 0, i8* %1
  //
  // ...And be sure that MSSA's caching doesn't give us `1` for the clobber of
  // `2` after `1` is removed.
  IRBuilder<> B(C);
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  BasicBlock *Entry = BasicBlock::Create(C, "if", F);
  B.SetInsertPoint(Entry);

  Value *A = B.CreateAlloca(B.getInt8Ty());
  StoreInst *StoreA = B.CreateStore(B.getInt8(0), A);
  StoreInst *StoreB = B.CreateStore(B.getInt8(0), A);

  setupAnalyses();

  MemorySSA &MSSA = *Analyses->MSSA;

  auto *DefA = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
  auto *DefB = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));

  MemoryAccess *BClobber = MSSA.getWalker()->getClobberingMemoryAccess(DefB);
  ASSERT_EQ(DefA, BClobber);

  MemorySSAUpdater(&MSSA).removeMemoryAccess(DefA);
  StoreA->eraseFromParent();

  EXPECT_EQ(DefB->getDefiningAccess(), MSSA.getLiveOnEntryDef());

  EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefB),
            MSSA.getLiveOnEntryDef())
      << "(DefA = " << DefA << ")";
}

TEST_F(MemorySSATest, RemovingDefInvalidatesCache) {
  // Create:
  //   %x = alloca i8
  //   %y = alloca i8
  //   ; 1 = MemoryDef(liveOnEntry)
  //   store i8 0, i8* %x
  //   ; 2 = MemoryDef(1)
  //   store i8 0, i8* %y
  //   ; 3 = MemoryDef(2)
  //   store i8 0, i8* %x
  //
  // And be sure that MSSA's caching handles the removal of def `1`
  // appropriately.
  IRBuilder<> B(C);
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  BasicBlock *Entry = BasicBlock::Create(C, "if", F);
  B.SetInsertPoint(Entry);

  Value *X = B.CreateAlloca(B.getInt8Ty());
  Value *Y = B.CreateAlloca(B.getInt8Ty());
  StoreInst *StoreX1 = B.CreateStore(B.getInt8(0), X);
  StoreInst *StoreY = B.CreateStore(B.getInt8(0), Y);
  StoreInst *StoreX2 = B.CreateStore(B.getInt8(0), X);

  setupAnalyses();

  MemorySSA &MSSA = *Analyses->MSSA;

  auto *DefX1 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX1));
  auto *DefY = cast<MemoryDef>(MSSA.getMemoryAccess(StoreY));
  auto *DefX2 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX2));

  EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
  MemoryAccess *X2Clobber = MSSA.getWalker()->getClobberingMemoryAccess(DefX2);
  ASSERT_EQ(DefX1, X2Clobber);

  MemorySSAUpdater(&MSSA).removeMemoryAccess(DefX1);
  StoreX1->eraseFromParent();

  EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
  EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefX2),
            MSSA.getLiveOnEntryDef())
      << "(DefX1 = " << DefX1 << ")";
}

// Test Must alias for optimized uses
TEST_F(MemorySSATest, TestLoadMustAlias) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");

  B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
  // Check load from LOE
  LoadInst *LA1 = B.CreateLoad(Int8, AllocaA, "");
  // Check load alias cached for second load
  LoadInst *LA2 = B.CreateLoad(Int8, AllocaA, "");

  B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
  // Check load from store/def
  LoadInst *LA3 = B.CreateLoad(Int8, AllocaA, "");
  // Check load alias cached for second load
  LoadInst *LA4 = B.CreateLoad(Int8, AllocaA, "");

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;

  unsigned I = 0;
  for (LoadInst *V : {LA1, LA2}) {
    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemUse->getOptimizedAccessType(), None)
        << "Load " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
  for (LoadInst *V : {LA3, LA4}) {
    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemUse->getOptimizedAccessType(), MustAlias)
        << "Load " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}

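// NOTE: An additional sketch, not part of the original tests, of the MemoryDef
// optimization lifecycle that the surrounding tests rely on: a walker query
// marks a def optimized and records its clobber, and resetOptimized clears
// that state again. The test name is invented; the accessors used
// (isOptimized, getOptimized, resetOptimized) already appear in this file.
TEST_F(MemorySSATest, SketchDefResetOptimized) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  StoreInst *S1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
  StoreInst *S2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  auto *Def2 = cast<MemoryDef>(MSSA.getMemoryAccess(S2));
  EXPECT_FALSE(Def2->isOptimized());
  // Querying the walker optimizes the def to its clobber (the first store).
  EXPECT_EQ(Walker->getClobberingMemoryAccess(S2), MSSA.getMemoryAccess(S1));
  EXPECT_TRUE(Def2->isOptimized());
  EXPECT_EQ(Def2->getOptimized(), MSSA.getMemoryAccess(S1));
  // Resetting drops the cached result; a re-query recomputes the same answer.
  Def2->resetOptimized();
  EXPECT_FALSE(Def2->isOptimized());
  EXPECT_EQ(Walker->getClobberingMemoryAccess(S2), MSSA.getMemoryAccess(S1));
}
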
// Test Must alias for optimized defs.
TEST_F(MemorySSATest, TestStoreMustAlias) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
  StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
  StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
  StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaA);
  StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaB);
  StoreInst *SA3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaA);
  StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaB);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  unsigned I = 0;
  for (StoreInst *V : {SA1, SB1, SA2, SB2, SA3, SB3}) {
    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemDef->isOptimized(), false)
        << "Store " << I << " is optimized from the start?";
    EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
        << "Store " << I
        << " has correct alias information before being optimized?";
    if (V == SA1)
      Walker->getClobberingMemoryAccess(V);
    else {
      MemoryAccess *Def = MemDef->getDefiningAccess();
      MemoryAccess *Clob = Walker->getClobberingMemoryAccess(V);
      EXPECT_NE(Def, Clob) << "Store " << I
                           << " has Defining Access equal to Clobbering Access";
    }
    EXPECT_EQ(MemDef->isOptimized(), true)
        << "Store " << I << " was not optimized";
    if (I == 0 || I == 1)
      EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
          << "Store " << I << " doesn't have the correct alias information";
    else
      EXPECT_EQ(MemDef->getOptimizedAccessType(), MustAlias)
          << "Store " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}

// Test May alias for optimized uses.
TEST_F(MemorySSATest, TestLoadMayAlias) {
  F = Function::Create(FunctionType::get(B.getVoidTy(),
                                         {B.getInt8PtrTy(), B.getInt8PtrTy()},
                                         false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  auto *ArgIt = F->arg_begin();
  Argument *PointerA = &*ArgIt;
  Argument *PointerB = &*(++ArgIt);
  B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
  LoadInst *LA1 = B.CreateLoad(Int8, PointerA, "");
  B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
  LoadInst *LB1 = B.CreateLoad(Int8, PointerB, "");
  B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
  LoadInst *LA2 = B.CreateLoad(Int8, PointerA, "");
  B.CreateStore(ConstantInt::get(Int8, 0), PointerB);
  LoadInst *LB2 = B.CreateLoad(Int8, PointerB, "");

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;

  unsigned I = 0;
  for (LoadInst *V : {LA1, LB1}) {
    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemUse->getOptimizedAccessType(), MayAlias)
        << "Load " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
  for (LoadInst *V : {LA2, LB2}) {
    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemUse->getOptimizedAccessType(), MustAlias)
        << "Load " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}

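// NOTE: Another small sketch, not part of the original suite: two pointer
// arguments may alias, so a store through one argument is the walker's answer
// for a load through the other, and the use is recorded as MayAlias. This
// restates the first half of TestLoadMayAlias in isolation; the test name is
// made up.
TEST_F(MemorySSATest, SketchLoadClobberedByMayAliasStore) {
  F = Function::Create(FunctionType::get(B.getVoidTy(),
                                         {B.getInt8PtrTy(), B.getInt8PtrTy()},
                                         false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  auto *ArgIt = F->arg_begin();
  Argument *PointerA = &*ArgIt;
  Argument *PointerB = &*(++ArgIt);
  StoreInst *SA = B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
  LoadInst *LB = B.CreateLoad(Int8, PointerB);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  // The two arguments may alias, so the store to %a clobbers the load of %b.
  EXPECT_EQ(Walker->getClobberingMemoryAccess(LB), MSSA.getMemoryAccess(SA));
  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LB));
  EXPECT_EQ(LoadAccess->getOptimizedAccessType(), MayAlias);
}
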
// Test May alias for optimized defs.
TEST_F(MemorySSATest, TestStoreMayAlias) {
  F = Function::Create(FunctionType::get(B.getVoidTy(),
                                         {B.getInt8PtrTy(), B.getInt8PtrTy()},
                                         false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  auto *ArgIt = F->arg_begin();
  Argument *PointerA = &*ArgIt;
  Argument *PointerB = &*(++ArgIt);
  Value *AllocaC = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
  // Store into arg1, must alias because it's LOE => must
  StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
  // Store into arg2, may alias store to arg1 => may
  StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
  // Store into alloca, no alias with args, so must alias LOE => must
  StoreInst *SC1 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaC);
  // Store into arg1, may alias store to arg2 => may
  StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 3), PointerA);
  // Store into arg2, may alias store to arg1 => may
  StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 4), PointerB);
  // Store into alloca, no alias with args, so must alias SC1 => must
  StoreInst *SC2 = B.CreateStore(ConstantInt::get(Int8, 5), AllocaC);
  // Store into arg2, must alias store to arg2 => must
  StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 6), PointerB);
  std::initializer_list<StoreInst *> Sts = {SA1, SB1, SC1, SA2, SB2, SC2, SB3};

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  unsigned I = 0;
  for (StoreInst *V : Sts) {
    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemDef->isOptimized(), false)
        << "Store " << I << " is optimized from the start?";
    EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
        << "Store " << I
        << " has correct alias information before being optimized?";
    ++I;
  }

  for (StoreInst *V : Sts)
    Walker->getClobberingMemoryAccess(V);

  I = 0;
  for (StoreInst *V : Sts) {
    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemDef->isOptimized(), true)
        << "Store " << I << " was not optimized";
    if (I == 1 || I == 3 || I == 4)
      EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
          << "Store " << I << " doesn't have the correct alias information";
    else if (I == 0 || I == 2)
      EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
          << "Store " << I << " doesn't have the correct alias information";
    else
      EXPECT_EQ(MemDef->getOptimizedAccessType(), MustAlias)
          << "Store " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}

TEST_F(MemorySSATest, LifetimeMarkersAreClobbers) {
  // Example code:
  //   define void @a(i8* %foo) {
  //     %bar = getelementptr i8, i8* %foo, i64 1
  //     %baz = getelementptr i8, i8* %foo, i64 2
  //     store i8 0, i8* %foo
  //     store i8 0, i8* %bar
  //     call void @llvm.lifetime.end.p0i8(i64 3, i8* %foo)
  //     call void @llvm.lifetime.start.p0i8(i64 3, i8* %foo)
  //     store i8 0, i8* %foo
  //     store i8 0, i8* %bar
  //     call void @llvm.memset.p0i8(i8* %baz, i8 0, i64 1)
  //     ret void
  //   }
  //
  // Patterns like this are possible after inlining; the stores to %foo and
  // %bar should both be clobbered by the lifetime.start call if they're
  // dominated by it.

  IRBuilder<> B(C);
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  // Make blocks
  BasicBlock *Entry = BasicBlock::Create(C, "entry", F);

  B.SetInsertPoint(Entry);
  Value *Foo = &*F->arg_begin();

  Value *Bar = B.CreateGEP(B.getInt8Ty(), Foo, B.getInt64(1), "bar");
  Value *Baz = B.CreateGEP(B.getInt8Ty(), Foo, B.getInt64(2), "baz");

  B.CreateStore(B.getInt8(0), Foo);
  B.CreateStore(B.getInt8(0), Bar);

  auto GetLifetimeIntrinsic = [&](Intrinsic::ID ID) {
    return Intrinsic::getDeclaration(&M, ID, {Foo->getType()});
  };

  B.CreateCall(GetLifetimeIntrinsic(Intrinsic::lifetime_end),
               {B.getInt64(3), Foo});
  Instruction *LifetimeStart = B.CreateCall(
      GetLifetimeIntrinsic(Intrinsic::lifetime_start), {B.getInt64(3), Foo});

  Instruction *FooStore = B.CreateStore(B.getInt8(0), Foo);
  Instruction *BarStore = B.CreateStore(B.getInt8(0), Bar);
  Instruction *BazMemSet = B.CreateMemSet(Baz, B.getInt8(0), 1, Align(1));

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;

  MemoryAccess *LifetimeStartAccess = MSSA.getMemoryAccess(LifetimeStart);
  ASSERT_NE(LifetimeStartAccess, nullptr);

  MemoryAccess *FooAccess = MSSA.getMemoryAccess(FooStore);
  ASSERT_NE(FooAccess, nullptr);

  MemoryAccess *BarAccess = MSSA.getMemoryAccess(BarStore);
  ASSERT_NE(BarAccess, nullptr);

  MemoryAccess *BazAccess = MSSA.getMemoryAccess(BazMemSet);
  ASSERT_NE(BazAccess, nullptr);

  MemoryAccess *FooClobber =
      MSSA.getWalker()->getClobberingMemoryAccess(FooAccess);
  EXPECT_EQ(FooClobber, LifetimeStartAccess);

  MemoryAccess *BarClobber =
      MSSA.getWalker()->getClobberingMemoryAccess(BarAccess);
  EXPECT_EQ(BarClobber, LifetimeStartAccess);

  MemoryAccess *BazClobber =
      MSSA.getWalker()->getClobberingMemoryAccess(BazAccess);
  EXPECT_EQ(BazClobber, LifetimeStartAccess);

  MemoryAccess *LifetimeStartClobber =
      MSSA.getWalker()->getClobberingMemoryAccess(
          LifetimeStartAccess, MemoryLocation::getAfter(Foo));
  EXPECT_EQ(LifetimeStartClobber, LifetimeStartAccess);
}

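// NOTE: An extra sketch, not from the original test suite: intrinsic-based
// writes such as memset show up as MemoryDefs just like stores, so a following
// load of the same alloca is clobbered by the memset. The test name is
// invented; CreateMemSet is used with the same signature as in the test above.
TEST_F(MemorySSATest, SketchMemSetClobbersLoad) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "entry", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Instruction *MemSet = B.CreateMemSet(Alloca, B.getInt8(0), 1, Align(1));
  LoadInst *Load = B.CreateLoad(Int8, Alloca);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;

  // The memset writes the byte we later load, so it is a MemoryDef and the
  // load's clobber.
  MemoryAccess *MemSetAccess = MSSA.getMemoryAccess(MemSet);
  ASSERT_NE(MemSetAccess, nullptr);
  EXPECT_TRUE(isa<MemoryDef>(MemSetAccess));
  EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(Load), MemSetAccess);
}
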
TEST_F(MemorySSATest, DefOptimizationsAreInvalidatedOnMoving) {
  IRBuilder<> B(C);
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getInt1Ty()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);

  // Make a CFG like
  //     entry
  //    /     \
  //   a       b
  //    \     /
  //       c
  //
  // Put a def in A and a def in B, move the def from A -> B, observe as the
  // optimization is invalidated.
  BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
  BasicBlock *BlockA = BasicBlock::Create(C, "a", F);
  BasicBlock *BlockB = BasicBlock::Create(C, "b", F);
  BasicBlock *BlockC = BasicBlock::Create(C, "c", F);

  B.SetInsertPoint(Entry);
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "alloc");
  StoreInst *StoreEntry = B.CreateStore(B.getInt8(0), Alloca);
  B.CreateCondBr(B.getTrue(), BlockA, BlockB);

  B.SetInsertPoint(BlockA);
  StoreInst *StoreA = B.CreateStore(B.getInt8(1), Alloca);
  B.CreateBr(BlockC);

  B.SetInsertPoint(BlockB);
  StoreInst *StoreB = B.CreateStore(B.getInt8(2), Alloca);
  B.CreateBr(BlockC);

  B.SetInsertPoint(BlockC);
  B.CreateUnreachable();

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;

  auto *AccessEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreEntry));
  auto *StoreAEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
  auto *StoreBEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));

  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
            AccessEntry);
  ASSERT_TRUE(StoreAEntry->isOptimized());

  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreBEntry),
            AccessEntry);
  ASSERT_TRUE(StoreBEntry->isOptimized());

  // Note that if we did InsertionPlace::Beginning, we don't go out of our way
  // to invalidate the cache for StoreBEntry. If the user wants to actually do
  // moves like these, it's up to them to ensure that nearby cache entries are
  // correctly invalidated (which, in general, requires walking all
  // instructions that the moved instruction dominates). So we probably
  // shouldn't be doing moves like this in general. Still, it works as a
  // test case. ;)
  MemorySSAUpdater(&MSSA).moveToPlace(StoreAEntry, BlockB,
                                      MemorySSA::InsertionPlace::End);
  ASSERT_FALSE(StoreAEntry->isOptimized());
  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
            StoreBEntry);
}

TEST_F(MemorySSATest, TestOptimizedDefsAreProperUses) {
  F = Function::Create(FunctionType::get(B.getVoidTy(),
                                         {B.getInt8PtrTy(), B.getInt8PtrTy()},
                                         false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *AllocB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");

  StoreInst *StoreA = B.CreateStore(ConstantInt::get(Int8, 0), AllocA);
  StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 1), AllocB);
  StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  // If these don't hold, there's no chance of the test result being useful.
  ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA),
            MSSA.getLiveOnEntryDef());
  ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreB),
            MSSA.getLiveOnEntryDef());
  auto *StoreAAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
  auto *StoreA2Access = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA2));
  ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA2), StoreAAccess);
  ASSERT_EQ(StoreA2Access->getOptimized(), StoreAAccess);

  auto *StoreBAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
  ASSERT_LT(StoreAAccess->getID(), StoreBAccess->getID());
  ASSERT_LT(StoreBAccess->getID(), StoreA2Access->getID());

  auto SortVecByID = [](std::vector<const MemoryDef *> &Defs) {
    llvm::sort(Defs, [](const MemoryDef *LHS, const MemoryDef *RHS) {
      return LHS->getID() < RHS->getID();
    });
  };

  auto SortedUserList = [&](const MemoryDef *MD) {
    std::vector<const MemoryDef *> Result;
    transform(MD->users(), std::back_inserter(Result),
              [](const User *U) { return cast<MemoryDef>(U); });
    SortVecByID(Result);
    return Result;
  };

  // Use std::vectors, since they have nice pretty-printing if the test fails.
  // Parens are necessary because EXPECT_EQ is a macro, and we have commas in
  // our init lists...
  EXPECT_EQ(SortedUserList(StoreAAccess),
            (std::vector<const MemoryDef *>{StoreBAccess, StoreA2Access}));

  EXPECT_EQ(SortedUserList(StoreBAccess),
            std::vector<const MemoryDef *>{StoreA2Access});

  // StoreAAccess should be present twice, since it uses liveOnEntry for both
  // its defining and optimized accesses. This is a bit awkward, and is not
  // relied upon anywhere at the moment. If this is painful, we can fix it.
  EXPECT_EQ(SortedUserList(cast<MemoryDef>(MSSA.getLiveOnEntryDef())),
            (std::vector<const MemoryDef *>{StoreAAccess, StoreAAccess,
                                            StoreBAccess}));
}

//   entry
//     |
//   header
//    / \
// body  |
//    \ /
//   exit
// header:
// ; 1 = MemoryDef(liveOnEntry)
// body:
// ; 2 = MemoryDef(1)
// exit:
// ; 3 = MemoryPhi({body, 2}, {header, 1})
// ; 4 = MemoryDef(3); optimized to 3, cannot optimize through the phi.
// Insert edge: entry -> exit. Check that the MemorySSA update is correct.
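// After the update, the MemoryPhi in exit should gain an incoming value
// {entry, liveOnEntry}; 4 stays optimized to the Phi, so querying its clobber
// should still return 3.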
TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiNotOpt) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  Argument *PointerArg = &*F->arg_begin();
  BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
  BasicBlock *Header(BasicBlock::Create(C, "header", F));
  BasicBlock *Body(BasicBlock::Create(C, "body", F));
  BasicBlock *Exit(BasicBlock::Create(C, "exit", F));
  B.SetInsertPoint(Entry);
  BranchInst::Create(Header, Entry);
  B.SetInsertPoint(Header);
  B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Exit, Body);
  B.SetInsertPoint(Body);
  B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Exit, Body);
  B.SetInsertPoint(Exit);
  StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  std::unique_ptr<MemorySSAUpdater> MSSAU =
      std::make_unique<MemorySSAUpdater>(&MSSA);

  MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
  EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));

  // Alter CFG, add edge: entry -> exit
  Entry->getTerminator()->eraseFromParent();
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Header, Exit);
  SmallVector<CFGUpdate, 1> Updates;
  Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
  Analyses->DT.applyUpdates(Updates);
  MSSAU->applyInsertUpdates(Updates, Analyses->DT);
  EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
}

//   entry
//     |
//   header
//    / \
// body  |
//    \ /
//   exit
// header:
// ; 1 = MemoryDef(liveOnEntry)
// body:
// ; 2 = MemoryDef(1)
// exit:
// ; 3 = MemoryPhi({body, 2}, {header, 1})
// ; 4 = MemoryDef(3); optimize this to 1 now; the added edge should
//   invalidate the optimized access.
// Insert edge: entry -> exit. Check that the MemorySSA update is correct.
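// After the update, liveOnEntry reaches exit along the new edge, so the old
// optimization of 4 to 1 is no longer valid; querying the clobber again
// should now return the MemoryPhi in exit.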
TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiOpt) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  Argument *PointerArg = &*F->arg_begin();
  Type *Int8 = Type::getInt8Ty(C);
  BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
  BasicBlock *Header(BasicBlock::Create(C, "header", F));
  BasicBlock *Body(BasicBlock::Create(C, "body", F));
  BasicBlock *Exit(BasicBlock::Create(C, "exit", F));

  B.SetInsertPoint(Entry);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  BranchInst::Create(Header, Entry);

  B.SetInsertPoint(Header);
  StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Exit, Body);

  B.SetInsertPoint(Body);
  B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
  BranchInst::Create(Exit, Body);

  B.SetInsertPoint(Exit);
  StoreInst *S2 = B.CreateStore(B.getInt8(16), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  std::unique_ptr<MemorySSAUpdater> MSSAU =
      std::make_unique<MemorySSAUpdater>(&MSSA);

  MemoryDef *DefS1 = cast<MemoryDef>(MSSA.getMemoryAccess(S1));
  EXPECT_EQ(DefS1, Walker->getClobberingMemoryAccess(S2));

  // Alter CFG, add edge: entry -> exit
  Entry->getTerminator()->eraseFromParent();
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Header, Exit);
  SmallVector<CFGUpdate, 1> Updates;
  Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
  Analyses->DT.applyUpdates(Updates);
  MSSAU->applyInsertUpdates(Updates, Analyses->DT);

  MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
  EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S2));
}

//   entry
//   /  |
//  a   |
// / \  |
// b  c f
// \ /  |
//  d   |
//   \ /
//    e
// f:
// ; 1 = MemoryDef(liveOnEntry)
// e:
// ; 2 = MemoryPhi({d, liveOnEntry}, {f, 1})
//
// Insert edge: f -> c. Check that the update is correct.
// After update:
// f:
// ; 1 = MemoryDef(liveOnEntry)
// c:
// ; 3 = MemoryPhi({a, liveOnEntry}, {f, 1})
// d:
// ; 4 = MemoryPhi({b, liveOnEntry}, {c, 3})
// e:
// ; 2 = MemoryPhi({d, 4}, {f, 1})
TEST_F(MemorySSATest, TestAddedEdgeToBlockWithNoPhiAddNewPhis) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  Argument *PointerArg = &*F->arg_begin();
  BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
  BasicBlock *ABlock(BasicBlock::Create(C, "a", F));
  BasicBlock *BBlock(BasicBlock::Create(C, "b", F));
  BasicBlock *CBlock(BasicBlock::Create(C, "c", F));
  BasicBlock *DBlock(BasicBlock::Create(C, "d", F));
  BasicBlock *EBlock(BasicBlock::Create(C, "e", F));
  BasicBlock *FBlock(BasicBlock::Create(C, "f", F));

  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), ABlock, FBlock);
  B.SetInsertPoint(ABlock);
  B.CreateCondBr(B.getTrue(), BBlock, CBlock);
  B.SetInsertPoint(BBlock);
  BranchInst::Create(DBlock, BBlock);
  B.SetInsertPoint(CBlock);
  BranchInst::Create(DBlock, CBlock);
  B.SetInsertPoint(DBlock);
  BranchInst::Create(EBlock, DBlock);
  B.SetInsertPoint(FBlock);
  B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(EBlock, FBlock);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAU =
      std::make_unique<MemorySSAUpdater>(&MSSA);

  // Alter CFG, add edge: f -> c
  FBlock->getTerminator()->eraseFromParent();
  B.SetInsertPoint(FBlock);
  B.CreateCondBr(B.getTrue(), CBlock, EBlock);
  SmallVector<CFGUpdate, 1> Updates;
  Updates.push_back({cfg::UpdateKind::Insert, FBlock, CBlock});
  Analyses->DT.applyUpdates(Updates);
  MSSAU->applyInsertUpdates(Updates, Analyses->DT);

  MemoryPhi *MPC = MSSA.getMemoryAccess(CBlock);
  EXPECT_NE(MPC, nullptr);
  MemoryPhi *MPD = MSSA.getMemoryAccess(DBlock);
  EXPECT_NE(MPD, nullptr);
  MemoryPhi *MPE = MSSA.getMemoryAccess(EBlock);
  EXPECT_EQ(MPD, MPE->getIncomingValueForBlock(DBlock));
}

TEST_F(MemorySSATest, TestCallClobber) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  Value *Pointer1 = &*F->arg_begin();
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Value *Pointer2 = B.CreateGEP(B.getInt8Ty(), Pointer1, B.getInt64(1));
  Instruction *StorePointer1 = B.CreateStore(B.getInt8(0), Pointer1);
  Instruction *StorePointer2 = B.CreateStore(B.getInt8(0), Pointer2);
  Instruction *MemSet = B.CreateMemSet(Pointer2, B.getInt8(0), 1, Align(1));

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  MemoryUseOrDef *Store1Access = MSSA.getMemoryAccess(StorePointer1);
  MemoryUseOrDef *Store2Access = MSSA.getMemoryAccess(StorePointer2);
  MemoryUseOrDef *MemSetAccess = MSSA.getMemoryAccess(MemSet);

  MemoryAccess *Pointer1Clobber = Walker->getClobberingMemoryAccess(
      MemSetAccess, MemoryLocation(Pointer1, LocationSize::precise(1)));
  EXPECT_EQ(Pointer1Clobber, Store1Access);

  MemoryAccess *Pointer2Clobber = Walker->getClobberingMemoryAccess(
      MemSetAccess, MemoryLocation(Pointer2, LocationSize::precise(1)));
  EXPECT_EQ(Pointer2Clobber, MemSetAccess);
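
  // With no location given, the walker looks above the memset, so the
  // clobbering access should be the store to Pointer2.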
  MemoryAccess *MemSetClobber = Walker->getClobberingMemoryAccess(MemSetAccess);
  EXPECT_EQ(MemSetClobber, Store2Access);
}

TEST_F(MemorySSATest, TestLoadClobber) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  Value *Pointer1 = &*F->arg_begin();
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Value *Pointer2 = B.CreateGEP(B.getInt8Ty(), Pointer1, B.getInt64(1));
  Instruction *LoadPointer1 =
      B.CreateLoad(B.getInt8Ty(), Pointer1, /* Volatile */ true);
  Instruction *LoadPointer2 =
      B.CreateLoad(B.getInt8Ty(), Pointer2, /* Volatile */ true);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  MemoryUseOrDef *Load1Access = MSSA.getMemoryAccess(LoadPointer1);
  MemoryUseOrDef *Load2Access = MSSA.getMemoryAccess(LoadPointer2);

  // When providing a memory location, we should never return a load as the
  // clobber.
  MemoryAccess *Pointer1Clobber = Walker->getClobberingMemoryAccess(
      Load2Access, MemoryLocation(Pointer1, LocationSize::precise(1)));
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(Pointer1Clobber));

  MemoryAccess *Pointer2Clobber = Walker->getClobberingMemoryAccess(
      Load2Access, MemoryLocation(Pointer2, LocationSize::precise(1)));
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(Pointer2Clobber));

  MemoryAccess *Load2Clobber = Walker->getClobberingMemoryAccess(Load2Access);
  EXPECT_EQ(Load2Clobber, Load1Access);
}