1 //===- MemorySSA.cpp - Unit tests for MemorySSA ---------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 #include "llvm/Analysis/MemorySSA.h" 9 #include "llvm/Analysis/AliasAnalysis.h" 10 #include "llvm/Analysis/AssumptionCache.h" 11 #include "llvm/Analysis/BasicAliasAnalysis.h" 12 #include "llvm/Analysis/MemorySSAUpdater.h" 13 #include "llvm/Analysis/TargetLibraryInfo.h" 14 #include "llvm/AsmParser/Parser.h" 15 #include "llvm/IR/BasicBlock.h" 16 #include "llvm/IR/DataLayout.h" 17 #include "llvm/IR/Dominators.h" 18 #include "llvm/IR/IRBuilder.h" 19 #include "llvm/IR/Instructions.h" 20 #include "llvm/IR/LLVMContext.h" 21 #include "llvm/Support/SourceMgr.h" 22 #include "gtest/gtest.h" 23 24 using namespace llvm; 25 26 const static char DLString[] = "e-i64:64-f80:128-n8:16:32:64-S128"; 27 28 /// There's a lot of common setup between these tests. This fixture helps reduce 29 /// that. Tests should mock up a function, store it in F, and then call 30 /// setupAnalyses(). 31 class MemorySSATest : public testing::Test { 32 protected: 33 // N.B. Many of these members depend on each other (e.g. the Module depends on 34 // the Context, etc.). So, order matters here (and in TestAnalyses). 35 LLVMContext C; 36 Module M; 37 IRBuilder<> B; 38 DataLayout DL; 39 TargetLibraryInfoImpl TLII; 40 TargetLibraryInfo TLI; 41 Function *F; 42 43 // Things that we need to build after the function is created. 44 struct TestAnalyses { 45 DominatorTree DT; 46 AssumptionCache AC; 47 AAResults AA; 48 BasicAAResult BAA; 49 // We need to defer MSSA construction until AA is *entirely* set up, which 50 // requires calling addAAResult. Hence, we just use a pointer here. 51 std::unique_ptr<MemorySSA> MSSA; 52 MemorySSAWalker *Walker; 53 54 TestAnalyses(MemorySSATest &Test) 55 : DT(*Test.F), AC(*Test.F), AA(Test.TLI), 56 BAA(Test.DL, *Test.F, Test.TLI, AC, &DT) { 57 AA.addAAResult(BAA); 58 MSSA = std::make_unique<MemorySSA>(*Test.F, &AA, &DT); 59 Walker = MSSA->getWalker(); 60 } 61 }; 62 63 std::unique_ptr<TestAnalyses> Analyses; 64 65 void setupAnalyses() { 66 assert(F); 67 Analyses.reset(new TestAnalyses(*this)); 68 } 69 70 public: 71 MemorySSATest() 72 : M("MemorySSATest", C), B(C), DL(DLString), TLI(TLII), F(nullptr) {} 73 }; 74 75 TEST_F(MemorySSATest, CreateALoad) { 76 // We create a diamond where there is a store on one side, and then after 77 // building MemorySSA, create a load after the merge point, and use it to test 78 // updating by creating an access for the load. 79 F = Function::Create( 80 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 81 GlobalValue::ExternalLinkage, "F", &M); 82 BasicBlock *Entry(BasicBlock::Create(C, "", F)); 83 BasicBlock *Left(BasicBlock::Create(C, "", F)); 84 BasicBlock *Right(BasicBlock::Create(C, "", F)); 85 BasicBlock *Merge(BasicBlock::Create(C, "", F)); 86 B.SetInsertPoint(Entry); 87 B.CreateCondBr(B.getTrue(), Left, Right); 88 B.SetInsertPoint(Left); 89 Argument *PointerArg = &*F->arg_begin(); 90 B.CreateStore(B.getInt8(16), PointerArg); 91 BranchInst::Create(Merge, Left); 92 BranchInst::Create(Merge, Right); 93 94 setupAnalyses(); 95 MemorySSA &MSSA = *Analyses->MSSA; 96 MemorySSAUpdater Updater(&MSSA); 97 // Add the load 98 B.SetInsertPoint(Merge); 99 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg); 100 101 // MemoryPHI should already exist. 102 MemoryPhi *MP = MSSA.getMemoryAccess(Merge); 103 EXPECT_NE(MP, nullptr); 104 105 // Create the load memory acccess 106 MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB( 107 LoadInst, MP, Merge, MemorySSA::Beginning)); 108 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess(); 109 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess)); 110 MSSA.verifyMemorySSA(); 111 } 112 TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) { 113 // We create a diamond, then build memoryssa with no memory accesses, and 114 // incrementally update it by inserting a store in the, entry, a load in the 115 // merge point, then a store in the branch, another load in the merge point, 116 // and then a store in the entry. 117 F = Function::Create( 118 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 119 GlobalValue::ExternalLinkage, "F", &M); 120 BasicBlock *Entry(BasicBlock::Create(C, "", F)); 121 BasicBlock *Left(BasicBlock::Create(C, "", F)); 122 BasicBlock *Right(BasicBlock::Create(C, "", F)); 123 BasicBlock *Merge(BasicBlock::Create(C, "", F)); 124 B.SetInsertPoint(Entry); 125 B.CreateCondBr(B.getTrue(), Left, Right); 126 B.SetInsertPoint(Left, Left->begin()); 127 Argument *PointerArg = &*F->arg_begin(); 128 B.SetInsertPoint(Left); 129 B.CreateBr(Merge); 130 B.SetInsertPoint(Right); 131 B.CreateBr(Merge); 132 133 setupAnalyses(); 134 MemorySSA &MSSA = *Analyses->MSSA; 135 MemorySSAUpdater Updater(&MSSA); 136 // Add the store 137 B.SetInsertPoint(Entry, Entry->begin()); 138 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg); 139 MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB( 140 EntryStore, nullptr, Entry, MemorySSA::Beginning); 141 Updater.insertDef(cast<MemoryDef>(EntryStoreAccess)); 142 143 // Add the load 144 B.SetInsertPoint(Merge, Merge->begin()); 145 LoadInst *FirstLoad = B.CreateLoad(B.getInt8Ty(), PointerArg); 146 147 // MemoryPHI should not already exist. 148 MemoryPhi *MP = MSSA.getMemoryAccess(Merge); 149 EXPECT_EQ(MP, nullptr); 150 151 // Create the load memory access 152 MemoryUse *FirstLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB( 153 FirstLoad, nullptr, Merge, MemorySSA::Beginning)); 154 Updater.insertUse(FirstLoadAccess); 155 // Should just have a load using the entry access, because it should discover 156 // the phi is trivial 157 EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), EntryStoreAccess); 158 159 // Create a store on the left 160 // Add the store 161 B.SetInsertPoint(Left, Left->begin()); 162 StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg); 163 MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB( 164 LeftStore, nullptr, Left, MemorySSA::Beginning); 165 Updater.insertDef(cast<MemoryDef>(LeftStoreAccess), false); 166 167 // MemoryPHI should exist after adding LeftStore. 168 MP = MSSA.getMemoryAccess(Merge); 169 EXPECT_NE(MP, nullptr); 170 171 // Add the second load 172 B.SetInsertPoint(Merge, Merge->begin()); 173 LoadInst *SecondLoad = B.CreateLoad(B.getInt8Ty(), PointerArg); 174 175 // Create the load memory access 176 MemoryUse *SecondLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB( 177 SecondLoad, nullptr, Merge, MemorySSA::Beginning)); 178 Updater.insertUse(SecondLoadAccess); 179 // Now the load should be a phi of the entry store and the left store 180 MemoryPhi *MergePhi = 181 dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess()); 182 EXPECT_NE(MergePhi, nullptr); 183 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess); 184 EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess); 185 // Now create a store below the existing one in the entry 186 B.SetInsertPoint(Entry, --Entry->end()); 187 StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg); 188 MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB( 189 SecondEntryStore, nullptr, Entry, MemorySSA::End); 190 // Insert it twice just to test renaming 191 Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), false); 192 EXPECT_NE(FirstLoadAccess->getDefiningAccess(), MergePhi); 193 Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), true); 194 EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), MergePhi); 195 // and make sure the phi below it got updated, despite being blocks away 196 MergePhi = dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess()); 197 EXPECT_NE(MergePhi, nullptr); 198 EXPECT_EQ(MergePhi->getIncomingValue(0), SecondEntryStoreAccess); 199 EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess); 200 MSSA.verifyMemorySSA(); 201 } 202 203 TEST_F(MemorySSATest, CreateALoadUpdater) { 204 // We create a diamond, then build memoryssa with no memory accesses, and 205 // incrementally update it by inserting a store in one of the branches, and a 206 // load in the merge point 207 F = Function::Create( 208 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 209 GlobalValue::ExternalLinkage, "F", &M); 210 BasicBlock *Entry(BasicBlock::Create(C, "", F)); 211 BasicBlock *Left(BasicBlock::Create(C, "", F)); 212 BasicBlock *Right(BasicBlock::Create(C, "", F)); 213 BasicBlock *Merge(BasicBlock::Create(C, "", F)); 214 B.SetInsertPoint(Entry); 215 B.CreateCondBr(B.getTrue(), Left, Right); 216 B.SetInsertPoint(Left, Left->begin()); 217 Argument *PointerArg = &*F->arg_begin(); 218 B.SetInsertPoint(Left); 219 B.CreateBr(Merge); 220 B.SetInsertPoint(Right); 221 B.CreateBr(Merge); 222 223 setupAnalyses(); 224 MemorySSA &MSSA = *Analyses->MSSA; 225 MemorySSAUpdater Updater(&MSSA); 226 B.SetInsertPoint(Left, Left->begin()); 227 // Add the store 228 StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg); 229 MemoryAccess *StoreAccess = 230 Updater.createMemoryAccessInBB(SI, nullptr, Left, MemorySSA::Beginning); 231 Updater.insertDef(cast<MemoryDef>(StoreAccess)); 232 233 // MemoryPHI should be created when inserting the def 234 MemoryPhi *MP = MSSA.getMemoryAccess(Merge); 235 EXPECT_NE(MP, nullptr); 236 237 // Add the load 238 B.SetInsertPoint(Merge, Merge->begin()); 239 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg); 240 241 // Create the load memory acccess 242 MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB( 243 LoadInst, nullptr, Merge, MemorySSA::Beginning)); 244 Updater.insertUse(LoadAccess); 245 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess(); 246 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess)); 247 MSSA.verifyMemorySSA(); 248 } 249 250 TEST_F(MemorySSATest, SinkLoad) { 251 F = Function::Create( 252 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 253 GlobalValue::ExternalLinkage, "F", &M); 254 BasicBlock *Entry(BasicBlock::Create(C, "", F)); 255 BasicBlock *Left(BasicBlock::Create(C, "", F)); 256 BasicBlock *Right(BasicBlock::Create(C, "", F)); 257 BasicBlock *Merge(BasicBlock::Create(C, "", F)); 258 B.SetInsertPoint(Entry); 259 B.CreateCondBr(B.getTrue(), Left, Right); 260 B.SetInsertPoint(Left, Left->begin()); 261 Argument *PointerArg = &*F->arg_begin(); 262 B.SetInsertPoint(Left); 263 B.CreateBr(Merge); 264 B.SetInsertPoint(Right); 265 B.CreateBr(Merge); 266 267 // Load in left block 268 B.SetInsertPoint(Left, Left->begin()); 269 LoadInst *LoadInst1 = B.CreateLoad(B.getInt8Ty(), PointerArg); 270 // Store in merge block 271 B.SetInsertPoint(Merge, Merge->begin()); 272 B.CreateStore(B.getInt8(16), PointerArg); 273 274 setupAnalyses(); 275 MemorySSA &MSSA = *Analyses->MSSA; 276 MemorySSAUpdater Updater(&MSSA); 277 278 // Mimic sinking of a load: 279 // - clone load 280 // - insert in "exit" block 281 // - insert in mssa 282 // - remove from original block 283 284 LoadInst *LoadInstClone = cast<LoadInst>(LoadInst1->clone()); 285 Merge->getInstList().insert(Merge->begin(), LoadInstClone); 286 MemoryAccess * NewLoadAccess = 287 Updater.createMemoryAccessInBB(LoadInstClone, nullptr, 288 LoadInstClone->getParent(), 289 MemorySSA::Beginning); 290 Updater.insertUse(cast<MemoryUse>(NewLoadAccess)); 291 MSSA.verifyMemorySSA(); 292 Updater.removeMemoryAccess(MSSA.getMemoryAccess(LoadInst1)); 293 MSSA.verifyMemorySSA(); 294 } 295 296 TEST_F(MemorySSATest, MoveAStore) { 297 // We create a diamond where there is a in the entry, a store on one side, and 298 // a load at the end. After building MemorySSA, we test updating by moving 299 // the store from the side block to the entry block. This destroys the old 300 // access. 301 F = Function::Create( 302 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 303 GlobalValue::ExternalLinkage, "F", &M); 304 BasicBlock *Entry(BasicBlock::Create(C, "", F)); 305 BasicBlock *Left(BasicBlock::Create(C, "", F)); 306 BasicBlock *Right(BasicBlock::Create(C, "", F)); 307 BasicBlock *Merge(BasicBlock::Create(C, "", F)); 308 B.SetInsertPoint(Entry); 309 Argument *PointerArg = &*F->arg_begin(); 310 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg); 311 B.CreateCondBr(B.getTrue(), Left, Right); 312 B.SetInsertPoint(Left); 313 StoreInst *SideStore = B.CreateStore(B.getInt8(16), PointerArg); 314 BranchInst::Create(Merge, Left); 315 BranchInst::Create(Merge, Right); 316 B.SetInsertPoint(Merge); 317 B.CreateLoad(B.getInt8Ty(), PointerArg); 318 setupAnalyses(); 319 MemorySSA &MSSA = *Analyses->MSSA; 320 MemorySSAUpdater Updater(&MSSA); 321 // Move the store 322 SideStore->moveBefore(Entry->getTerminator()); 323 MemoryAccess *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore); 324 MemoryAccess *SideStoreAccess = MSSA.getMemoryAccess(SideStore); 325 MemoryAccess *NewStoreAccess = Updater.createMemoryAccessAfter( 326 SideStore, EntryStoreAccess, EntryStoreAccess); 327 EntryStoreAccess->replaceAllUsesWith(NewStoreAccess); 328 Updater.removeMemoryAccess(SideStoreAccess); 329 MSSA.verifyMemorySSA(); 330 } 331 332 TEST_F(MemorySSATest, MoveAStoreUpdater) { 333 // We create a diamond where there is a in the entry, a store on one side, and 334 // a load at the end. After building MemorySSA, we test updating by moving 335 // the store from the side block to the entry block. This destroys the old 336 // access. 337 F = Function::Create( 338 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 339 GlobalValue::ExternalLinkage, "F", &M); 340 BasicBlock *Entry(BasicBlock::Create(C, "", F)); 341 BasicBlock *Left(BasicBlock::Create(C, "", F)); 342 BasicBlock *Right(BasicBlock::Create(C, "", F)); 343 BasicBlock *Merge(BasicBlock::Create(C, "", F)); 344 B.SetInsertPoint(Entry); 345 Argument *PointerArg = &*F->arg_begin(); 346 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg); 347 B.CreateCondBr(B.getTrue(), Left, Right); 348 B.SetInsertPoint(Left); 349 auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg); 350 BranchInst::Create(Merge, Left); 351 BranchInst::Create(Merge, Right); 352 B.SetInsertPoint(Merge); 353 auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg); 354 setupAnalyses(); 355 MemorySSA &MSSA = *Analyses->MSSA; 356 MemorySSAUpdater Updater(&MSSA); 357 358 // Move the store 359 SideStore->moveBefore(Entry->getTerminator()); 360 auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore); 361 auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore); 362 auto *NewStoreAccess = Updater.createMemoryAccessAfter( 363 SideStore, EntryStoreAccess, EntryStoreAccess); 364 // Before, the load will point to a phi of the EntryStore and SideStore. 365 auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad)); 366 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess())); 367 MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess()); 368 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess); 369 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess); 370 Updater.removeMemoryAccess(SideStoreAccess); 371 Updater.insertDef(cast<MemoryDef>(NewStoreAccess)); 372 // After it's a phi of the new side store access. 373 EXPECT_EQ(MergePhi->getIncomingValue(0), NewStoreAccess); 374 EXPECT_EQ(MergePhi->getIncomingValue(1), NewStoreAccess); 375 MSSA.verifyMemorySSA(); 376 } 377 378 TEST_F(MemorySSATest, MoveAStoreUpdaterMove) { 379 // We create a diamond where there is a in the entry, a store on one side, and 380 // a load at the end. After building MemorySSA, we test updating by moving 381 // the store from the side block to the entry block. This does not destroy 382 // the old access. 383 F = Function::Create( 384 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 385 GlobalValue::ExternalLinkage, "F", &M); 386 BasicBlock *Entry(BasicBlock::Create(C, "", F)); 387 BasicBlock *Left(BasicBlock::Create(C, "", F)); 388 BasicBlock *Right(BasicBlock::Create(C, "", F)); 389 BasicBlock *Merge(BasicBlock::Create(C, "", F)); 390 B.SetInsertPoint(Entry); 391 Argument *PointerArg = &*F->arg_begin(); 392 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg); 393 B.CreateCondBr(B.getTrue(), Left, Right); 394 B.SetInsertPoint(Left); 395 auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg); 396 BranchInst::Create(Merge, Left); 397 BranchInst::Create(Merge, Right); 398 B.SetInsertPoint(Merge); 399 auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg); 400 setupAnalyses(); 401 MemorySSA &MSSA = *Analyses->MSSA; 402 MemorySSAUpdater Updater(&MSSA); 403 404 // Move the store 405 auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore); 406 auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore); 407 // Before, the load will point to a phi of the EntryStore and SideStore. 408 auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad)); 409 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess())); 410 MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess()); 411 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess); 412 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess); 413 SideStore->moveBefore(*EntryStore->getParent(), ++EntryStore->getIterator()); 414 Updater.moveAfter(SideStoreAccess, EntryStoreAccess); 415 // After, it's a phi of the side store. 416 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess); 417 EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess); 418 419 MSSA.verifyMemorySSA(); 420 } 421 422 TEST_F(MemorySSATest, MoveAStoreAllAround) { 423 // We create a diamond where there is a in the entry, a store on one side, and 424 // a load at the end. After building MemorySSA, we test updating by moving 425 // the store from the side block to the entry block, then to the other side 426 // block, then to before the load. This does not destroy the old access. 427 F = Function::Create( 428 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 429 GlobalValue::ExternalLinkage, "F", &M); 430 BasicBlock *Entry(BasicBlock::Create(C, "", F)); 431 BasicBlock *Left(BasicBlock::Create(C, "", F)); 432 BasicBlock *Right(BasicBlock::Create(C, "", F)); 433 BasicBlock *Merge(BasicBlock::Create(C, "", F)); 434 B.SetInsertPoint(Entry); 435 Argument *PointerArg = &*F->arg_begin(); 436 StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg); 437 B.CreateCondBr(B.getTrue(), Left, Right); 438 B.SetInsertPoint(Left); 439 auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg); 440 BranchInst::Create(Merge, Left); 441 BranchInst::Create(Merge, Right); 442 B.SetInsertPoint(Merge); 443 auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg); 444 setupAnalyses(); 445 MemorySSA &MSSA = *Analyses->MSSA; 446 MemorySSAUpdater Updater(&MSSA); 447 448 // Move the store 449 auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore); 450 auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore); 451 // Before, the load will point to a phi of the EntryStore and SideStore. 452 auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad)); 453 EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess())); 454 MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess()); 455 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess); 456 EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess); 457 // Move the store before the entry store 458 SideStore->moveBefore(*EntryStore->getParent(), EntryStore->getIterator()); 459 Updater.moveBefore(SideStoreAccess, EntryStoreAccess); 460 // After, it's a phi of the entry store. 461 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess); 462 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess); 463 MSSA.verifyMemorySSA(); 464 // Now move the store to the right branch 465 SideStore->moveBefore(*Right, Right->begin()); 466 Updater.moveToPlace(SideStoreAccess, Right, MemorySSA::Beginning); 467 MSSA.verifyMemorySSA(); 468 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess); 469 EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess); 470 // Now move it before the load 471 SideStore->moveBefore(MergeLoad); 472 Updater.moveBefore(SideStoreAccess, LoadAccess); 473 EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess); 474 EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess); 475 MSSA.verifyMemorySSA(); 476 } 477 478 TEST_F(MemorySSATest, RemoveAPhi) { 479 // We create a diamond where there is a store on one side, and then a load 480 // after the merge point. This enables us to test a bunch of different 481 // removal cases. 482 F = Function::Create( 483 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 484 GlobalValue::ExternalLinkage, "F", &M); 485 BasicBlock *Entry(BasicBlock::Create(C, "", F)); 486 BasicBlock *Left(BasicBlock::Create(C, "", F)); 487 BasicBlock *Right(BasicBlock::Create(C, "", F)); 488 BasicBlock *Merge(BasicBlock::Create(C, "", F)); 489 B.SetInsertPoint(Entry); 490 B.CreateCondBr(B.getTrue(), Left, Right); 491 B.SetInsertPoint(Left); 492 Argument *PointerArg = &*F->arg_begin(); 493 StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg); 494 BranchInst::Create(Merge, Left); 495 BranchInst::Create(Merge, Right); 496 B.SetInsertPoint(Merge); 497 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg); 498 499 setupAnalyses(); 500 MemorySSA &MSSA = *Analyses->MSSA; 501 MemorySSAUpdater Updater(&MSSA); 502 503 // Before, the load will be a use of a phi<store, liveonentry>. 504 MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst)); 505 MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst)); 506 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess(); 507 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess)); 508 // Kill the store 509 Updater.removeMemoryAccess(StoreAccess); 510 MemoryPhi *MP = cast<MemoryPhi>(DefiningAccess); 511 // Verify the phi ended up as liveonentry, liveonentry 512 for (auto &Op : MP->incoming_values()) 513 EXPECT_TRUE(MSSA.isLiveOnEntryDef(cast<MemoryAccess>(Op.get()))); 514 // Replace the phi uses with the live on entry def 515 MP->replaceAllUsesWith(MSSA.getLiveOnEntryDef()); 516 // Verify the load is now defined by liveOnEntryDef 517 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess())); 518 // Remove the PHI 519 Updater.removeMemoryAccess(MP); 520 MSSA.verifyMemorySSA(); 521 } 522 523 TEST_F(MemorySSATest, RemoveMemoryAccess) { 524 // We create a diamond where there is a store on one side, and then a load 525 // after the merge point. This enables us to test a bunch of different 526 // removal cases. 527 F = Function::Create( 528 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 529 GlobalValue::ExternalLinkage, "F", &M); 530 BasicBlock *Entry(BasicBlock::Create(C, "", F)); 531 BasicBlock *Left(BasicBlock::Create(C, "", F)); 532 BasicBlock *Right(BasicBlock::Create(C, "", F)); 533 BasicBlock *Merge(BasicBlock::Create(C, "", F)); 534 B.SetInsertPoint(Entry); 535 B.CreateCondBr(B.getTrue(), Left, Right); 536 B.SetInsertPoint(Left); 537 Argument *PointerArg = &*F->arg_begin(); 538 StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg); 539 BranchInst::Create(Merge, Left); 540 BranchInst::Create(Merge, Right); 541 B.SetInsertPoint(Merge); 542 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg); 543 544 setupAnalyses(); 545 MemorySSA &MSSA = *Analyses->MSSA; 546 MemorySSAWalker *Walker = Analyses->Walker; 547 MemorySSAUpdater Updater(&MSSA); 548 549 // Before, the load will be a use of a phi<store, liveonentry>. It should be 550 // the same after. 551 MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst)); 552 MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst)); 553 MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess(); 554 EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess)); 555 // The load is currently clobbered by one of the phi arguments, so the walker 556 // should determine the clobbering access as the phi. 557 EXPECT_EQ(DefiningAccess, Walker->getClobberingMemoryAccess(LoadInst)); 558 Updater.removeMemoryAccess(StoreAccess); 559 MSSA.verifyMemorySSA(); 560 // After the removeaccess, let's see if we got the right accesses 561 // The load should still point to the phi ... 562 EXPECT_EQ(DefiningAccess, LoadAccess->getDefiningAccess()); 563 // but we should now get live on entry for the clobbering definition of the 564 // load, since it will walk past the phi node since every argument is the 565 // same. 566 // XXX: This currently requires either removing the phi or resetting optimized 567 // on the load 568 569 EXPECT_FALSE( 570 MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst))); 571 // If we reset optimized, we get live on entry. 572 LoadAccess->resetOptimized(); 573 EXPECT_TRUE( 574 MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst))); 575 // The phi should now be a two entry phi with two live on entry defs. 576 for (const auto &Op : DefiningAccess->operands()) { 577 MemoryAccess *Operand = cast<MemoryAccess>(&*Op); 578 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Operand)); 579 } 580 581 // Now we try to remove the single valued phi 582 Updater.removeMemoryAccess(DefiningAccess); 583 MSSA.verifyMemorySSA(); 584 // Now the load should be a load of live on entry. 585 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess())); 586 } 587 588 // We had a bug with caching where the walker would report MemoryDef#3's clobber 589 // (below) was MemoryDef#1. 590 // 591 // define void @F(i8*) { 592 // %A = alloca i8, i8 1 593 // ; 1 = MemoryDef(liveOnEntry) 594 // store i8 0, i8* %A 595 // ; 2 = MemoryDef(1) 596 // store i8 1, i8* %A 597 // ; 3 = MemoryDef(2) 598 // store i8 2, i8* %A 599 // } 600 TEST_F(MemorySSATest, TestTripleStore) { 601 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false), 602 GlobalValue::ExternalLinkage, "F", &M); 603 B.SetInsertPoint(BasicBlock::Create(C, "", F)); 604 Type *Int8 = Type::getInt8Ty(C); 605 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A"); 606 StoreInst *S1 = B.CreateStore(ConstantInt::get(Int8, 0), Alloca); 607 StoreInst *S2 = B.CreateStore(ConstantInt::get(Int8, 1), Alloca); 608 StoreInst *S3 = B.CreateStore(ConstantInt::get(Int8, 2), Alloca); 609 610 setupAnalyses(); 611 MemorySSA &MSSA = *Analyses->MSSA; 612 MemorySSAWalker *Walker = Analyses->Walker; 613 614 unsigned I = 0; 615 for (StoreInst *V : {S1, S2, S3}) { 616 // Everything should be clobbered by its defining access 617 MemoryAccess *DefiningAccess = MSSA.getMemoryAccess(V)->getDefiningAccess(); 618 MemoryAccess *WalkerClobber = Walker->getClobberingMemoryAccess(V); 619 EXPECT_EQ(DefiningAccess, WalkerClobber) 620 << "Store " << I << " doesn't have the correct clobbering access"; 621 // EXPECT_EQ expands such that if we increment I above, it won't get 622 // incremented except when we try to print the error message. 623 ++I; 624 } 625 } 626 627 // ...And fixing the above bug made it obvious that, when walking, MemorySSA's 628 // walker was caching the initial node it walked. This was fine (albeit 629 // mostly redundant) unless the initial node being walked is a clobber for the 630 // query. In that case, we'd cache that the node clobbered itself. 631 TEST_F(MemorySSATest, TestStoreAndLoad) { 632 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false), 633 GlobalValue::ExternalLinkage, "F", &M); 634 B.SetInsertPoint(BasicBlock::Create(C, "", F)); 635 Type *Int8 = Type::getInt8Ty(C); 636 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A"); 637 Instruction *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca); 638 Instruction *LI = B.CreateLoad(Int8, Alloca); 639 640 setupAnalyses(); 641 MemorySSA &MSSA = *Analyses->MSSA; 642 MemorySSAWalker *Walker = Analyses->Walker; 643 644 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LI); 645 EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SI)); 646 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SI))); 647 } 648 649 // Another bug (related to the above two fixes): It was noted that, given the 650 // following code: 651 // ; 1 = MemoryDef(liveOnEntry) 652 // store i8 0, i8* %1 653 // 654 // ...A query to getClobberingMemoryAccess(MemoryAccess*, MemoryLocation) would 655 // hand back the store (correctly). A later call to 656 // getClobberingMemoryAccess(const Instruction*) would also hand back the store 657 // (incorrectly; it should return liveOnEntry). 658 // 659 // This test checks that repeated calls to either function returns what they're 660 // meant to. 661 TEST_F(MemorySSATest, TestStoreDoubleQuery) { 662 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false), 663 GlobalValue::ExternalLinkage, "F", &M); 664 B.SetInsertPoint(BasicBlock::Create(C, "", F)); 665 Type *Int8 = Type::getInt8Ty(C); 666 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A"); 667 StoreInst *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca); 668 669 setupAnalyses(); 670 MemorySSA &MSSA = *Analyses->MSSA; 671 MemorySSAWalker *Walker = Analyses->Walker; 672 673 MemoryAccess *StoreAccess = MSSA.getMemoryAccess(SI); 674 MemoryLocation StoreLoc = MemoryLocation::get(SI); 675 MemoryAccess *Clobber = 676 Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc); 677 MemoryAccess *LiveOnEntry = Walker->getClobberingMemoryAccess(SI); 678 679 EXPECT_EQ(Clobber, StoreAccess); 680 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry)); 681 682 // Try again (with entries in the cache already) for good measure... 683 Clobber = Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc); 684 LiveOnEntry = Walker->getClobberingMemoryAccess(SI); 685 EXPECT_EQ(Clobber, StoreAccess); 686 EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry)); 687 } 688 689 // Bug: During phi optimization, the walker wouldn't cache to the proper result 690 // in the farthest-walked BB. 691 // 692 // Specifically, it would assume that whatever we walked to was a clobber. 693 // "Whatever we walked to" isn't a clobber if we hit a cache entry. 694 // 695 // ...So, we need a test case that looks like: 696 // A 697 // / \ 698 // B | 699 // \ / 700 // C 701 // 702 // Where, when we try to optimize a thing in 'C', a blocker is found in 'B'. 703 // The walk must determine that the blocker exists by using cache entries *while 704 // walking* 'B'. 705 TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) { 706 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false), 707 GlobalValue::ExternalLinkage, "F", &M); 708 B.SetInsertPoint(BasicBlock::Create(C, "A", F)); 709 Type *Int8 = Type::getInt8Ty(C); 710 Constant *One = ConstantInt::get(Int8, 1); 711 Constant *Zero = ConstantInt::get(Int8, 0); 712 Value *AllocA = B.CreateAlloca(Int8, One, "a"); 713 Value *AllocB = B.CreateAlloca(Int8, One, "b"); 714 BasicBlock *IfThen = BasicBlock::Create(C, "B", F); 715 BasicBlock *IfEnd = BasicBlock::Create(C, "C", F); 716 717 B.CreateCondBr(UndefValue::get(Type::getInt1Ty(C)), IfThen, IfEnd); 718 719 B.SetInsertPoint(IfThen); 720 Instruction *FirstStore = B.CreateStore(Zero, AllocA); 721 B.CreateStore(Zero, AllocB); 722 Instruction *ALoad0 = B.CreateLoad(Int8, AllocA, ""); 723 Instruction *BStore = B.CreateStore(Zero, AllocB); 724 // Due to use optimization/etc. we make a store to A, which is removed after 725 // we build MSSA. This helps keep the test case simple-ish. 726 Instruction *KillStore = B.CreateStore(Zero, AllocA); 727 Instruction *ALoad = B.CreateLoad(Int8, AllocA, ""); 728 B.CreateBr(IfEnd); 729 730 B.SetInsertPoint(IfEnd); 731 Instruction *BelowPhi = B.CreateStore(Zero, AllocA); 732 733 setupAnalyses(); 734 MemorySSA &MSSA = *Analyses->MSSA; 735 MemorySSAWalker *Walker = Analyses->Walker; 736 MemorySSAUpdater Updater(&MSSA); 737 738 // Kill `KillStore`; it exists solely so that the load after it won't be 739 // optimized to FirstStore. 740 Updater.removeMemoryAccess(MSSA.getMemoryAccess(KillStore)); 741 KillStore->eraseFromParent(); 742 auto *ALoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(ALoad)); 743 EXPECT_EQ(ALoadMA->getDefiningAccess(), MSSA.getMemoryAccess(BStore)); 744 745 // Populate the cache for the store to AllocB directly after FirstStore. It 746 // should point to something in block B (so something in D can't be optimized 747 // to it). 748 MemoryAccess *Load0Clobber = Walker->getClobberingMemoryAccess(ALoad0); 749 EXPECT_EQ(MSSA.getMemoryAccess(FirstStore), Load0Clobber); 750 751 // If the bug exists, this will introduce a bad cache entry for %a on BStore. 752 // It will point to the store to %b after FirstStore. This only happens during 753 // phi optimization. 754 MemoryAccess *BottomClobber = Walker->getClobberingMemoryAccess(BelowPhi); 755 MemoryAccess *Phi = MSSA.getMemoryAccess(IfEnd); 756 EXPECT_EQ(BottomClobber, Phi); 757 758 // This query will first check the cache for {%a, BStore}. It should point to 759 // FirstStore, not to the store after FirstStore. 760 MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(ALoad); 761 EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore)); 762 } 763 764 // Test that our walker properly handles loads with the invariant group 765 // attribute. It's a bit hacky, since we add the invariant attribute *after* 766 // building MSSA. Otherwise, the use optimizer will optimize it for us, which 767 // isn't what we want. 768 // FIXME: It may be easier/cleaner to just add an 'optimize uses?' flag to MSSA. 769 TEST_F(MemorySSATest, WalkerInvariantLoadOpt) { 770 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false), 771 GlobalValue::ExternalLinkage, "F", &M); 772 B.SetInsertPoint(BasicBlock::Create(C, "", F)); 773 Type *Int8 = Type::getInt8Ty(C); 774 Constant *One = ConstantInt::get(Int8, 1); 775 Value *AllocA = B.CreateAlloca(Int8, One, ""); 776 777 Instruction *Store = B.CreateStore(One, AllocA); 778 Instruction *Load = B.CreateLoad(Int8, AllocA); 779 780 setupAnalyses(); 781 MemorySSA &MSSA = *Analyses->MSSA; 782 MemorySSAWalker *Walker = Analyses->Walker; 783 784 auto *LoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(Load)); 785 auto *StoreMA = cast<MemoryDef>(MSSA.getMemoryAccess(Store)); 786 EXPECT_EQ(LoadMA->getDefiningAccess(), StoreMA); 787 788 // ...At the time of writing, no cache should exist for LoadMA. Be a bit 789 // flexible to future changes. 790 Walker->invalidateInfo(LoadMA); 791 Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(C, {})); 792 793 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LoadMA); 794 EXPECT_EQ(LoadClobber, MSSA.getLiveOnEntryDef()); 795 } 796 797 // Test loads get reoptimized properly by the walker. 798 TEST_F(MemorySSATest, WalkerReopt) { 799 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false), 800 GlobalValue::ExternalLinkage, "F", &M); 801 B.SetInsertPoint(BasicBlock::Create(C, "", F)); 802 Type *Int8 = Type::getInt8Ty(C); 803 Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A"); 804 Instruction *SIA = B.CreateStore(ConstantInt::get(Int8, 0), AllocaA); 805 Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B"); 806 Instruction *SIB = B.CreateStore(ConstantInt::get(Int8, 0), AllocaB); 807 Instruction *LIA = B.CreateLoad(Int8, AllocaA); 808 809 setupAnalyses(); 810 MemorySSA &MSSA = *Analyses->MSSA; 811 MemorySSAWalker *Walker = Analyses->Walker; 812 MemorySSAUpdater Updater(&MSSA); 813 814 MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LIA); 815 MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LIA)); 816 EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SIA)); 817 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SIA))); 818 Updater.removeMemoryAccess(LoadAccess); 819 820 // Create the load memory access pointing to an unoptimized place. 821 MemoryUse *NewLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB( 822 LIA, MSSA.getMemoryAccess(SIB), LIA->getParent(), MemorySSA::End)); 823 // This should it cause it to be optimized 824 EXPECT_EQ(Walker->getClobberingMemoryAccess(NewLoadAccess), LoadClobber); 825 EXPECT_EQ(NewLoadAccess->getDefiningAccess(), LoadClobber); 826 } 827 828 // Test out MemorySSAUpdater::moveBefore 829 TEST_F(MemorySSATest, MoveAboveMemoryDef) { 830 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false), 831 GlobalValue::ExternalLinkage, "F", &M); 832 B.SetInsertPoint(BasicBlock::Create(C, "", F)); 833 834 Type *Int8 = Type::getInt8Ty(C); 835 Value *A = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A"); 836 Value *B_ = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B"); 837 Value *C = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C"); 838 839 StoreInst *StoreA0 = B.CreateStore(ConstantInt::get(Int8, 0), A); 840 StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 0), B_); 841 LoadInst *LoadB = B.CreateLoad(Int8, B_); 842 StoreInst *StoreA1 = B.CreateStore(ConstantInt::get(Int8, 4), A); 843 StoreInst *StoreC = B.CreateStore(ConstantInt::get(Int8, 4), C); 844 StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 4), A); 845 LoadInst *LoadC = B.CreateLoad(Int8, C); 846 847 setupAnalyses(); 848 MemorySSA &MSSA = *Analyses->MSSA; 849 MemorySSAWalker &Walker = *Analyses->Walker; 850 851 MemorySSAUpdater Updater(&MSSA); 852 StoreC->moveBefore(StoreB); 853 Updater.moveBefore(cast<MemoryDef>(MSSA.getMemoryAccess(StoreC)), 854 cast<MemoryDef>(MSSA.getMemoryAccess(StoreB))); 855 856 MSSA.verifyMemorySSA(); 857 858 EXPECT_EQ(MSSA.getMemoryAccess(StoreB)->getDefiningAccess(), 859 MSSA.getMemoryAccess(StoreC)); 860 EXPECT_EQ(MSSA.getMemoryAccess(StoreC)->getDefiningAccess(), 861 MSSA.getMemoryAccess(StoreA0)); 862 EXPECT_EQ(MSSA.getMemoryAccess(StoreA2)->getDefiningAccess(), 863 MSSA.getMemoryAccess(StoreA1)); 864 EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadB), 865 MSSA.getMemoryAccess(StoreB)); 866 EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadC), 867 MSSA.getMemoryAccess(StoreC)); 868 869 // exercise block numbering 870 EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreC), 871 MSSA.getMemoryAccess(StoreB))); 872 EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreA1), 873 MSSA.getMemoryAccess(StoreA2))); 874 } 875 876 TEST_F(MemorySSATest, Irreducible) { 877 // Create the equivalent of 878 // x = something 879 // if (...) 880 // goto second_loop_entry 881 // while (...) { 882 // second_loop_entry: 883 // } 884 // use(x) 885 886 SmallVector<PHINode *, 8> Inserted; 887 IRBuilder<> B(C); 888 F = Function::Create( 889 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 890 GlobalValue::ExternalLinkage, "F", &M); 891 892 // Make blocks 893 BasicBlock *IfBB = BasicBlock::Create(C, "if", F); 894 BasicBlock *LoopStartBB = BasicBlock::Create(C, "loopstart", F); 895 BasicBlock *LoopMainBB = BasicBlock::Create(C, "loopmain", F); 896 BasicBlock *AfterLoopBB = BasicBlock::Create(C, "afterloop", F); 897 B.SetInsertPoint(IfBB); 898 B.CreateCondBr(B.getTrue(), LoopMainBB, LoopStartBB); 899 B.SetInsertPoint(LoopStartBB); 900 B.CreateBr(LoopMainBB); 901 B.SetInsertPoint(LoopMainBB); 902 B.CreateCondBr(B.getTrue(), LoopStartBB, AfterLoopBB); 903 B.SetInsertPoint(AfterLoopBB); 904 Argument *FirstArg = &*F->arg_begin(); 905 setupAnalyses(); 906 MemorySSA &MSSA = *Analyses->MSSA; 907 MemorySSAUpdater Updater(&MSSA); 908 // Create the load memory acccess 909 LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), FirstArg); 910 MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB( 911 LoadInst, nullptr, AfterLoopBB, MemorySSA::Beginning)); 912 Updater.insertUse(LoadAccess); 913 MSSA.verifyMemorySSA(); 914 } 915 916 TEST_F(MemorySSATest, MoveToBeforeLiveOnEntryInvalidatesCache) { 917 // Create: 918 // %1 = alloca i8 919 // ; 1 = MemoryDef(liveOnEntry) 920 // store i8 0, i8* %1 921 // ; 2 = MemoryDef(1) 922 // store i8 0, i8* %1 923 // 924 // ...And be sure that MSSA's caching doesn't give us `1` for the clobber of 925 // `2` after `1` is removed. 926 IRBuilder<> B(C); 927 F = Function::Create( 928 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 929 GlobalValue::ExternalLinkage, "F", &M); 930 931 BasicBlock *Entry = BasicBlock::Create(C, "if", F); 932 B.SetInsertPoint(Entry); 933 934 Value *A = B.CreateAlloca(B.getInt8Ty()); 935 StoreInst *StoreA = B.CreateStore(B.getInt8(0), A); 936 StoreInst *StoreB = B.CreateStore(B.getInt8(0), A); 937 938 setupAnalyses(); 939 940 MemorySSA &MSSA = *Analyses->MSSA; 941 942 auto *DefA = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA)); 943 auto *DefB = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB)); 944 945 MemoryAccess *BClobber = MSSA.getWalker()->getClobberingMemoryAccess(DefB); 946 ASSERT_EQ(DefA, BClobber); 947 948 MemorySSAUpdater(&MSSA).removeMemoryAccess(DefA); 949 StoreA->eraseFromParent(); 950 951 EXPECT_EQ(DefB->getDefiningAccess(), MSSA.getLiveOnEntryDef()); 952 953 EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefB), 954 MSSA.getLiveOnEntryDef()) 955 << "(DefA = " << DefA << ")"; 956 } 957 958 TEST_F(MemorySSATest, RemovingDefInvalidatesCache) { 959 // Create: 960 // %x = alloca i8 961 // %y = alloca i8 962 // ; 1 = MemoryDef(liveOnEntry) 963 // store i8 0, i8* %x 964 // ; 2 = MemoryDef(1) 965 // store i8 0, i8* %y 966 // ; 3 = MemoryDef(2) 967 // store i8 0, i8* %x 968 // 969 // And be sure that MSSA's caching handles the removal of def `1` 970 // appropriately. 971 IRBuilder<> B(C); 972 F = Function::Create( 973 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 974 GlobalValue::ExternalLinkage, "F", &M); 975 976 BasicBlock *Entry = BasicBlock::Create(C, "if", F); 977 B.SetInsertPoint(Entry); 978 979 Value *X = B.CreateAlloca(B.getInt8Ty()); 980 Value *Y = B.CreateAlloca(B.getInt8Ty()); 981 StoreInst *StoreX1 = B.CreateStore(B.getInt8(0), X); 982 StoreInst *StoreY = B.CreateStore(B.getInt8(0), Y); 983 StoreInst *StoreX2 = B.CreateStore(B.getInt8(0), X); 984 985 setupAnalyses(); 986 987 MemorySSA &MSSA = *Analyses->MSSA; 988 989 auto *DefX1 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX1)); 990 auto *DefY = cast<MemoryDef>(MSSA.getMemoryAccess(StoreY)); 991 auto *DefX2 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX2)); 992 993 EXPECT_EQ(DefX2->getDefiningAccess(), DefY); 994 MemoryAccess *X2Clobber = MSSA.getWalker()->getClobberingMemoryAccess(DefX2); 995 ASSERT_EQ(DefX1, X2Clobber); 996 997 MemorySSAUpdater(&MSSA).removeMemoryAccess(DefX1); 998 StoreX1->eraseFromParent(); 999 1000 EXPECT_EQ(DefX2->getDefiningAccess(), DefY); 1001 EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefX2), 1002 MSSA.getLiveOnEntryDef()) 1003 << "(DefX1 = " << DefX1 << ")"; 1004 } 1005 1006 // Test Must alias for optimized defs. 1007 TEST_F(MemorySSATest, TestStoreMustAlias) { 1008 F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false), 1009 GlobalValue::ExternalLinkage, "F", &M); 1010 B.SetInsertPoint(BasicBlock::Create(C, "", F)); 1011 Type *Int8 = Type::getInt8Ty(C); 1012 Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A"); 1013 Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B"); 1014 StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaA); 1015 StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaB); 1016 StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaA); 1017 StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaB); 1018 StoreInst *SA3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaA); 1019 StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaB); 1020 1021 setupAnalyses(); 1022 MemorySSA &MSSA = *Analyses->MSSA; 1023 MemorySSAWalker *Walker = Analyses->Walker; 1024 1025 unsigned I = 0; 1026 for (StoreInst *V : {SA1, SB1, SA2, SB2, SA3, SB3}) { 1027 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V)); 1028 EXPECT_EQ(MemDef->isOptimized(), false) 1029 << "Store " << I << " is optimized from the start?"; 1030 if (V == SA1) 1031 Walker->getClobberingMemoryAccess(V); 1032 else { 1033 MemoryAccess *Def = MemDef->getDefiningAccess(); 1034 MemoryAccess *Clob = Walker->getClobberingMemoryAccess(V); 1035 EXPECT_NE(Def, Clob) << "Store " << I 1036 << " has Defining Access equal to Clobbering Access"; 1037 } 1038 EXPECT_EQ(MemDef->isOptimized(), true) 1039 << "Store " << I << " was not optimized"; 1040 // EXPECT_EQ expands such that if we increment I above, it won't get 1041 // incremented except when we try to print the error message. 1042 ++I; 1043 } 1044 } 1045 1046 // Test May alias for optimized defs. 1047 TEST_F(MemorySSATest, TestStoreMayAlias) { 1048 F = Function::Create(FunctionType::get(B.getVoidTy(), 1049 {B.getInt8PtrTy(), B.getInt8PtrTy()}, 1050 false), 1051 GlobalValue::ExternalLinkage, "F", &M); 1052 B.SetInsertPoint(BasicBlock::Create(C, "", F)); 1053 Type *Int8 = Type::getInt8Ty(C); 1054 auto *ArgIt = F->arg_begin(); 1055 Argument *PointerA = &*ArgIt; 1056 Argument *PointerB = &*(++ArgIt); 1057 Value *AllocaC = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C"); 1058 // Store into arg1, must alias because it's LOE => must 1059 StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 0), PointerA); 1060 // Store into arg2, may alias store to arg1 => may 1061 StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), PointerB); 1062 // Store into aloca, no alias with args, so must alias LOE => must 1063 StoreInst *SC1 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaC); 1064 // Store into arg1, may alias store to arg2 => may 1065 StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 3), PointerA); 1066 // Store into arg2, may alias store to arg1 => may 1067 StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 4), PointerB); 1068 // Store into aloca, no alias with args, so must alias SC1 => must 1069 StoreInst *SC2 = B.CreateStore(ConstantInt::get(Int8, 5), AllocaC); 1070 // Store into arg2, must alias store to arg2 => must 1071 StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 6), PointerB); 1072 std::initializer_list<StoreInst *> Sts = {SA1, SB1, SC1, SA2, SB2, SC2, SB3}; 1073 1074 setupAnalyses(); 1075 MemorySSA &MSSA = *Analyses->MSSA; 1076 MemorySSAWalker *Walker = Analyses->Walker; 1077 1078 unsigned I = 0; 1079 for (StoreInst *V : Sts) { 1080 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V)); 1081 EXPECT_EQ(MemDef->isOptimized(), false) 1082 << "Store " << I << " is optimized from the start?"; 1083 ++I; 1084 } 1085 1086 for (StoreInst *V : Sts) 1087 Walker->getClobberingMemoryAccess(V); 1088 1089 I = 0; 1090 for (StoreInst *V : Sts) { 1091 MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V)); 1092 EXPECT_EQ(MemDef->isOptimized(), true) 1093 << "Store " << I << " was not optimized"; 1094 // EXPECT_EQ expands such that if we increment I above, it won't get 1095 // incremented except when we try to print the error message. 1096 ++I; 1097 } 1098 } 1099 1100 TEST_F(MemorySSATest, LifetimeMarkersAreClobbers) { 1101 // Example code: 1102 // define void @a(i8* %foo) { 1103 // %bar = getelementptr i8, i8* %foo, i64 1 1104 // %baz = getelementptr i8, i8* %foo, i64 2 1105 // store i8 0, i8* %foo 1106 // store i8 0, i8* %bar 1107 // call void @llvm.lifetime.end.p0i8(i64 3, i8* %foo) 1108 // call void @llvm.lifetime.start.p0i8(i64 3, i8* %foo) 1109 // store i8 0, i8* %foo 1110 // store i8 0, i8* %bar 1111 // call void @llvm.memset.p0i8(i8* %baz, i8 0, i64 1) 1112 // ret void 1113 // } 1114 // 1115 // Patterns like this are possible after inlining; the stores to %foo and %bar 1116 // should both be clobbered by the lifetime.start call if they're dominated by 1117 // it. 1118 1119 IRBuilder<> B(C); 1120 F = Function::Create( 1121 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 1122 GlobalValue::ExternalLinkage, "F", &M); 1123 1124 // Make blocks 1125 BasicBlock *Entry = BasicBlock::Create(C, "entry", F); 1126 1127 B.SetInsertPoint(Entry); 1128 Value *Foo = &*F->arg_begin(); 1129 1130 Value *Bar = B.CreateGEP(B.getInt8Ty(), Foo, B.getInt64(1), "bar"); 1131 Value *Baz = B.CreateGEP(B.getInt8Ty(), Foo, B.getInt64(2), "baz"); 1132 1133 B.CreateStore(B.getInt8(0), Foo); 1134 B.CreateStore(B.getInt8(0), Bar); 1135 1136 auto GetLifetimeIntrinsic = [&](Intrinsic::ID ID) { 1137 return Intrinsic::getDeclaration(&M, ID, {Foo->getType()}); 1138 }; 1139 1140 B.CreateCall(GetLifetimeIntrinsic(Intrinsic::lifetime_end), 1141 {B.getInt64(3), Foo}); 1142 Instruction *LifetimeStart = B.CreateCall( 1143 GetLifetimeIntrinsic(Intrinsic::lifetime_start), {B.getInt64(3), Foo}); 1144 1145 Instruction *FooStore = B.CreateStore(B.getInt8(0), Foo); 1146 Instruction *BarStore = B.CreateStore(B.getInt8(0), Bar); 1147 Instruction *BazMemSet = B.CreateMemSet(Baz, B.getInt8(0), 1, Align(1)); 1148 1149 setupAnalyses(); 1150 MemorySSA &MSSA = *Analyses->MSSA; 1151 1152 MemoryAccess *LifetimeStartAccess = MSSA.getMemoryAccess(LifetimeStart); 1153 ASSERT_NE(LifetimeStartAccess, nullptr); 1154 1155 MemoryAccess *FooAccess = MSSA.getMemoryAccess(FooStore); 1156 ASSERT_NE(FooAccess, nullptr); 1157 1158 MemoryAccess *BarAccess = MSSA.getMemoryAccess(BarStore); 1159 ASSERT_NE(BarAccess, nullptr); 1160 1161 MemoryAccess *BazAccess = MSSA.getMemoryAccess(BazMemSet); 1162 ASSERT_NE(BazAccess, nullptr); 1163 1164 MemoryAccess *FooClobber = 1165 MSSA.getWalker()->getClobberingMemoryAccess(FooAccess); 1166 EXPECT_EQ(FooClobber, LifetimeStartAccess); 1167 1168 MemoryAccess *BarClobber = 1169 MSSA.getWalker()->getClobberingMemoryAccess(BarAccess); 1170 EXPECT_EQ(BarClobber, LifetimeStartAccess); 1171 1172 MemoryAccess *BazClobber = 1173 MSSA.getWalker()->getClobberingMemoryAccess(BazAccess); 1174 EXPECT_EQ(BazClobber, LifetimeStartAccess); 1175 1176 MemoryAccess *LifetimeStartClobber = 1177 MSSA.getWalker()->getClobberingMemoryAccess( 1178 LifetimeStartAccess, MemoryLocation::getAfter(Foo)); 1179 EXPECT_EQ(LifetimeStartClobber, LifetimeStartAccess); 1180 } 1181 1182 TEST_F(MemorySSATest, DefOptimizationsAreInvalidatedOnMoving) { 1183 IRBuilder<> B(C); 1184 F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getInt1Ty()}, false), 1185 GlobalValue::ExternalLinkage, "F", &M); 1186 1187 // Make a CFG like 1188 // entry 1189 // / \ 1190 // a b 1191 // \ / 1192 // c 1193 // 1194 // Put a def in A and a def in B, move the def from A -> B, observe as the 1195 // optimization is invalidated. 1196 BasicBlock *Entry = BasicBlock::Create(C, "entry", F); 1197 BasicBlock *BlockA = BasicBlock::Create(C, "a", F); 1198 BasicBlock *BlockB = BasicBlock::Create(C, "b", F); 1199 BasicBlock *BlockC = BasicBlock::Create(C, "c", F); 1200 1201 B.SetInsertPoint(Entry); 1202 Type *Int8 = Type::getInt8Ty(C); 1203 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "alloc"); 1204 StoreInst *StoreEntry = B.CreateStore(B.getInt8(0), Alloca); 1205 B.CreateCondBr(B.getTrue(), BlockA, BlockB); 1206 1207 B.SetInsertPoint(BlockA); 1208 StoreInst *StoreA = B.CreateStore(B.getInt8(1), Alloca); 1209 B.CreateBr(BlockC); 1210 1211 B.SetInsertPoint(BlockB); 1212 StoreInst *StoreB = B.CreateStore(B.getInt8(2), Alloca); 1213 B.CreateBr(BlockC); 1214 1215 B.SetInsertPoint(BlockC); 1216 B.CreateUnreachable(); 1217 1218 setupAnalyses(); 1219 MemorySSA &MSSA = *Analyses->MSSA; 1220 1221 auto *AccessEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreEntry)); 1222 auto *StoreAEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA)); 1223 auto *StoreBEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB)); 1224 1225 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry), 1226 AccessEntry); 1227 ASSERT_TRUE(StoreAEntry->isOptimized()); 1228 1229 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreBEntry), 1230 AccessEntry); 1231 ASSERT_TRUE(StoreBEntry->isOptimized()); 1232 1233 // Note that if we did InsertionPlace::Beginning, we don't go out of our way 1234 // to invalidate the cache for StoreBEntry. If the user wants to actually do 1235 // moves like these, it's up to them to ensure that nearby cache entries are 1236 // correctly invalidated (which, in general, requires walking all instructions 1237 // that the moved instruction dominates. So we probably shouldn't be doing 1238 // moves like this in general. Still, works as a test-case. ;) ) 1239 MemorySSAUpdater(&MSSA).moveToPlace(StoreAEntry, BlockB, 1240 MemorySSA::InsertionPlace::End); 1241 ASSERT_FALSE(StoreAEntry->isOptimized()); 1242 ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry), 1243 StoreBEntry); 1244 } 1245 1246 TEST_F(MemorySSATest, TestOptimizedDefsAreProperUses) { 1247 F = Function::Create(FunctionType::get(B.getVoidTy(), 1248 {B.getInt8PtrTy(), B.getInt8PtrTy()}, 1249 false), 1250 GlobalValue::ExternalLinkage, "F", &M); 1251 B.SetInsertPoint(BasicBlock::Create(C, "", F)); 1252 Type *Int8 = Type::getInt8Ty(C); 1253 Value *AllocA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A"); 1254 Value *AllocB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B"); 1255 1256 StoreInst *StoreA = B.CreateStore(ConstantInt::get(Int8, 0), AllocA); 1257 StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 1), AllocB); 1258 StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocA); 1259 1260 setupAnalyses(); 1261 MemorySSA &MSSA = *Analyses->MSSA; 1262 MemorySSAWalker *Walker = Analyses->Walker; 1263 1264 // If these don't hold, there's no chance of the test result being useful. 1265 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA), 1266 MSSA.getLiveOnEntryDef()); 1267 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreB), 1268 MSSA.getLiveOnEntryDef()); 1269 auto *StoreAAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA)); 1270 auto *StoreA2Access = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA2)); 1271 ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA2), StoreAAccess); 1272 ASSERT_EQ(StoreA2Access->getOptimized(), StoreAAccess); 1273 1274 auto *StoreBAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB)); 1275 ASSERT_LT(StoreAAccess->getID(), StoreBAccess->getID()); 1276 ASSERT_LT(StoreBAccess->getID(), StoreA2Access->getID()); 1277 1278 auto SortVecByID = [](std::vector<const MemoryDef *> &Defs) { 1279 llvm::sort(Defs, [](const MemoryDef *LHS, const MemoryDef *RHS) { 1280 return LHS->getID() < RHS->getID(); 1281 }); 1282 }; 1283 1284 auto SortedUserList = [&](const MemoryDef *MD) { 1285 std::vector<const MemoryDef *> Result; 1286 transform(MD->users(), std::back_inserter(Result), 1287 [](const User *U) { return cast<MemoryDef>(U); }); 1288 SortVecByID(Result); 1289 return Result; 1290 }; 1291 1292 // Use std::vectors, since they have nice pretty-printing if the test fails. 1293 // Parens are necessary because EXPECT_EQ is a macro, and we have commas in 1294 // our init lists... 1295 EXPECT_EQ(SortedUserList(StoreAAccess), 1296 (std::vector<const MemoryDef *>{StoreBAccess, StoreA2Access})); 1297 1298 EXPECT_EQ(SortedUserList(StoreBAccess), 1299 std::vector<const MemoryDef *>{StoreA2Access}); 1300 1301 // StoreAAccess should be present twice, since it uses liveOnEntry for both 1302 // its defining and optimized accesses. This is a bit awkward, and is not 1303 // relied upon anywhere at the moment. If this is painful, we can fix it. 1304 EXPECT_EQ(SortedUserList(cast<MemoryDef>(MSSA.getLiveOnEntryDef())), 1305 (std::vector<const MemoryDef *>{StoreAAccess, StoreAAccess, 1306 StoreBAccess})); 1307 } 1308 1309 // entry 1310 // | 1311 // header 1312 // / \ 1313 // body | 1314 // \ / 1315 // exit 1316 // header: 1317 // ; 1 = MemoryDef(liveOnEntry) 1318 // body: 1319 // ; 2 = MemoryDef(1) 1320 // exit: 1321 // ; 3 = MemoryPhi({body, 2}, {header, 1}) 1322 // ; 4 = MemoryDef(3); optimized to 3, cannot optimize thorugh phi. 1323 // Insert edge: entry -> exit, check mssa Update is correct. 1324 TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiNotOpt) { 1325 F = Function::Create( 1326 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 1327 GlobalValue::ExternalLinkage, "F", &M); 1328 Argument *PointerArg = &*F->arg_begin(); 1329 BasicBlock *Entry(BasicBlock::Create(C, "entry", F)); 1330 BasicBlock *Header(BasicBlock::Create(C, "header", F)); 1331 BasicBlock *Body(BasicBlock::Create(C, "body", F)); 1332 BasicBlock *Exit(BasicBlock::Create(C, "exit", F)); 1333 B.SetInsertPoint(Entry); 1334 BranchInst::Create(Header, Entry); 1335 B.SetInsertPoint(Header); 1336 B.CreateStore(B.getInt8(16), PointerArg); 1337 B.CreateCondBr(B.getTrue(), Exit, Body); 1338 B.SetInsertPoint(Body); 1339 B.CreateStore(B.getInt8(16), PointerArg); 1340 BranchInst::Create(Exit, Body); 1341 B.SetInsertPoint(Exit); 1342 StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg); 1343 1344 setupAnalyses(); 1345 MemorySSA &MSSA = *Analyses->MSSA; 1346 MemorySSAWalker *Walker = Analyses->Walker; 1347 std::unique_ptr<MemorySSAUpdater> MSSAU = 1348 std::make_unique<MemorySSAUpdater>(&MSSA); 1349 1350 MemoryPhi *Phi = MSSA.getMemoryAccess(Exit); 1351 EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1)); 1352 1353 // Alter CFG, add edge: entry -> exit 1354 Entry->getTerminator()->eraseFromParent(); 1355 B.SetInsertPoint(Entry); 1356 B.CreateCondBr(B.getTrue(), Header, Exit); 1357 SmallVector<CFGUpdate, 1> Updates; 1358 Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit}); 1359 Analyses->DT.applyUpdates(Updates); 1360 MSSAU->applyInsertUpdates(Updates, Analyses->DT); 1361 EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1)); 1362 } 1363 1364 // entry 1365 // | 1366 // header 1367 // / \ 1368 // body | 1369 // \ / 1370 // exit 1371 // header: 1372 // ; 1 = MemoryDef(liveOnEntry) 1373 // body: 1374 // ; 2 = MemoryDef(1) 1375 // exit: 1376 // ; 3 = MemoryPhi({body, 2}, {header, 1}) 1377 // ; 4 = MemoryDef(3); optimize this to 1 now, added edge should invalidate 1378 // the optimized access. 1379 // Insert edge: entry -> exit, check mssa Update is correct. 1380 TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiOpt) { 1381 F = Function::Create( 1382 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 1383 GlobalValue::ExternalLinkage, "F", &M); 1384 Argument *PointerArg = &*F->arg_begin(); 1385 Type *Int8 = Type::getInt8Ty(C); 1386 BasicBlock *Entry(BasicBlock::Create(C, "entry", F)); 1387 BasicBlock *Header(BasicBlock::Create(C, "header", F)); 1388 BasicBlock *Body(BasicBlock::Create(C, "body", F)); 1389 BasicBlock *Exit(BasicBlock::Create(C, "exit", F)); 1390 1391 B.SetInsertPoint(Entry); 1392 Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A"); 1393 BranchInst::Create(Header, Entry); 1394 1395 B.SetInsertPoint(Header); 1396 StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg); 1397 B.CreateCondBr(B.getTrue(), Exit, Body); 1398 1399 B.SetInsertPoint(Body); 1400 B.CreateStore(ConstantInt::get(Int8, 0), Alloca); 1401 BranchInst::Create(Exit, Body); 1402 1403 B.SetInsertPoint(Exit); 1404 StoreInst *S2 = B.CreateStore(B.getInt8(16), PointerArg); 1405 1406 setupAnalyses(); 1407 MemorySSA &MSSA = *Analyses->MSSA; 1408 MemorySSAWalker *Walker = Analyses->Walker; 1409 std::unique_ptr<MemorySSAUpdater> MSSAU = 1410 std::make_unique<MemorySSAUpdater>(&MSSA); 1411 1412 MemoryDef *DefS1 = cast<MemoryDef>(MSSA.getMemoryAccess(S1)); 1413 EXPECT_EQ(DefS1, Walker->getClobberingMemoryAccess(S2)); 1414 1415 // Alter CFG, add edge: entry -> exit 1416 Entry->getTerminator()->eraseFromParent(); 1417 B.SetInsertPoint(Entry); 1418 B.CreateCondBr(B.getTrue(), Header, Exit); 1419 SmallVector<CFGUpdate, 1> Updates; 1420 Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit}); 1421 Analyses->DT.applyUpdates(Updates); 1422 MSSAU->applyInsertUpdates(Updates, Analyses->DT); 1423 1424 MemoryPhi *Phi = MSSA.getMemoryAccess(Exit); 1425 EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S2)); 1426 } 1427 1428 // entry 1429 // / | 1430 // a | 1431 // / \ | 1432 // b c f 1433 // \ / | 1434 // d | 1435 // \ / 1436 // e 1437 // f: 1438 // ; 1 = MemoryDef(liveOnEntry) 1439 // e: 1440 // ; 2 = MemoryPhi({d, liveOnEntry}, {f, 1}) 1441 // 1442 // Insert edge: f -> c, check update is correct. 1443 // After update: 1444 // f: 1445 // ; 1 = MemoryDef(liveOnEntry) 1446 // c: 1447 // ; 3 = MemoryPhi({a, liveOnEntry}, {f, 1}) 1448 // d: 1449 // ; 4 = MemoryPhi({b, liveOnEntry}, {c, 3}) 1450 // e: 1451 // ; 2 = MemoryPhi({d, 4}, {f, 1}) 1452 TEST_F(MemorySSATest, TestAddedEdgeToBlockWithNoPhiAddNewPhis) { 1453 F = Function::Create( 1454 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 1455 GlobalValue::ExternalLinkage, "F", &M); 1456 Argument *PointerArg = &*F->arg_begin(); 1457 BasicBlock *Entry(BasicBlock::Create(C, "entry", F)); 1458 BasicBlock *ABlock(BasicBlock::Create(C, "a", F)); 1459 BasicBlock *BBlock(BasicBlock::Create(C, "b", F)); 1460 BasicBlock *CBlock(BasicBlock::Create(C, "c", F)); 1461 BasicBlock *DBlock(BasicBlock::Create(C, "d", F)); 1462 BasicBlock *EBlock(BasicBlock::Create(C, "e", F)); 1463 BasicBlock *FBlock(BasicBlock::Create(C, "f", F)); 1464 1465 B.SetInsertPoint(Entry); 1466 B.CreateCondBr(B.getTrue(), ABlock, FBlock); 1467 B.SetInsertPoint(ABlock); 1468 B.CreateCondBr(B.getTrue(), BBlock, CBlock); 1469 B.SetInsertPoint(BBlock); 1470 BranchInst::Create(DBlock, BBlock); 1471 B.SetInsertPoint(CBlock); 1472 BranchInst::Create(DBlock, CBlock); 1473 B.SetInsertPoint(DBlock); 1474 BranchInst::Create(EBlock, DBlock); 1475 B.SetInsertPoint(FBlock); 1476 B.CreateStore(B.getInt8(16), PointerArg); 1477 BranchInst::Create(EBlock, FBlock); 1478 1479 setupAnalyses(); 1480 MemorySSA &MSSA = *Analyses->MSSA; 1481 std::unique_ptr<MemorySSAUpdater> MSSAU = 1482 std::make_unique<MemorySSAUpdater>(&MSSA); 1483 1484 // Alter CFG, add edge: f -> c 1485 FBlock->getTerminator()->eraseFromParent(); 1486 B.SetInsertPoint(FBlock); 1487 B.CreateCondBr(B.getTrue(), CBlock, EBlock); 1488 SmallVector<CFGUpdate, 1> Updates; 1489 Updates.push_back({cfg::UpdateKind::Insert, FBlock, CBlock}); 1490 Analyses->DT.applyUpdates(Updates); 1491 MSSAU->applyInsertUpdates(Updates, Analyses->DT); 1492 1493 MemoryPhi *MPC = MSSA.getMemoryAccess(CBlock); 1494 EXPECT_NE(MPC, nullptr); 1495 MemoryPhi *MPD = MSSA.getMemoryAccess(DBlock); 1496 EXPECT_NE(MPD, nullptr); 1497 MemoryPhi *MPE = MSSA.getMemoryAccess(EBlock); 1498 EXPECT_EQ(MPD, MPE->getIncomingValueForBlock(DBlock)); 1499 } 1500 1501 TEST_F(MemorySSATest, TestCallClobber) { 1502 F = Function::Create( 1503 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 1504 GlobalValue::ExternalLinkage, "F", &M); 1505 1506 Value *Pointer1 = &*F->arg_begin(); 1507 BasicBlock *Entry(BasicBlock::Create(C, "", F)); 1508 B.SetInsertPoint(Entry); 1509 Value *Pointer2 = B.CreateGEP(B.getInt8Ty(), Pointer1, B.getInt64(1)); 1510 Instruction *StorePointer1 = B.CreateStore(B.getInt8(0), Pointer1); 1511 Instruction *StorePointer2 = B.CreateStore(B.getInt8(0), Pointer2); 1512 Instruction *MemSet = B.CreateMemSet(Pointer2, B.getInt8(0), 1, Align(1)); 1513 1514 setupAnalyses(); 1515 MemorySSA &MSSA = *Analyses->MSSA; 1516 MemorySSAWalker *Walker = Analyses->Walker; 1517 1518 MemoryUseOrDef *Store1Access = MSSA.getMemoryAccess(StorePointer1); 1519 MemoryUseOrDef *Store2Access = MSSA.getMemoryAccess(StorePointer2); 1520 MemoryUseOrDef *MemSetAccess = MSSA.getMemoryAccess(MemSet); 1521 1522 MemoryAccess *Pointer1Clobber = Walker->getClobberingMemoryAccess( 1523 MemSetAccess, MemoryLocation(Pointer1, LocationSize::precise(1))); 1524 EXPECT_EQ(Pointer1Clobber, Store1Access); 1525 1526 MemoryAccess *Pointer2Clobber = Walker->getClobberingMemoryAccess( 1527 MemSetAccess, MemoryLocation(Pointer2, LocationSize::precise(1))); 1528 EXPECT_EQ(Pointer2Clobber, MemSetAccess); 1529 1530 MemoryAccess *MemSetClobber = Walker->getClobberingMemoryAccess(MemSetAccess); 1531 EXPECT_EQ(MemSetClobber, Store2Access); 1532 } 1533 1534 TEST_F(MemorySSATest, TestLoadClobber) { 1535 F = Function::Create( 1536 FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false), 1537 GlobalValue::ExternalLinkage, "F", &M); 1538 1539 Value *Pointer1 = &*F->arg_begin(); 1540 BasicBlock *Entry(BasicBlock::Create(C, "", F)); 1541 B.SetInsertPoint(Entry); 1542 Value *Pointer2 = B.CreateGEP(B.getInt8Ty(), Pointer1, B.getInt64(1)); 1543 Instruction *LoadPointer1 = 1544 B.CreateLoad(B.getInt8Ty(), Pointer1, /* Volatile */ true); 1545 Instruction *LoadPointer2 = 1546 B.CreateLoad(B.getInt8Ty(), Pointer2, /* Volatile */ true); 1547 1548 setupAnalyses(); 1549 MemorySSA &MSSA = *Analyses->MSSA; 1550 MemorySSAWalker *Walker = Analyses->Walker; 1551 1552 MemoryUseOrDef *Load1Access = MSSA.getMemoryAccess(LoadPointer1); 1553 MemoryUseOrDef *Load2Access = MSSA.getMemoryAccess(LoadPointer2); 1554 1555 // When providing a memory location, we should never return a load as the 1556 // clobber. 1557 MemoryAccess *Pointer1Clobber = Walker->getClobberingMemoryAccess( 1558 Load2Access, MemoryLocation(Pointer1, LocationSize::precise(1))); 1559 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Pointer1Clobber)); 1560 1561 MemoryAccess *Pointer2Clobber = Walker->getClobberingMemoryAccess( 1562 Load2Access, MemoryLocation(Pointer2, LocationSize::precise(1))); 1563 EXPECT_TRUE(MSSA.isLiveOnEntryDef(Pointer2Clobber)); 1564 1565 MemoryAccess *Load2Clobber = Walker->getClobberingMemoryAccess(Load2Access); 1566 EXPECT_EQ(Load2Clobber, Load1Access); 1567 } 1568 1569 // We want to test if the location information are retained 1570 // when the IsGuaranteedLoopInvariant function handles a 1571 // memory access referring to a pointer defined in the entry 1572 // block, hence automatically guaranteed to be loop invariant. 1573 TEST_F(MemorySSATest, TestLoopInvariantEntryBlockPointer) { 1574 SMDiagnostic E; 1575 auto LocalM = 1576 parseAssemblyString("define void @test(i64 %a0, i8* %a1, i1* %a2) {\n" 1577 "entry:\n" 1578 "%v0 = getelementptr i8, i8* %a1, i64 %a0\n" 1579 "%v1 = bitcast i8* %v0 to i64*\n" 1580 "%v2 = bitcast i8* %v0 to i32*\n" 1581 "%v3 = load i1, i1* %a2\n" 1582 "br i1 %v3, label %body, label %exit\n" 1583 "body:\n" 1584 "store i32 1, i32* %v2\n" 1585 "br label %exit\n" 1586 "exit:\n" 1587 "store i64 0, i64* %v1\n" 1588 "ret void\n" 1589 "}", 1590 E, C); 1591 ASSERT_TRUE(LocalM); 1592 F = LocalM->getFunction("test"); 1593 ASSERT_TRUE(F); 1594 // Setup the analysis 1595 setupAnalyses(); 1596 MemorySSA &MSSA = *Analyses->MSSA; 1597 // Find the exit block 1598 for (auto &BB : *F) { 1599 if (BB.getName() == "exit") { 1600 // Get the store instruction 1601 auto *SI = BB.getFirstNonPHI(); 1602 // Get the memory access and location 1603 MemoryAccess *MA = MSSA.getMemoryAccess(SI); 1604 MemoryLocation ML = MemoryLocation::get(SI); 1605 // Use the 'upward_defs_iterator' which internally calls 1606 // IsGuaranteedLoopInvariant 1607 auto ItA = upward_defs_begin({MA, ML}, MSSA.getDomTree()); 1608 auto ItB = 1609 upward_defs_begin({ItA->first, ItA->second}, MSSA.getDomTree()); 1610 // Check if the location information have been retained 1611 EXPECT_TRUE(ItB->second.Size.isPrecise()); 1612 EXPECT_TRUE(ItB->second.Size.hasValue()); 1613 EXPECT_TRUE(ItB->second.Size.getValue() == 8); 1614 } 1615 } 1616 } 1617 1618 TEST_F(MemorySSATest, TestInvariantGroup) { 1619 SMDiagnostic E; 1620 auto M = parseAssemblyString("declare void @f(i8*)\n" 1621 "define i8 @test(i8* %p) {\n" 1622 "entry:\n" 1623 " store i8 42, i8* %p, !invariant.group !0\n" 1624 " call void @f(i8* %p)\n" 1625 " %v = load i8, i8* %p, !invariant.group !0\n" 1626 " ret i8 %v\n" 1627 "}\n" 1628 "!0 = !{}", 1629 E, C); 1630 ASSERT_TRUE(M); 1631 F = M->getFunction("test"); 1632 ASSERT_TRUE(F); 1633 setupAnalyses(); 1634 MemorySSA &MSSA = *Analyses->MSSA; 1635 MemorySSAWalker *Walker = Analyses->Walker; 1636 1637 auto &BB = F->getEntryBlock(); 1638 auto &SI = cast<StoreInst>(*BB.begin()); 1639 auto &Call = cast<CallBase>(*std::next(BB.begin())); 1640 auto &LI = cast<LoadInst>(*std::next(std::next(BB.begin()))); 1641 1642 { 1643 MemoryAccess *SAccess = MSSA.getMemoryAccess(&SI); 1644 MemoryAccess *LAccess = MSSA.getMemoryAccess(&LI); 1645 MemoryAccess *SClobber = Walker->getClobberingMemoryAccess(SAccess); 1646 EXPECT_TRUE(MSSA.isLiveOnEntryDef(SClobber)); 1647 MemoryAccess *LClobber = Walker->getClobberingMemoryAccess(LAccess); 1648 EXPECT_EQ(SAccess, LClobber); 1649 } 1650 1651 // remove store and verify that the memory accesses still make sense 1652 MemorySSAUpdater Updater(&MSSA); 1653 Updater.removeMemoryAccess(&SI); 1654 SI.eraseFromParent(); 1655 1656 { 1657 MemoryAccess *CallAccess = MSSA.getMemoryAccess(&Call); 1658 MemoryAccess *LAccess = MSSA.getMemoryAccess(&LI); 1659 MemoryAccess *LClobber = Walker->getClobberingMemoryAccess(LAccess); 1660 EXPECT_EQ(CallAccess, LClobber); 1661 } 1662 } 1663 1664 static BasicBlock *getBasicBlockByName(Function &F, StringRef Name) { 1665 for (BasicBlock &BB : F) 1666 if (BB.getName() == Name) 1667 return &BB; 1668 llvm_unreachable("Expected to find basic block!"); 1669 } 1670 1671 static Instruction *getInstructionByName(Function &F, StringRef Name) { 1672 for (BasicBlock &BB : F) 1673 for (Instruction &I : BB) 1674 if (I.getName() == Name) 1675 return &I; 1676 llvm_unreachable("Expected to find instruction!"); 1677 } 1678 1679 TEST_F(MemorySSATest, TestVisitedBlocks) { 1680 SMDiagnostic E; 1681 auto M = parseAssemblyString( 1682 "define void @test(i64* noalias %P, i64 %N) {\n" 1683 "preheader.n:\n" 1684 " br label %header.n\n" 1685 "header.n:\n" 1686 " %n = phi i64 [ 0, %preheader.n ], [ %inc.n, %latch.n ]\n" 1687 " %guard.cond.i = icmp slt i64 0, %N\n" 1688 " br i1 %guard.cond.i, label %header.i.check, label %other.i\n" 1689 "header.i.check:\n" 1690 " br label %preheader.i\n" 1691 "preheader.i:\n" 1692 " br label %header.i\n" 1693 "header.i:\n" 1694 " %i = phi i64 [ 0, %preheader.i ], [ %inc.i, %header.i ]\n" 1695 " %v1 = load i64, i64* %P, align 8\n" 1696 " %v2 = load i64, i64* %P, align 8\n" 1697 " %inc.i = add nsw i64 %i, 1\n" 1698 " %cmp.i = icmp slt i64 %inc.i, %N\n" 1699 " br i1 %cmp.i, label %header.i, label %exit.i\n" 1700 "exit.i:\n" 1701 " br label %commonexit\n" 1702 "other.i:\n" 1703 " br label %commonexit\n" 1704 "commonexit:\n" 1705 " br label %latch.n\n" 1706 "latch.n:\n" 1707 " %inc.n = add nsw i64 %n, 1\n" 1708 " %cmp.n = icmp slt i64 %inc.n, %N\n" 1709 " br i1 %cmp.n, label %header.n, label %exit.n\n" 1710 "exit.n:\n" 1711 " ret void\n" 1712 "}\n", 1713 E, C); 1714 ASSERT_TRUE(M); 1715 F = M->getFunction("test"); 1716 ASSERT_TRUE(F); 1717 setupAnalyses(); 1718 MemorySSA &MSSA = *Analyses->MSSA; 1719 MemorySSAUpdater Updater(&MSSA); 1720 1721 { 1722 // Move %v1 before the terminator of %header.i.check 1723 BasicBlock *BB = getBasicBlockByName(*F, "header.i.check"); 1724 Instruction *LI = getInstructionByName(*F, "v1"); 1725 LI->moveBefore(BB->getTerminator()); 1726 if (MemoryUseOrDef *MUD = MSSA.getMemoryAccess(LI)) 1727 Updater.moveToPlace(MUD, BB, MemorySSA::BeforeTerminator); 1728 1729 // Change the termiantor of %header.i.check to `br label true, label 1730 // %preheader.i, label %other.i` 1731 BB->getTerminator()->eraseFromParent(); 1732 ConstantInt *BoolTrue = ConstantInt::getTrue(F->getContext()); 1733 BranchInst::Create(getBasicBlockByName(*F, "preheader.i"), 1734 getBasicBlockByName(*F, "other.i"), BoolTrue, BB); 1735 SmallVector<DominatorTree::UpdateType, 4> DTUpdates; 1736 DTUpdates.push_back(DominatorTree::UpdateType( 1737 DominatorTree::Insert, BB, getBasicBlockByName(*F, "other.i"))); 1738 Updater.applyUpdates(DTUpdates, Analyses->DT, true); 1739 } 1740 1741 // After the first moveToPlace(), %other.i is in VisitedBlocks, even after 1742 // there is a new edge to %other.i, which makes the second moveToPlace() 1743 // traverse incorrectly. 1744 { 1745 // Move %v2 before the terminator of %preheader.i 1746 BasicBlock *BB = getBasicBlockByName(*F, "preheader.i"); 1747 Instruction *LI = getInstructionByName(*F, "v2"); 1748 LI->moveBefore(BB->getTerminator()); 1749 // Check that there is no assertion of "Incomplete phi during partial 1750 // rename" 1751 if (MemoryUseOrDef *MUD = MSSA.getMemoryAccess(LI)) 1752 Updater.moveToPlace(MUD, BB, MemorySSA::BeforeTerminator); 1753 } 1754 } 1755