//===- MemorySSA.cpp - Unit tests for MemorySSA ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"

using namespace llvm;

const static char DLString[] = "e-i64:64-f80:128-n8:16:32:64-S128";

/// There's a lot of common setup between these tests. This fixture helps
/// reduce that. Tests should mock up a function, store it in F, and then call
/// setupAnalyses().
class MemorySSATest : public testing::Test {
protected:
  // N.B. Many of these members depend on each other (e.g. the Module depends
  // on the Context, etc.). So, order matters here (and in TestAnalyses).
  LLVMContext C;
  Module M;
  IRBuilder<> B;
  DataLayout DL;
  TargetLibraryInfoImpl TLII;
  TargetLibraryInfo TLI;
  Function *F;

  // Things that we need to build after the function is created.
  struct TestAnalyses {
    DominatorTree DT;
    AssumptionCache AC;
    AAResults AA;
    BasicAAResult BAA;
    // We need to defer MSSA construction until AA is *entirely* set up, which
    // requires calling addAAResult. Hence, we just use a pointer here.
    std::unique_ptr<MemorySSA> MSSA;
    MemorySSAWalker *Walker;

    TestAnalyses(MemorySSATest &Test)
        : DT(*Test.F), AC(*Test.F), AA(Test.TLI),
          BAA(Test.DL, *Test.F, Test.TLI, AC, &DT) {
      AA.addAAResult(BAA);
      MSSA = std::make_unique<MemorySSA>(*Test.F, &AA, &DT);
      Walker = MSSA->getWalker();
    }
  };

  std::unique_ptr<TestAnalyses> Analyses;

  void setupAnalyses() {
    assert(F);
    Analyses.reset(new TestAnalyses(*this));
  }

public:
  MemorySSATest()
      : M("MemorySSATest", C), B(C), DL(DLString), TLI(TLII), F(nullptr) {}
};

TEST_F(MemorySSATest, CreateALoad) {
  // We create a diamond where there is a store on one side, and then after
  // building MemorySSA, create a load after the merge point, and use it to
  // test updating by creating an access for the load.
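  // A rough sketch of the CFG built below (block names and access numbering
  // are illustrative; the blocks are actually created unnamed):
  //   entry:  br i1 true, label %left, label %right
  //   left:   store i8 16, i8* %arg      ; 1 = MemoryDef(liveOnEntry)
  //           br label %merge
  //   right:  br label %merge
  //   merge:  MemoryPhi({left, 1}, {right, liveOnEntry})
  //           <the load is only created here after MemorySSA is built>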
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  Argument *PointerArg = &*F->arg_begin();
  B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Add the load
  B.SetInsertPoint(Merge);
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // MemoryPHI should already exist.
  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
  EXPECT_NE(MP, nullptr);

  // Create the load memory access
  MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LoadInst, MP, Merge, MemorySSA::Beginning));
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
  // We create a diamond, then build MemorySSA with no memory accesses, and
  // incrementally update it by inserting a store in the entry, a load in the
  // merge point, then a store in the branch, another load in the merge point,
  // and then a store in the entry.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left, Left->begin());
  Argument *PointerArg = &*F->arg_begin();
  B.SetInsertPoint(Left);
  B.CreateBr(Merge);
  B.SetInsertPoint(Right);
  B.CreateBr(Merge);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Add the store
  B.SetInsertPoint(Entry, Entry->begin());
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB(
      EntryStore, nullptr, Entry, MemorySSA::Beginning);
  Updater.insertDef(cast<MemoryDef>(EntryStoreAccess));

  // Add the load
  B.SetInsertPoint(Merge, Merge->begin());
  LoadInst *FirstLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // MemoryPHI should not already exist.
  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
  EXPECT_EQ(MP, nullptr);

  // Create the load memory access
  MemoryUse *FirstLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      FirstLoad, nullptr, Merge, MemorySSA::Beginning));
  Updater.insertUse(FirstLoadAccess);
  // Should just have a load using the entry access, because it should discover
  // the phi is trivial.
  EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), EntryStoreAccess);

  // Create a store on the left
  // Add the store
  B.SetInsertPoint(Left, Left->begin());
  StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB(
      LeftStore, nullptr, Left, MemorySSA::Beginning);
  Updater.insertDef(cast<MemoryDef>(LeftStoreAccess), false);

  // MemoryPHI should exist after adding LeftStore.
  MP = MSSA.getMemoryAccess(Merge);
  EXPECT_NE(MP, nullptr);

  // Add the second load
  B.SetInsertPoint(Merge, Merge->begin());
  LoadInst *SecondLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // Create the load memory access
  MemoryUse *SecondLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      SecondLoad, nullptr, Merge, MemorySSA::Beginning));
  Updater.insertUse(SecondLoadAccess);
  // Now the load should be a phi of the entry store and the left store
  MemoryPhi *MergePhi =
      dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
  EXPECT_NE(MergePhi, nullptr);
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
  // Now create a store below the existing one in the entry
  B.SetInsertPoint(Entry, --Entry->end());
  StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB(
      SecondEntryStore, nullptr, Entry, MemorySSA::End);
  // Insert it twice just to test renaming
  Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), false);
  EXPECT_NE(FirstLoadAccess->getDefiningAccess(), MergePhi);
  Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), true);
  EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), MergePhi);
  // and make sure the phi below it got updated, despite being blocks away
  MergePhi = dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
  EXPECT_NE(MergePhi, nullptr);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SecondEntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, CreateALoadUpdater) {
  // We create a diamond, then build MemorySSA with no memory accesses, and
  // incrementally update it by inserting a store in one of the branches, and a
  // load in the merge point.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left, Left->begin());
  Argument *PointerArg = &*F->arg_begin();
  B.SetInsertPoint(Left);
  B.CreateBr(Merge);
  B.SetInsertPoint(Right);
  B.CreateBr(Merge);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  B.SetInsertPoint(Left, Left->begin());
  // Add the store
  StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *StoreAccess =
      Updater.createMemoryAccessInBB(SI, nullptr, Left, MemorySSA::Beginning);
  Updater.insertDef(cast<MemoryDef>(StoreAccess));

  // MemoryPHI should be created when inserting the def
  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
  EXPECT_NE(MP, nullptr);

  // Add the load
  B.SetInsertPoint(Merge, Merge->begin());
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // Create the load memory access
  MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LoadInst, nullptr, Merge, MemorySSA::Beginning));
  Updater.insertUse(LoadAccess);
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, SinkLoad) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left, Left->begin());
  Argument *PointerArg = &*F->arg_begin();
  B.SetInsertPoint(Left);
  B.CreateBr(Merge);
  B.SetInsertPoint(Right);
  B.CreateBr(Merge);

  // Load in left block
  B.SetInsertPoint(Left, Left->begin());
  LoadInst *LoadInst1 = B.CreateLoad(B.getInt8Ty(), PointerArg);
  // Store in merge block
  B.SetInsertPoint(Merge, Merge->begin());
  B.CreateStore(B.getInt8(16), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Mimic sinking of a load:
  // - clone load
  // - insert in "exit" block
  // - insert in mssa
  // - remove from original block

  LoadInst *LoadInstClone = cast<LoadInst>(LoadInst1->clone());
  Merge->getInstList().insert(Merge->begin(), LoadInstClone);
  MemoryAccess *NewLoadAccess =
      Updater.createMemoryAccessInBB(LoadInstClone, nullptr,
                                     LoadInstClone->getParent(),
                                     MemorySSA::Beginning);
  Updater.insertUse(cast<MemoryUse>(NewLoadAccess));
  MSSA.verifyMemorySSA();
  Updater.removeMemoryAccess(MSSA.getMemoryAccess(LoadInst1));
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveAStore) {
  // We create a diamond where there is a store in the entry, a store on one
  // side, and a load at the end. After building MemorySSA, we test updating by
  // moving the store from the side block to the entry block. This destroys the
  // old access.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  StoreInst *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  B.CreateLoad(B.getInt8Ty(), PointerArg);
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Move the store
  SideStore->moveBefore(Entry->getTerminator());
  MemoryAccess *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  MemoryAccess *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  MemoryAccess *NewStoreAccess = Updater.createMemoryAccessAfter(
      SideStore, EntryStoreAccess, EntryStoreAccess);
  EntryStoreAccess->replaceAllUsesWith(NewStoreAccess);
  Updater.removeMemoryAccess(SideStoreAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveAStoreUpdater) {
  // We create a diamond where there is a store in the entry, a store on one
  // side, and a load at the end. After building MemorySSA, we test updating by
  // moving the store from the side block to the entry block. This destroys the
  // old access.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Move the store
  SideStore->moveBefore(Entry->getTerminator());
  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  auto *NewStoreAccess = Updater.createMemoryAccessAfter(
      SideStore, EntryStoreAccess, EntryStoreAccess);
  // Before, the load will point to a phi of the EntryStore and SideStore.
  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  Updater.removeMemoryAccess(SideStoreAccess);
  Updater.insertDef(cast<MemoryDef>(NewStoreAccess));
  // After, it's a phi of the new side store access.
  EXPECT_EQ(MergePhi->getIncomingValue(0), NewStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), NewStoreAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveAStoreUpdaterMove) {
  // We create a diamond where there is a store in the entry, a store on one
  // side, and a load at the end. After building MemorySSA, we test updating by
  // moving the store from the side block to the entry block. This does not
  // destroy the old access.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Move the store
  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  // Before, the load will point to a phi of the EntryStore and SideStore.
  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  SideStore->moveBefore(*EntryStore->getParent(), ++EntryStore->getIterator());
  Updater.moveAfter(SideStoreAccess, EntryStoreAccess);
  // After, it's a phi of the side store.
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);

  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveAStoreAllAround) {
  // We create a diamond where there is a store in the entry, a store on one
  // side, and a load at the end. After building MemorySSA, we test updating by
  // moving the store from the side block to the entry block, then to the other
  // side block, then to before the load. This does not destroy the old access.
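  // Roughly, the initial MemorySSA form for the diamond below looks like this
  // (numbering and operand order are illustrative):
  //   entry: 1 = MemoryDef(liveOnEntry)          ; EntryStore
  //   left:  2 = MemoryDef(1)                    ; SideStore
  //   merge: 3 = MemoryPhi({left, 2}, {right, 1})
  //          MemoryUse(3)                        ; MergeLoad
  // Each move below should only change which access feeds the phi.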
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Move the store
  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  // Before, the load will point to a phi of the EntryStore and SideStore.
  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  // Move the store before the entry store
  SideStore->moveBefore(*EntryStore->getParent(), EntryStore->getIterator());
  Updater.moveBefore(SideStoreAccess, EntryStoreAccess);
  // After, it's a phi of the entry store.
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  MSSA.verifyMemorySSA();
  // Now move the store to the right branch
  SideStore->moveBefore(*Right, Right->begin());
  Updater.moveToPlace(SideStoreAccess, Right, MemorySSA::Beginning);
  MSSA.verifyMemorySSA();
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
  // Now move it before the load
  SideStore->moveBefore(MergeLoad);
  Updater.moveBefore(SideStoreAccess, LoadAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, RemoveAPhi) {
  // We create a diamond where there is a store on one side, and then a load
  // after the merge point. This enables us to test a bunch of different
  // removal cases.
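  // A rough sketch of what gets removed below (numbering is illustrative):
  //   left:  1 = MemoryDef(liveOnEntry)             ; StoreInst
  //   merge: 2 = MemoryPhi({left, 1}, {right, liveOnEntry})
  //          MemoryUse(2)                           ; LoadInst
  // We first remove the store's def, then RAUW the phi with liveOnEntry, and
  // finally remove the phi itself.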
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Before, the load will be a use of a phi<store, liveonentry>.
  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
  MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  // Kill the store
  Updater.removeMemoryAccess(StoreAccess);
  MemoryPhi *MP = cast<MemoryPhi>(DefiningAccess);
  // Verify the phi ended up as liveonentry, liveonentry
  for (auto &Op : MP->incoming_values())
    EXPECT_TRUE(MSSA.isLiveOnEntryDef(cast<MemoryAccess>(Op.get())));
  // Replace the phi uses with the live on entry def
  MP->replaceAllUsesWith(MSSA.getLiveOnEntryDef());
  // Verify the load is now defined by liveOnEntryDef
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
  // Remove the PHI
  Updater.removeMemoryAccess(MP);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, RemoveMemoryAccess) {
  // We create a diamond where there is a store on one side, and then a load
  // after the merge point. This enables us to test a bunch of different
  // removal cases.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  MemorySSAUpdater Updater(&MSSA);

  // Before, the load will be a use of a phi<store, liveonentry>. It should be
  // the same after.
  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
  MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  // The load is currently clobbered by one of the phi arguments, so the walker
  // should determine the clobbering access as the phi.
  EXPECT_EQ(DefiningAccess, Walker->getClobberingMemoryAccess(LoadInst));
  Updater.removeMemoryAccess(StoreAccess);
  MSSA.verifyMemorySSA();
  // After removeMemoryAccess, let's see if we got the right accesses.
  // The load should still point to the phi ...
  EXPECT_EQ(DefiningAccess, LoadAccess->getDefiningAccess());
  // but we should now get live on entry for the clobbering definition of the
  // load, since the walker can walk past the phi node, as every argument is
  // the same.
  // XXX: This currently requires either removing the phi or resetting
  // optimized on the load.

  EXPECT_FALSE(
      MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
  // If we reset optimized, we get live on entry.
  LoadAccess->resetOptimized();
  EXPECT_TRUE(
      MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
  // The phi should now be a two entry phi with two live on entry defs.
  for (const auto &Op : DefiningAccess->operands()) {
    MemoryAccess *Operand = cast<MemoryAccess>(&*Op);
    EXPECT_TRUE(MSSA.isLiveOnEntryDef(Operand));
  }

  // Now we try to remove the single valued phi
  Updater.removeMemoryAccess(DefiningAccess);
  MSSA.verifyMemorySSA();
  // Now the load should be a load of live on entry.
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
}

// We had a bug with caching where the walker would report MemoryDef#3's
// clobber (below) was MemoryDef#1.
//
// define void @F(i8*) {
//   %A = alloca i8, i8 1
//   ; 1 = MemoryDef(liveOnEntry)
//   store i8 0, i8* %A
//   ; 2 = MemoryDef(1)
//   store i8 1, i8* %A
//   ; 3 = MemoryDef(2)
//   store i8 2, i8* %A
// }
TEST_F(MemorySSATest, TestTripleStore) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  StoreInst *S1 = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
  StoreInst *S2 = B.CreateStore(ConstantInt::get(Int8, 1), Alloca);
  StoreInst *S3 = B.CreateStore(ConstantInt::get(Int8, 2), Alloca);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  unsigned I = 0;
  for (StoreInst *V : {S1, S2, S3}) {
    // Everything should be clobbered by its defining access
    MemoryAccess *DefiningAccess = MSSA.getMemoryAccess(V)->getDefiningAccess();
    MemoryAccess *WalkerClobber = Walker->getClobberingMemoryAccess(V);
    EXPECT_EQ(DefiningAccess, WalkerClobber)
        << "Store " << I << " doesn't have the correct clobbering access";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}

// ...And fixing the above bug made it obvious that, when walking, MemorySSA's
// walker was caching the initial node it walked. This was fine (albeit
// mostly redundant) unless the initial node being walked is a clobber for the
// query. In that case, we'd cache that the node clobbered itself.
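// The pattern exercised below is roughly:
//   %A = alloca i8, i8 1
//   ; 1 = MemoryDef(liveOnEntry)
//   store i8 0, i8* %A
//   ; MemoryUse(1)
//   %1 = load i8, i8* %A
// The store clobbers the load, and liveOnEntry clobbers the store; a correct
// walker must not report the store as its own clobber.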
TEST_F(MemorySSATest, TestStoreAndLoad) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Instruction *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
  Instruction *LI = B.CreateLoad(Int8, Alloca);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LI);
  EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SI));
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SI)));
}

// Another bug (related to the above two fixes): It was noted that, given the
// following code:
// ; 1 = MemoryDef(liveOnEntry)
// store i8 0, i8* %1
//
// ...A query to getClobberingMemoryAccess(MemoryAccess*, MemoryLocation) would
// hand back the store (correctly). A later call to
// getClobberingMemoryAccess(const Instruction*) would also hand back the store
// (incorrectly; it should return liveOnEntry).
//
// This test checks that repeated calls to either function return what they're
// meant to.
TEST_F(MemorySSATest, TestStoreDoubleQuery) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  StoreInst *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  MemoryAccess *StoreAccess = MSSA.getMemoryAccess(SI);
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  MemoryAccess *Clobber =
      Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
  MemoryAccess *LiveOnEntry = Walker->getClobberingMemoryAccess(SI);

  EXPECT_EQ(Clobber, StoreAccess);
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));

  // Try again (with entries in the cache already) for good measure...
  Clobber = Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
  LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
  EXPECT_EQ(Clobber, StoreAccess);
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
}

// Bug: During phi optimization, the walker wouldn't cache to the proper result
// in the farthest-walked BB.
//
// Specifically, it would assume that whatever we walked to was a clobber.
// "Whatever we walked to" isn't a clobber if we hit a cache entry.
//
// ...So, we need a test case that looks like:
//    A
//   / \
//  B   |
//   \ /
//    C
//
// Where, when we try to optimize a thing in 'C', a blocker is found in 'B'.
// The walk must determine that the blocker exists by using cache entries
// *while walking* 'B'.
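// A rough sketch of what the test builds in block 'B' (the final store to %a
// is deleted again once MSSA has been constructed):
//   store i8 0, i8* %a    ; FirstStore
//   store i8 0, i8* %b
//   %1 = load i8, i8* %a  ; ALoad0
//   store i8 0, i8* %b    ; BStore
//   store i8 0, i8* %a    ; KillStore (removed after building MSSA)
//   %2 = load i8, i8* %a  ; ALoad
// Block 'C' then stores to %a again (BelowPhi).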
TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "A", F));
  Type *Int8 = Type::getInt8Ty(C);
  Constant *One = ConstantInt::get(Int8, 1);
  Constant *Zero = ConstantInt::get(Int8, 0);
  Value *AllocA = B.CreateAlloca(Int8, One, "a");
  Value *AllocB = B.CreateAlloca(Int8, One, "b");
  BasicBlock *IfThen = BasicBlock::Create(C, "B", F);
  BasicBlock *IfEnd = BasicBlock::Create(C, "C", F);

  B.CreateCondBr(UndefValue::get(Type::getInt1Ty(C)), IfThen, IfEnd);

  B.SetInsertPoint(IfThen);
  Instruction *FirstStore = B.CreateStore(Zero, AllocA);
  B.CreateStore(Zero, AllocB);
  Instruction *ALoad0 = B.CreateLoad(Int8, AllocA, "");
  Instruction *BStore = B.CreateStore(Zero, AllocB);
  // Due to use optimization/etc. we make a store to A, which is removed after
  // we build MSSA. This helps keep the test case simple-ish.
  Instruction *KillStore = B.CreateStore(Zero, AllocA);
  Instruction *ALoad = B.CreateLoad(Int8, AllocA, "");
  B.CreateBr(IfEnd);

  B.SetInsertPoint(IfEnd);
  Instruction *BelowPhi = B.CreateStore(Zero, AllocA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  MemorySSAUpdater Updater(&MSSA);

  // Kill `KillStore`; it exists solely so that the load after it won't be
  // optimized to FirstStore.
  Updater.removeMemoryAccess(MSSA.getMemoryAccess(KillStore));
  KillStore->eraseFromParent();
  auto *ALoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(ALoad));
  EXPECT_EQ(ALoadMA->getDefiningAccess(), MSSA.getMemoryAccess(BStore));

  // Populate the cache for the store to AllocB directly after FirstStore. It
  // should point to something in block B (so something in C can't be optimized
  // to it).
  MemoryAccess *Load0Clobber = Walker->getClobberingMemoryAccess(ALoad0);
  EXPECT_EQ(MSSA.getMemoryAccess(FirstStore), Load0Clobber);

  // If the bug exists, this will introduce a bad cache entry for %a on BStore.
  // It will point to the store to %b after FirstStore. This only happens
  // during phi optimization.
  MemoryAccess *BottomClobber = Walker->getClobberingMemoryAccess(BelowPhi);
  MemoryAccess *Phi = MSSA.getMemoryAccess(IfEnd);
  EXPECT_EQ(BottomClobber, Phi);

  // This query will first check the cache for {%a, BStore}. It should point to
  // FirstStore, not to the store after FirstStore.
  MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(ALoad);
  EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore));
}

// Test that our walker properly handles invariant loads. It's a bit hacky,
// since we add the invariant.load metadata *after* building MSSA. Otherwise,
// the use optimizer will optimize the load for us, which isn't what we want.
// FIXME: It may be easier/cleaner to just add an 'optimize uses?' flag to MSSA.
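// Roughly the shape being checked (the metadata is attached only after MSSA
// construction, and the walker's cached info for the load is invalidated):
//   %1 = alloca i8, i8 1
//   ; 1 = MemoryDef(liveOnEntry)
//   store i8 1, i8* %1
//   ; MemoryUse(1)
//   %2 = load i8, i8* %1, !invariant.load !0
// With the metadata in place, the walker should report liveOnEntry as the
// load's clobber.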
TEST_F(MemorySSATest, WalkerInvariantLoadOpt) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Constant *One = ConstantInt::get(Int8, 1);
  Value *AllocA = B.CreateAlloca(Int8, One, "");

  Instruction *Store = B.CreateStore(One, AllocA);
  Instruction *Load = B.CreateLoad(Int8, AllocA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  auto *LoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(Load));
  auto *StoreMA = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
  EXPECT_EQ(LoadMA->getDefiningAccess(), StoreMA);

  // ...At the time of writing, no cache should exist for LoadMA. Be a bit
  // flexible to future changes.
  Walker->invalidateInfo(LoadMA);
  Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(C, {}));

  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LoadMA);
  EXPECT_EQ(LoadClobber, MSSA.getLiveOnEntryDef());
}

// Test that loads get reoptimized properly by the walker.
TEST_F(MemorySSATest, WalkerReopt) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Instruction *SIA = B.CreateStore(ConstantInt::get(Int8, 0), AllocaA);
  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
  Instruction *SIB = B.CreateStore(ConstantInt::get(Int8, 0), AllocaB);
  Instruction *LIA = B.CreateLoad(Int8, AllocaA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  MemorySSAUpdater Updater(&MSSA);

  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LIA);
  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LIA));
  EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SIA));
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SIA)));
  Updater.removeMemoryAccess(LoadAccess);

  // Create the load memory access pointing to an unoptimized place.
  MemoryUse *NewLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LIA, MSSA.getMemoryAccess(SIB), LIA->getParent(), MemorySSA::End));
  // This should cause it to be optimized
  EXPECT_EQ(Walker->getClobberingMemoryAccess(NewLoadAccess), LoadClobber);
  EXPECT_EQ(NewLoadAccess->getDefiningAccess(), LoadClobber);
}

// Test out MemorySSAUpdater::moveBefore
TEST_F(MemorySSATest, MoveAboveMemoryDef) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));

  Type *Int8 = Type::getInt8Ty(C);
  Value *A = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *B_ = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
  Value *C = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");

  StoreInst *StoreA0 = B.CreateStore(ConstantInt::get(Int8, 0), A);
  StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 0), B_);
  LoadInst *LoadB = B.CreateLoad(Int8, B_);
  StoreInst *StoreA1 = B.CreateStore(ConstantInt::get(Int8, 4), A);
  StoreInst *StoreC = B.CreateStore(ConstantInt::get(Int8, 4), C);
  StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 4), A);
  LoadInst *LoadC = B.CreateLoad(Int8, C);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker &Walker = *Analyses->Walker;

  MemorySSAUpdater Updater(&MSSA);
  StoreC->moveBefore(StoreB);
  Updater.moveBefore(cast<MemoryDef>(MSSA.getMemoryAccess(StoreC)),
                     cast<MemoryDef>(MSSA.getMemoryAccess(StoreB)));

  MSSA.verifyMemorySSA();

  EXPECT_EQ(MSSA.getMemoryAccess(StoreB)->getDefiningAccess(),
            MSSA.getMemoryAccess(StoreC));
  EXPECT_EQ(MSSA.getMemoryAccess(StoreC)->getDefiningAccess(),
            MSSA.getMemoryAccess(StoreA0));
  EXPECT_EQ(MSSA.getMemoryAccess(StoreA2)->getDefiningAccess(),
            MSSA.getMemoryAccess(StoreA1));
  EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadB),
            MSSA.getMemoryAccess(StoreB));
  EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadC),
            MSSA.getMemoryAccess(StoreC));

  // Exercise block numbering.
  EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreC),
                                    MSSA.getMemoryAccess(StoreB)));
  EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreA1),
                                    MSSA.getMemoryAccess(StoreA2)));
}

TEST_F(MemorySSATest, Irreducible) {
  // Create the equivalent of
  //   x = something
  //   if (...)
  //     goto second_loop_entry
  //   while (...) {
  //   second_loop_entry:
  //   }
  //   use(x)

  SmallVector<PHINode *, 8> Inserted;
  IRBuilder<> B(C);
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  // Make blocks
  BasicBlock *IfBB = BasicBlock::Create(C, "if", F);
  BasicBlock *LoopStartBB = BasicBlock::Create(C, "loopstart", F);
  BasicBlock *LoopMainBB = BasicBlock::Create(C, "loopmain", F);
  BasicBlock *AfterLoopBB = BasicBlock::Create(C, "afterloop", F);
  B.SetInsertPoint(IfBB);
  B.CreateCondBr(B.getTrue(), LoopMainBB, LoopStartBB);
  B.SetInsertPoint(LoopStartBB);
  B.CreateBr(LoopMainBB);
  B.SetInsertPoint(LoopMainBB);
  B.CreateCondBr(B.getTrue(), LoopStartBB, AfterLoopBB);
  B.SetInsertPoint(AfterLoopBB);
  Argument *FirstArg = &*F->arg_begin();
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Create the load memory access
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), FirstArg);
  MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LoadInst, nullptr, AfterLoopBB, MemorySSA::Beginning));
  Updater.insertUse(LoadAccess);
  MSSA.verifyMemorySSA();
}

TEST_F(MemorySSATest, MoveToBeforeLiveOnEntryInvalidatesCache) {
  // Create:
  //   %1 = alloca i8
  //   ; 1 = MemoryDef(liveOnEntry)
  //   store i8 0, i8* %1
  //   ; 2 = MemoryDef(1)
  //   store i8 0, i8* %1
  //
  // ...And be sure that MSSA's caching doesn't give us `1` for the clobber of
  // `2` after `1` is removed.
  IRBuilder<> B(C);
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  BasicBlock *Entry = BasicBlock::Create(C, "if", F);
  B.SetInsertPoint(Entry);

  Value *A = B.CreateAlloca(B.getInt8Ty());
  StoreInst *StoreA = B.CreateStore(B.getInt8(0), A);
  StoreInst *StoreB = B.CreateStore(B.getInt8(0), A);

  setupAnalyses();

  MemorySSA &MSSA = *Analyses->MSSA;

  auto *DefA = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
  auto *DefB = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));

  MemoryAccess *BClobber = MSSA.getWalker()->getClobberingMemoryAccess(DefB);
  ASSERT_EQ(DefA, BClobber);

  MemorySSAUpdater(&MSSA).removeMemoryAccess(DefA);
  StoreA->eraseFromParent();

  EXPECT_EQ(DefB->getDefiningAccess(), MSSA.getLiveOnEntryDef());

  EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefB),
            MSSA.getLiveOnEntryDef())
      << "(DefA = " << DefA << ")";
}

TEST_F(MemorySSATest, RemovingDefInvalidatesCache) {
  // Create:
  //   %x = alloca i8
  //   %y = alloca i8
  //   ; 1 = MemoryDef(liveOnEntry)
  //   store i8 0, i8* %x
  //   ; 2 = MemoryDef(1)
  //   store i8 0, i8* %y
  //   ; 3 = MemoryDef(2)
  //   store i8 0, i8* %x
  //
  // And be sure that MSSA's caching handles the removal of def `1`
  // appropriately.
  IRBuilder<> B(C);
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  BasicBlock *Entry = BasicBlock::Create(C, "if", F);
  B.SetInsertPoint(Entry);

  Value *X = B.CreateAlloca(B.getInt8Ty());
  Value *Y = B.CreateAlloca(B.getInt8Ty());
  StoreInst *StoreX1 = B.CreateStore(B.getInt8(0), X);
  StoreInst *StoreY = B.CreateStore(B.getInt8(0), Y);
  StoreInst *StoreX2 = B.CreateStore(B.getInt8(0), X);

  setupAnalyses();

  MemorySSA &MSSA = *Analyses->MSSA;

  auto *DefX1 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX1));
  auto *DefY = cast<MemoryDef>(MSSA.getMemoryAccess(StoreY));
  auto *DefX2 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX2));

  EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
  MemoryAccess *X2Clobber = MSSA.getWalker()->getClobberingMemoryAccess(DefX2);
  ASSERT_EQ(DefX1, X2Clobber);

  MemorySSAUpdater(&MSSA).removeMemoryAccess(DefX1);
  StoreX1->eraseFromParent();

  EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
  EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefX2),
            MSSA.getLiveOnEntryDef())
      << "(DefX1 = " << DefX1 << ")";
}

// Test Must alias for optimized uses
TEST_F(MemorySSATest, TestLoadMustAlias) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");

  B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
  // Check load from LOE
  LoadInst *LA1 = B.CreateLoad(Int8, AllocaA, "");
  // Check load alias cached for second load
  LoadInst *LA2 = B.CreateLoad(Int8, AllocaA, "");

  B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
  // Check load from store/def
  LoadInst *LA3 = B.CreateLoad(Int8, AllocaA, "");
  // Check load alias cached for second load
  LoadInst *LA4 = B.CreateLoad(Int8, AllocaA, "");

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MSSA.ensureOptimizedUses();

  unsigned I = 0;
  for (LoadInst *V : {LA1, LA2}) {
    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemUse->getOptimizedAccessType(), None)
        << "Load " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
  for (LoadInst *V : {LA3, LA4}) {
    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(*MemUse->getOptimizedAccessType(), AliasResult::MustAlias)
        << "Load " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}

// Test Must alias for optimized defs.
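// A rough sketch of the IR below: stores alternate between two allocas, so
// the first store to each alloca is clobbered by liveOnEntry (optimized access
// type None), while every later store must-alias its clobbering store:
//   store i8 1, i8* %A   ; SA1: clobbered by liveOnEntry
//   store i8 1, i8* %B   ; SB1: clobbered by liveOnEntry
//   store i8 2, i8* %A   ; SA2: clobbered by SA1
//   store i8 2, i8* %B   ; SB2: clobbered by SB1
//   store i8 3, i8* %A   ; SA3: clobbered by SA2
//   store i8 3, i8* %B   ; SB3: clobbered by SB2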
TEST_F(MemorySSATest, TestStoreMustAlias) {
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
  StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
  StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
  StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaA);
  StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaB);
  StoreInst *SA3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaA);
  StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaB);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  unsigned I = 0;
  for (StoreInst *V : {SA1, SB1, SA2, SB2, SA3, SB3}) {
    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemDef->isOptimized(), false)
        << "Store " << I << " is optimized from the start?";
    EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
        << "Store " << I
        << " has correct alias information before being optimized?";
    if (V == SA1)
      Walker->getClobberingMemoryAccess(V);
    else {
      MemoryAccess *Def = MemDef->getDefiningAccess();
      MemoryAccess *Clob = Walker->getClobberingMemoryAccess(V);
      EXPECT_NE(Def, Clob) << "Store " << I
                           << " has Defining Access equal to Clobbering Access";
    }
    EXPECT_EQ(MemDef->isOptimized(), true)
        << "Store " << I << " was not optimized";
    if (I == 0 || I == 1)
      EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
          << "Store " << I << " doesn't have the correct alias information";
    else
      EXPECT_EQ(*MemDef->getOptimizedAccessType(), AliasResult::MustAlias)
          << "Store " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}

// Test May alias for optimized uses.
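// A rough sketch of the IR below. %a and %b are pointer arguments, so a store
// through one may alias a load through the other; loads whose nearest
// clobbering store wrote the *other* pointer should be optimized as MayAlias,
// while loads whose nearest clobbering store wrote the same pointer should be
// MustAlias:
//   store i8 1, i8* %b
//   %1 = load i8, i8* %a   ; LA1: may-alias the store to %b
//   store i8 0, i8* %a
//   %2 = load i8, i8* %b   ; LB1: may-alias the store to %a
//   store i8 0, i8* %a
//   %3 = load i8, i8* %a   ; LA2: must-alias the store to %a
//   store i8 0, i8* %b
//   %4 = load i8, i8* %b   ; LB2: must-alias the store to %b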
TEST_F(MemorySSATest, TestLoadMayAlias) {
  F = Function::Create(FunctionType::get(B.getVoidTy(),
                                         {B.getInt8PtrTy(), B.getInt8PtrTy()},
                                         false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  auto *ArgIt = F->arg_begin();
  Argument *PointerA = &*ArgIt;
  Argument *PointerB = &*(++ArgIt);
  B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
  LoadInst *LA1 = B.CreateLoad(Int8, PointerA, "");
  B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
  LoadInst *LB1 = B.CreateLoad(Int8, PointerB, "");
  B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
  LoadInst *LA2 = B.CreateLoad(Int8, PointerA, "");
  B.CreateStore(ConstantInt::get(Int8, 0), PointerB);
  LoadInst *LB2 = B.CreateLoad(Int8, PointerB, "");

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MSSA.ensureOptimizedUses();

  unsigned I = 0;
  for (LoadInst *V : {LA1, LB1}) {
    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(*MemUse->getOptimizedAccessType(), AliasResult::MayAlias)
        << "Load " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
  for (LoadInst *V : {LA2, LB2}) {
    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(*MemUse->getOptimizedAccessType(), AliasResult::MustAlias)
        << "Load " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}

// Test May alias for optimized defs.
TEST_F(MemorySSATest, TestStoreMayAlias) {
  F = Function::Create(FunctionType::get(B.getVoidTy(),
                                         {B.getInt8PtrTy(), B.getInt8PtrTy()},
                                         false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  auto *ArgIt = F->arg_begin();
  Argument *PointerA = &*ArgIt;
  Argument *PointerB = &*(++ArgIt);
  Value *AllocaC = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
  // Store into arg1, must alias because it's LOE => must
  StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
  // Store into arg2, may alias store to arg1 => may
  StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
  // Store into alloca, no alias with args, so must alias LOE => must
  StoreInst *SC1 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaC);
  // Store into arg1, may alias store to arg2 => may
  StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 3), PointerA);
  // Store into arg2, may alias store to arg1 => may
  StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 4), PointerB);
  // Store into alloca, no alias with args, so must alias SC1 => must
  StoreInst *SC2 = B.CreateStore(ConstantInt::get(Int8, 5), AllocaC);
  // Store into arg2, must alias store to arg2 => must
  StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 6), PointerB);
  std::initializer_list<StoreInst *> Sts = {SA1, SB1, SC1, SA2, SB2, SC2, SB3};

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  unsigned I = 0;
  for (StoreInst *V : Sts) {
    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemDef->isOptimized(), false)
        << "Store " << I << " is optimized from the start?";
    EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
        << "Store " << I
        << " has correct alias information before being optimized?";
    ++I;
  }

  for (StoreInst *V : Sts)
    Walker->getClobberingMemoryAccess(V);

  I = 0;
  for (StoreInst *V : Sts) {
    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemDef->isOptimized(), true)
        << "Store " << I << " was not optimized";
    if (I == 1 || I == 3 || I == 4)
      EXPECT_EQ(MemDef->getOptimizedAccessType().value(), AliasResult::MayAlias)
          << "Store " << I << " doesn't have the correct alias information";
    else if (I == 0 || I == 2)
      EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
          << "Store " << I << " doesn't have the correct alias information";
    else
      EXPECT_EQ(MemDef->getOptimizedAccessType().value(),
                AliasResult::MustAlias)
          << "Store " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}

TEST_F(MemorySSATest, LifetimeMarkersAreClobbers) {
  // Example code:
  // define void @a(i8* %foo) {
  //   %bar = getelementptr i8, i8* %foo, i64 1
  //   %baz = getelementptr i8, i8* %foo, i64 2
  //   store i8 0, i8* %foo
  //   store i8 0, i8* %bar
  //   call void @llvm.lifetime.end.p0i8(i64 3, i8* %foo)
  //   call void @llvm.lifetime.start.p0i8(i64 3, i8* %foo)
  //   store i8 0, i8* %foo
  //   store i8 0, i8* %bar
  //   call void @llvm.memset.p0i8(i8* %baz, i8 0, i64 1)
  //   ret void
  // }
  //
  // Patterns like this are possible after inlining; the stores to %foo and
  // %bar should both be clobbered by the lifetime.start call if they're
  // dominated by it.

  IRBuilder<> B(C);
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  // Make blocks
  BasicBlock *Entry = BasicBlock::Create(C, "entry", F);

  B.SetInsertPoint(Entry);
  Value *Foo = &*F->arg_begin();

  Value *Bar = B.CreateGEP(B.getInt8Ty(), Foo, B.getInt64(1), "bar");
  Value *Baz = B.CreateGEP(B.getInt8Ty(), Foo, B.getInt64(2), "baz");

  B.CreateStore(B.getInt8(0), Foo);
  B.CreateStore(B.getInt8(0), Bar);

  auto GetLifetimeIntrinsic = [&](Intrinsic::ID ID) {
    return Intrinsic::getDeclaration(&M, ID, {Foo->getType()});
  };

  B.CreateCall(GetLifetimeIntrinsic(Intrinsic::lifetime_end),
               {B.getInt64(3), Foo});
  Instruction *LifetimeStart = B.CreateCall(
      GetLifetimeIntrinsic(Intrinsic::lifetime_start), {B.getInt64(3), Foo});

  Instruction *FooStore = B.CreateStore(B.getInt8(0), Foo);
  Instruction *BarStore = B.CreateStore(B.getInt8(0), Bar);
  Instruction *BazMemSet = B.CreateMemSet(Baz, B.getInt8(0), 1, Align(1));

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;

  MemoryAccess *LifetimeStartAccess = MSSA.getMemoryAccess(LifetimeStart);
  ASSERT_NE(LifetimeStartAccess, nullptr);

  MemoryAccess *FooAccess = MSSA.getMemoryAccess(FooStore);
  ASSERT_NE(FooAccess, nullptr);

  MemoryAccess *BarAccess = MSSA.getMemoryAccess(BarStore);
  ASSERT_NE(BarAccess, nullptr);

  MemoryAccess *BazAccess = MSSA.getMemoryAccess(BazMemSet);
  ASSERT_NE(BazAccess, nullptr);

  MemoryAccess *FooClobber =
      MSSA.getWalker()->getClobberingMemoryAccess(FooAccess);
  EXPECT_EQ(FooClobber, LifetimeStartAccess);

  MemoryAccess *BarClobber =
      MSSA.getWalker()->getClobberingMemoryAccess(BarAccess);
  EXPECT_EQ(BarClobber, LifetimeStartAccess);

  MemoryAccess *BazClobber =
      MSSA.getWalker()->getClobberingMemoryAccess(BazAccess);
  EXPECT_EQ(BazClobber, LifetimeStartAccess);

  MemoryAccess *LifetimeStartClobber =
      MSSA.getWalker()->getClobberingMemoryAccess(
          LifetimeStartAccess, MemoryLocation::getAfter(Foo));
  EXPECT_EQ(LifetimeStartClobber, LifetimeStartAccess);
}

TEST_F(MemorySSATest, DefOptimizationsAreInvalidatedOnMoving) {
  IRBuilder<> B(C);
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getInt1Ty()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);

  // Make a CFG like
  //    entry
  //     / \
  //    a   b
  //     \ /
  //      c
  //
  // Put a def in A and a def in B, move the def from A -> B, observe as the
  // optimization is invalidated.
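  // Roughly, the accesses below start out as (numbering is illustrative):
  //   entry: 1 = MemoryDef(liveOnEntry)   ; StoreEntry
  //   a:     2 = MemoryDef(1)             ; StoreA, optimized to 1
  //   b:     3 = MemoryDef(1)             ; StoreB, optimized to 1
  // Moving StoreA to the end of block b must drop its optimized clobber,
  // since StoreB now clobbers it.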
  BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
  BasicBlock *BlockA = BasicBlock::Create(C, "a", F);
  BasicBlock *BlockB = BasicBlock::Create(C, "b", F);
  BasicBlock *BlockC = BasicBlock::Create(C, "c", F);

  B.SetInsertPoint(Entry);
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "alloc");
  StoreInst *StoreEntry = B.CreateStore(B.getInt8(0), Alloca);
  B.CreateCondBr(B.getTrue(), BlockA, BlockB);

  B.SetInsertPoint(BlockA);
  StoreInst *StoreA = B.CreateStore(B.getInt8(1), Alloca);
  B.CreateBr(BlockC);

  B.SetInsertPoint(BlockB);
  StoreInst *StoreB = B.CreateStore(B.getInt8(2), Alloca);
  B.CreateBr(BlockC);

  B.SetInsertPoint(BlockC);
  B.CreateUnreachable();

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;

  auto *AccessEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreEntry));
  auto *StoreAEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
  auto *StoreBEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));

  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
            AccessEntry);
  ASSERT_TRUE(StoreAEntry->isOptimized());

  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreBEntry),
            AccessEntry);
  ASSERT_TRUE(StoreBEntry->isOptimized());

  // Note that if we did InsertionPlace::Beginning, we don't go out of our way
  // to invalidate the cache for StoreBEntry. If the user wants to actually do
  // moves like these, it's up to them to ensure that nearby cache entries are
  // correctly invalidated (which, in general, requires walking all
  // instructions that the moved instruction dominates). So we probably
  // shouldn't be doing moves like this in general. Still, it works as a
  // test case. ;)
  MemorySSAUpdater(&MSSA).moveToPlace(StoreAEntry, BlockB,
                                      MemorySSA::InsertionPlace::End);
  ASSERT_FALSE(StoreAEntry->isOptimized());
  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
            StoreBEntry);
}

TEST_F(MemorySSATest, TestOptimizedDefsAreProperUses) {
  F = Function::Create(FunctionType::get(B.getVoidTy(),
                                         {B.getInt8PtrTy(), B.getInt8PtrTy()},
                                         false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *AllocB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");

  StoreInst *StoreA = B.CreateStore(ConstantInt::get(Int8, 0), AllocA);
  StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 1), AllocB);
  StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  // If these don't hold, there's no chance of the test result being useful.
  ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA),
            MSSA.getLiveOnEntryDef());
  ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreB),
            MSSA.getLiveOnEntryDef());
  auto *StoreAAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
  auto *StoreA2Access = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA2));
  ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA2), StoreAAccess);
  ASSERT_EQ(StoreA2Access->getOptimized(), StoreAAccess);

  auto *StoreBAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
  ASSERT_LT(StoreAAccess->getID(), StoreBAccess->getID());
  ASSERT_LT(StoreBAccess->getID(), StoreA2Access->getID());

  auto SortVecByID = [](std::vector<const MemoryDef *> &Defs) {
    llvm::sort(Defs, [](const MemoryDef *LHS, const MemoryDef *RHS) {
      return LHS->getID() < RHS->getID();
    });
  };

  auto SortedUserList = [&](const MemoryDef *MD) {
    std::vector<const MemoryDef *> Result;
    transform(MD->users(), std::back_inserter(Result),
              [](const User *U) { return cast<MemoryDef>(U); });
    SortVecByID(Result);
    return Result;
  };

  // Use std::vectors, since they have nice pretty-printing if the test fails.
  // Parens are necessary because EXPECT_EQ is a macro, and we have commas in
  // our init lists...
  EXPECT_EQ(SortedUserList(StoreAAccess),
            (std::vector<const MemoryDef *>{StoreBAccess, StoreA2Access}));

  EXPECT_EQ(SortedUserList(StoreBAccess),
            std::vector<const MemoryDef *>{StoreA2Access});

  // StoreAAccess should be present twice, since it uses liveOnEntry for both
  // its defining and optimized accesses. This is a bit awkward, and is not
  // relied upon anywhere at the moment. If this is painful, we can fix it.
  EXPECT_EQ(SortedUserList(cast<MemoryDef>(MSSA.getLiveOnEntryDef())),
            (std::vector<const MemoryDef *>{StoreAAccess, StoreAAccess,
                                            StoreBAccess}));
}

//   entry
//     |
//   header
//    /  \
//  body  |
//    \  /
//    exit
// header:
//  ; 1 = MemoryDef(liveOnEntry)
// body:
//  ; 2 = MemoryDef(1)
// exit:
//  ; 3 = MemoryPhi({body, 2}, {header, 1})
//  ; 4 = MemoryDef(3); optimized to 3, cannot optimize through the phi.
// Insert edge: entry -> exit; check that the MemorySSA update is correct.
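//
// After inserting the entry -> exit edge, the phi in exit should gain an
// incoming value for the new edge (entry contains no defs, so presumably
// liveOnEntry). Since the exit store was only ever optimized to the phi, the
// walker should still return that same phi after the update, as the test
// below checks.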
TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiNotOpt) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  Argument *PointerArg = &*F->arg_begin();
  BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
  BasicBlock *Header(BasicBlock::Create(C, "header", F));
  BasicBlock *Body(BasicBlock::Create(C, "body", F));
  BasicBlock *Exit(BasicBlock::Create(C, "exit", F));
  B.SetInsertPoint(Entry);
  BranchInst::Create(Header, Entry);
  B.SetInsertPoint(Header);
  B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Exit, Body);
  B.SetInsertPoint(Body);
  B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Exit, Body);
  B.SetInsertPoint(Exit);
  StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  std::unique_ptr<MemorySSAUpdater> MSSAU =
      std::make_unique<MemorySSAUpdater>(&MSSA);

  MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
  EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));

  // Alter CFG, add edge: entry -> exit
  Entry->getTerminator()->eraseFromParent();
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Header, Exit);
  SmallVector<CFGUpdate, 1> Updates;
  Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
  Analyses->DT.applyUpdates(Updates);
  MSSAU->applyInsertUpdates(Updates, Analyses->DT);
  EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
}

//   entry
//     |
//   header
//    /  \
//  body  |
//    \  /
//    exit
// header:
//  ; 1 = MemoryDef(liveOnEntry)
// body:
//  ; 2 = MemoryDef(1)
// exit:
//  ; 3 = MemoryPhi({body, 2}, {header, 1})
//  ; 4 = MemoryDef(3); optimized to 1 here; the added edge should invalidate
//  ;     the optimized access.
// Insert edge: entry -> exit; check that the MemorySSA update is correct.
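//
// Once the entry -> exit edge is added, the MemoryPhi in exit should pick up
// an incoming value for the new edge, so the old optimized clobber (1) is no
// longer valid; the walker is then expected to report the phi itself as the
// clobber of the exit store, as the test below verifies.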
TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiOpt) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  Argument *PointerArg = &*F->arg_begin();
  Type *Int8 = Type::getInt8Ty(C);
  BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
  BasicBlock *Header(BasicBlock::Create(C, "header", F));
  BasicBlock *Body(BasicBlock::Create(C, "body", F));
  BasicBlock *Exit(BasicBlock::Create(C, "exit", F));

  B.SetInsertPoint(Entry);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  BranchInst::Create(Header, Entry);

  B.SetInsertPoint(Header);
  StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Exit, Body);

  B.SetInsertPoint(Body);
  B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
  BranchInst::Create(Exit, Body);

  B.SetInsertPoint(Exit);
  StoreInst *S2 = B.CreateStore(B.getInt8(16), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  std::unique_ptr<MemorySSAUpdater> MSSAU =
      std::make_unique<MemorySSAUpdater>(&MSSA);

  MemoryDef *DefS1 = cast<MemoryDef>(MSSA.getMemoryAccess(S1));
  EXPECT_EQ(DefS1, Walker->getClobberingMemoryAccess(S2));

  // Alter CFG, add edge: entry -> exit
  Entry->getTerminator()->eraseFromParent();
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Header, Exit);
  SmallVector<CFGUpdate, 1> Updates;
  Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
  Analyses->DT.applyUpdates(Updates);
  MSSAU->applyInsertUpdates(Updates, Analyses->DT);

  MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
  EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S2));
}

//  entry
//  /  |
// a   |
// / \ |
// b c f
// \ / |
//  d  |
//  \ /
//   e
// f:
//  ; 1 = MemoryDef(liveOnEntry)
// e:
//  ; 2 = MemoryPhi({d, liveOnEntry}, {f, 1})
//
// Insert edge: f -> c, check update is correct.
// After update:
// f:
//  ; 1 = MemoryDef(liveOnEntry)
// c:
//  ; 3 = MemoryPhi({a, liveOnEntry}, {f, 1})
// d:
//  ; 4 = MemoryPhi({b, liveOnEntry}, {c, 3})
// e:
//  ; 2 = MemoryPhi({d, 4}, {f, 1})
TEST_F(MemorySSATest, TestAddedEdgeToBlockWithNoPhiAddNewPhis) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  Argument *PointerArg = &*F->arg_begin();
  BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
  BasicBlock *ABlock(BasicBlock::Create(C, "a", F));
  BasicBlock *BBlock(BasicBlock::Create(C, "b", F));
  BasicBlock *CBlock(BasicBlock::Create(C, "c", F));
  BasicBlock *DBlock(BasicBlock::Create(C, "d", F));
  BasicBlock *EBlock(BasicBlock::Create(C, "e", F));
  BasicBlock *FBlock(BasicBlock::Create(C, "f", F));

  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), ABlock, FBlock);
  B.SetInsertPoint(ABlock);
  B.CreateCondBr(B.getTrue(), BBlock, CBlock);
  B.SetInsertPoint(BBlock);
  BranchInst::Create(DBlock, BBlock);
  B.SetInsertPoint(CBlock);
  BranchInst::Create(DBlock, CBlock);
  B.SetInsertPoint(DBlock);
  BranchInst::Create(EBlock, DBlock);
  B.SetInsertPoint(FBlock);
  B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(EBlock, FBlock);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAU =
      std::make_unique<MemorySSAUpdater>(&MSSA);

  // Alter CFG, add edge: f -> c
  FBlock->getTerminator()->eraseFromParent();
  B.SetInsertPoint(FBlock);
  B.CreateCondBr(B.getTrue(), CBlock, EBlock);
  SmallVector<CFGUpdate, 1> Updates;
  Updates.push_back({cfg::UpdateKind::Insert, FBlock, CBlock});
  Analyses->DT.applyUpdates(Updates);
  MSSAU->applyInsertUpdates(Updates, Analyses->DT);

  MemoryPhi *MPC = MSSA.getMemoryAccess(CBlock);
  EXPECT_NE(MPC, nullptr);
  MemoryPhi *MPD = MSSA.getMemoryAccess(DBlock);
  EXPECT_NE(MPD, nullptr);
  MemoryPhi *MPE = MSSA.getMemoryAccess(EBlock);
  EXPECT_EQ(MPD, MPE->getIncomingValueForBlock(DBlock));
}

TEST_F(MemorySSATest, TestCallClobber) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  Value *Pointer1 = &*F->arg_begin();
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Value *Pointer2 = B.CreateGEP(B.getInt8Ty(), Pointer1, B.getInt64(1));
  Instruction *StorePointer1 = B.CreateStore(B.getInt8(0), Pointer1);
  Instruction *StorePointer2 = B.CreateStore(B.getInt8(0), Pointer2);
  Instruction *MemSet = B.CreateMemSet(Pointer2, B.getInt8(0), 1, Align(1));

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  MemoryUseOrDef *Store1Access = MSSA.getMemoryAccess(StorePointer1);
  MemoryUseOrDef *Store2Access = MSSA.getMemoryAccess(StorePointer2);
  MemoryUseOrDef *MemSetAccess = MSSA.getMemoryAccess(MemSet);

  MemoryAccess *Pointer1Clobber = Walker->getClobberingMemoryAccess(
      MemSetAccess, MemoryLocation(Pointer1, LocationSize::precise(1)));
  EXPECT_EQ(Pointer1Clobber, Store1Access);

  MemoryAccess *Pointer2Clobber = Walker->getClobberingMemoryAccess(
      MemSetAccess, MemoryLocation(Pointer2, LocationSize::precise(1)));
  EXPECT_EQ(Pointer2Clobber, MemSetAccess);

  MemoryAccess *MemSetClobber =
      Walker->getClobberingMemoryAccess(MemSetAccess);
  EXPECT_EQ(MemSetClobber, Store2Access);
}

TEST_F(MemorySSATest, TestLoadClobber) {
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  Value *Pointer1 = &*F->arg_begin();
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Value *Pointer2 = B.CreateGEP(B.getInt8Ty(), Pointer1, B.getInt64(1));
  Instruction *LoadPointer1 =
      B.CreateLoad(B.getInt8Ty(), Pointer1, /* Volatile */ true);
  Instruction *LoadPointer2 =
      B.CreateLoad(B.getInt8Ty(), Pointer2, /* Volatile */ true);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  MemoryUseOrDef *Load1Access = MSSA.getMemoryAccess(LoadPointer1);
  MemoryUseOrDef *Load2Access = MSSA.getMemoryAccess(LoadPointer2);

  // When providing a memory location, we should never return a load as the
  // clobber.
  MemoryAccess *Pointer1Clobber = Walker->getClobberingMemoryAccess(
      Load2Access, MemoryLocation(Pointer1, LocationSize::precise(1)));
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(Pointer1Clobber));

  MemoryAccess *Pointer2Clobber = Walker->getClobberingMemoryAccess(
      Load2Access, MemoryLocation(Pointer2, LocationSize::precise(1)));
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(Pointer2Clobber));

  MemoryAccess *Load2Clobber = Walker->getClobberingMemoryAccess(Load2Access);
  EXPECT_EQ(Load2Clobber, Load1Access);
}

// We want to test that location information is retained when the
// IsGuaranteedLoopInvariant function handles a memory access referring to a
// pointer defined in the entry block, and hence automatically guaranteed to
// be loop invariant.
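//
// The store in %exit below writes an i64 through %v1, so the MemoryLocation
// handed to upward_defs_begin() starts out as a precise 8-byte location.
// Because %v0 (and therefore %v1) is computed in the entry block, the upward
// walk is expected to keep that size precise rather than widening it, which
// is what the EXPECT checks in the test verify.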
TEST_F(MemorySSATest, TestLoopInvariantEntryBlockPointer) {
  SMDiagnostic E;
  auto LocalM =
      parseAssemblyString("define void @test(i64 %a0, i8* %a1, i1* %a2) {\n"
                          "entry:\n"
                          "%v0 = getelementptr i8, i8* %a1, i64 %a0\n"
                          "%v1 = bitcast i8* %v0 to i64*\n"
                          "%v2 = bitcast i8* %v0 to i32*\n"
                          "%v3 = load i1, i1* %a2\n"
                          "br i1 %v3, label %body, label %exit\n"
                          "body:\n"
                          "store i32 1, i32* %v2\n"
                          "br label %exit\n"
                          "exit:\n"
                          "store i64 0, i64* %v1\n"
                          "ret void\n"
                          "}",
                          E, C);
  ASSERT_TRUE(LocalM);
  F = LocalM->getFunction("test");
  ASSERT_TRUE(F);
  // Set up the analyses.
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  // Find the exit block.
  for (auto &BB : *F) {
    if (BB.getName() == "exit") {
      // Get the store instruction.
      auto *SI = BB.getFirstNonPHI();
      // Get the memory access and location.
      MemoryAccess *MA = MSSA.getMemoryAccess(SI);
      MemoryLocation ML = MemoryLocation::get(SI);
      // Use the 'upward_defs_iterator', which internally calls
      // IsGuaranteedLoopInvariant.
      auto ItA = upward_defs_begin({MA, ML}, MSSA.getDomTree());
      auto ItB =
          upward_defs_begin({ItA->first, ItA->second}, MSSA.getDomTree());
      // Check that the location information has been retained.
      EXPECT_TRUE(ItB->second.Size.isPrecise());
      EXPECT_TRUE(ItB->second.Size.hasValue());
      EXPECT_TRUE(ItB->second.Size.getValue() == 8);
    }
  }
}

TEST_F(MemorySSATest, TestInvariantGroup) {
  SMDiagnostic E;
  auto M = parseAssemblyString("declare void @f(i8*)\n"
                               "define i8 @test(i8* %p) {\n"
                               "entry:\n"
                               "  store i8 42, i8* %p, !invariant.group !0\n"
                               "  call void @f(i8* %p)\n"
                               "  %v = load i8, i8* %p, !invariant.group !0\n"
                               "  ret i8 %v\n"
                               "}\n"
                               "!0 = !{}",
                               E, C);
  ASSERT_TRUE(M);
  F = M->getFunction("test");
  ASSERT_TRUE(F);
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  auto &BB = F->getEntryBlock();
  auto &SI = cast<StoreInst>(*BB.begin());
  auto &Call = cast<CallBase>(*std::next(BB.begin()));
  auto &LI = cast<LoadInst>(*std::next(std::next(BB.begin())));

  {
    MemoryAccess *SAccess = MSSA.getMemoryAccess(&SI);
    MemoryAccess *LAccess = MSSA.getMemoryAccess(&LI);
    MemoryAccess *SClobber = Walker->getClobberingMemoryAccess(SAccess);
    EXPECT_TRUE(MSSA.isLiveOnEntryDef(SClobber));
    MemoryAccess *LClobber = Walker->getClobberingMemoryAccess(LAccess);
    EXPECT_EQ(SAccess, LClobber);
  }

  // Remove the store and verify that the memory accesses still make sense.
  MemorySSAUpdater Updater(&MSSA);
  Updater.removeMemoryAccess(&SI);
  SI.eraseFromParent();

  {
    MemoryAccess *CallAccess = MSSA.getMemoryAccess(&Call);
    MemoryAccess *LAccess = MSSA.getMemoryAccess(&LI);
    MemoryAccess *LClobber = Walker->getClobberingMemoryAccess(LAccess);
    EXPECT_EQ(CallAccess, LClobber);
  }
}

// Return the basic block in \p F whose name is \p Name.
static BasicBlock *getBasicBlockByName(Function &F, StringRef Name) {
  for (BasicBlock &BB : F)
    if (BB.getName() == Name)
      return &BB;
  llvm_unreachable("Expected to find basic block!");
}

// Return the instruction in \p F whose name is \p Name.
static Instruction *getInstructionByName(Function &F, StringRef Name) {
  for (BasicBlock &BB : F)
    for (Instruction &I : BB)
      if (I.getName() == Name)
        return &I;
  llvm_unreachable("Expected to find instruction!");
}
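
// TestVisitedBlocks calls MemorySSAUpdater::moveToPlace() twice, editing the
// CFG in between. As the inline comments below explain, %other.i ends up in
// the updater's VisitedBlocks cache after the first move; once a new edge
// into %other.i is added, a stale cache entry could make the second move
// traverse incorrectly, historically tripping the "Incomplete phi during
// partial rename" assertion.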
instruction!"); 1786 } 1787 1788 TEST_F(MemorySSATest, TestVisitedBlocks) { 1789 SMDiagnostic E; 1790 auto M = parseAssemblyString( 1791 "define void @test(i64* noalias %P, i64 %N) {\n" 1792 "preheader.n:\n" 1793 " br label %header.n\n" 1794 "header.n:\n" 1795 " %n = phi i64 [ 0, %preheader.n ], [ %inc.n, %latch.n ]\n" 1796 " %guard.cond.i = icmp slt i64 0, %N\n" 1797 " br i1 %guard.cond.i, label %header.i.check, label %other.i\n" 1798 "header.i.check:\n" 1799 " br label %preheader.i\n" 1800 "preheader.i:\n" 1801 " br label %header.i\n" 1802 "header.i:\n" 1803 " %i = phi i64 [ 0, %preheader.i ], [ %inc.i, %header.i ]\n" 1804 " %v1 = load i64, i64* %P, align 8\n" 1805 " %v2 = load i64, i64* %P, align 8\n" 1806 " %inc.i = add nsw i64 %i, 1\n" 1807 " %cmp.i = icmp slt i64 %inc.i, %N\n" 1808 " br i1 %cmp.i, label %header.i, label %exit.i\n" 1809 "exit.i:\n" 1810 " br label %commonexit\n" 1811 "other.i:\n" 1812 " br label %commonexit\n" 1813 "commonexit:\n" 1814 " br label %latch.n\n" 1815 "latch.n:\n" 1816 " %inc.n = add nsw i64 %n, 1\n" 1817 " %cmp.n = icmp slt i64 %inc.n, %N\n" 1818 " br i1 %cmp.n, label %header.n, label %exit.n\n" 1819 "exit.n:\n" 1820 " ret void\n" 1821 "}\n", 1822 E, C); 1823 ASSERT_TRUE(M); 1824 F = M->getFunction("test"); 1825 ASSERT_TRUE(F); 1826 setupAnalyses(); 1827 MemorySSA &MSSA = *Analyses->MSSA; 1828 MemorySSAUpdater Updater(&MSSA); 1829 1830 { 1831 // Move %v1 before the terminator of %header.i.check 1832 BasicBlock *BB = getBasicBlockByName(*F, "header.i.check"); 1833 Instruction *LI = getInstructionByName(*F, "v1"); 1834 LI->moveBefore(BB->getTerminator()); 1835 if (MemoryUseOrDef *MUD = MSSA.getMemoryAccess(LI)) 1836 Updater.moveToPlace(MUD, BB, MemorySSA::BeforeTerminator); 1837 1838 // Change the termiantor of %header.i.check to `br label true, label 1839 // %preheader.i, label %other.i` 1840 BB->getTerminator()->eraseFromParent(); 1841 ConstantInt *BoolTrue = ConstantInt::getTrue(F->getContext()); 1842 BranchInst::Create(getBasicBlockByName(*F, "preheader.i"), 1843 getBasicBlockByName(*F, "other.i"), BoolTrue, BB); 1844 SmallVector<DominatorTree::UpdateType, 4> DTUpdates; 1845 DTUpdates.push_back(DominatorTree::UpdateType( 1846 DominatorTree::Insert, BB, getBasicBlockByName(*F, "other.i"))); 1847 Updater.applyUpdates(DTUpdates, Analyses->DT, true); 1848 } 1849 1850 // After the first moveToPlace(), %other.i is in VisitedBlocks, even after 1851 // there is a new edge to %other.i, which makes the second moveToPlace() 1852 // traverse incorrectly. 1853 { 1854 // Move %v2 before the terminator of %preheader.i 1855 BasicBlock *BB = getBasicBlockByName(*F, "preheader.i"); 1856 Instruction *LI = getInstructionByName(*F, "v2"); 1857 LI->moveBefore(BB->getTerminator()); 1858 // Check that there is no assertion of "Incomplete phi during partial 1859 // rename" 1860 if (MemoryUseOrDef *MUD = MSSA.getMemoryAccess(LI)) 1861 Updater.moveToPlace(MUD, BB, MemorySSA::BeforeTerminator); 1862 } 1863 } 1864