//===- ScalarEvolutionExpanderTest.cpp - ScalarEvolution unit tests ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"

namespace llvm {

// We use this fixture to ensure that we clean up ScalarEvolution before
// deleting the PassManager.
class ScalarEvolutionsTest : public testing::Test {
protected:
  LLVMContext Context;
  Module M;
  TargetLibraryInfoImpl TLII;
  TargetLibraryInfo TLI;

  std::unique_ptr<AssumptionCache> AC;
  std::unique_ptr<DominatorTree> DT;
  std::unique_ptr<LoopInfo> LI;

  ScalarEvolutionsTest() : M("", Context), TLII(), TLI(TLII) {}

  ScalarEvolution buildSE(Function &F) {
    AC.reset(new AssumptionCache(F));
    DT.reset(new DominatorTree(F));
    LI.reset(new LoopInfo(*DT));
    return ScalarEvolution(F, TLI, *AC, *DT, *LI);
  }

  void runWithSE(
      Module &M, StringRef FuncName,
      function_ref<void(Function &F, LoopInfo &LI, ScalarEvolution &SE)> Test) {
    auto *F = M.getFunction(FuncName);
    ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
    ScalarEvolution SE = buildSE(*F);
    Test(*F, *LI, SE);
  }

  static Optional<APInt> computeConstantDifference(ScalarEvolution &SE,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
    return SE.computeConstantDifference(LHS, RHS);
  }
};

static Instruction &GetInstByName(Function &F, StringRef Name) {
  for (auto &I : instructions(F))
    if (I.getName() == Name)
      return I;
  llvm_unreachable("Could not find instruction!");
}

TEST_F(ScalarEvolutionsTest, ExpandPtrTypeSCEV) {
  // This tests the fix for PR30213. It exercises the branch in SCEV
  // expansion that is taken when the value in a ValueOffsetPair is a
  // pointer and the offset is not divisible by the element type size of
  // the value.
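  // Here the needed offset is -1 byte against an i32* value, so an i32 GEP
  // cannot express it; the expander is expected to cast to i8*, emit an i8
  // GEP by -1, and cast back, which is the shape checked at the end of this
  // test.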
  auto *I8Ty = Type::getInt8Ty(Context);
  auto *I8PtrTy = Type::getInt8PtrTy(Context);
  auto *I32Ty = Type::getInt32Ty(Context);
  auto *I32PtrTy = Type::getInt32PtrTy(Context);
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
  BranchInst::Create(LoopBB, EntryBB);
  ReturnInst::Create(Context, nullptr, ExitBB);

  // loop:                            ; preds = %loop, %entry
  //   %alloca = alloca i32
  //   %gep0 = getelementptr i32, i32* %alloca, i32 1
  //   %bitcast1 = bitcast i32* %gep0 to i8*
  //   %gep1 = getelementptr i8, i8* %bitcast1, i32 1
  //   %gep2 = getelementptr i8, i8* undef, i32 1
  //   %cmp = icmp ult i8* undef, %bitcast1
  //   %select = select i1 %cmp, i8* %gep1, i8* %gep2
  //   %bitcast2 = bitcast i8* %select to i32*
  //   br i1 undef, label %loop, label %exit

  const DataLayout &DL = F->getParent()->getDataLayout();
  BranchInst *Br = BranchInst::Create(
      LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
  AllocaInst *Alloca =
      new AllocaInst(I32Ty, DL.getAllocaAddrSpace(), "alloca", Br);
  ConstantInt *Ci32 = ConstantInt::get(Context, APInt(32, 1));
  GetElementPtrInst *Gep0 =
      GetElementPtrInst::Create(I32Ty, Alloca, Ci32, "gep0", Br);
  CastInst *CastA =
      CastInst::CreateBitOrPointerCast(Gep0, I8PtrTy, "bitcast1", Br);
  GetElementPtrInst *Gep1 =
      GetElementPtrInst::Create(I8Ty, CastA, Ci32, "gep1", Br);
  GetElementPtrInst *Gep2 = GetElementPtrInst::Create(
      I8Ty, UndefValue::get(I8PtrTy), Ci32, "gep2", Br);
  CmpInst *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT,
                                 UndefValue::get(I8PtrTy), CastA, "cmp", Br);
  SelectInst *Sel = SelectInst::Create(Cmp, Gep1, Gep2, "select", Br);
  CastInst *CastB =
      CastInst::CreateBitOrPointerCast(Sel, I32PtrTy, "bitcast2", Br);

  ScalarEvolution SE = buildSE(*F);
  auto *S = SE.getSCEV(CastB);
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  Value *V =
      Exp.expandCodeFor(cast<SCEVAddExpr>(S)->getOperand(1), nullptr, Br);

  // Expect the expansion code to contain:
  //   %0 = bitcast i32* %bitcast2 to i8*
  //   %uglygep = getelementptr i8, i8* %0, i64 -1
  //   %1 = bitcast i8* %uglygep to i32*
  EXPECT_TRUE(isa<BitCastInst>(V));
  Instruction *Gep = cast<Instruction>(V)->getPrevNode();
  EXPECT_TRUE(isa<GetElementPtrInst>(Gep));
  EXPECT_TRUE(isa<ConstantInt>(Gep->getOperand(1)));
  EXPECT_EQ(cast<ConstantInt>(Gep->getOperand(1))->getSExtValue(), -1);
  EXPECT_TRUE(isa<BitCastInst>(Gep->getPrevNode()));
}

// Make sure that SCEV doesn't introduce illegal ptrtoint/inttoptr instructions.
TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExprNonIntegral) {
  /*
   * Create the following code:
   * func(i64 addrspace(10)* %arg)
   * top:
   *   br label %L.ph
   * L.ph:
   *   br label %L
   * L:
   *   %phi = phi i64 [i64 0, %L.ph], [ %add, %L ]
   *   %add = add i64 %phi, 1
   *   br i1 undef, label %L, label %post
   * post:
   *   %gepbase = getelementptr i64 addrspace(10)* %arg, i64 1
   *   #= %gep = getelementptr i64 addrspace(10)* %gepbase, i64 %add =#
   *   ret void
   *
   * We will create the appropriate SCEV expression for %gep and expand it,
   * then check that no inttoptr/ptrtoint instructions got inserted.
   */

  // Create a module with non-integral pointers in its datalayout.
  Module NIM("nonintegral", Context);
  std::string DataLayout = M.getDataLayoutStr();
  if (!DataLayout.empty())
    DataLayout += "-";
  DataLayout += "ni:10";
  NIM.setDataLayout(DataLayout);

  Type *T_int1 = Type::getInt1Ty(Context);
  Type *T_int64 = Type::getInt64Ty(Context);
  Type *T_pint64 = T_int64->getPointerTo(10);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);

  Argument *Arg = &*F->arg_begin();

  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
  BasicBlock *L = BasicBlock::Create(Context, "L", F);
  BasicBlock *Post = BasicBlock::Create(Context, "post", F);

  IRBuilder<> Builder(Top);
  Builder.CreateBr(LPh);

  Builder.SetInsertPoint(LPh);
  Builder.CreateBr(L);

  Builder.SetInsertPoint(L);
  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
  Value *Add = Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add");
  Builder.CreateCondBr(UndefValue::get(T_int1), L, Post);
  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
  Phi->addIncoming(Add, L);

  Builder.SetInsertPoint(Post);
  Value *GepBase =
      Builder.CreateGEP(T_int64, Arg, ConstantInt::get(T_int64, 1));
  Instruction *Ret = Builder.CreateRetVoid();

  ScalarEvolution SE = buildSE(*F);
  auto *AddRec =
      SE.getAddRecExpr(SE.getUnknown(GepBase), SE.getConstant(T_int64, 1),
                       LI->getLoopFor(L), SCEV::FlagNUW);

  SCEVExpander Exp(SE, NIM.getDataLayout(), "expander");
  Exp.disableCanonicalMode();
  Exp.expandCodeFor(AddRec, T_pint64, Ret);

  // Make sure none of the instructions inserted were inttoptr/ptrtoint.
  // The verifier will check this.
  EXPECT_FALSE(verifyFunction(*F, &errs()));
}

// Check that we can correctly identify the points at which the SCEV of the
// AddRec can be expanded.
TEST_F(ScalarEvolutionsTest, SCEVExpanderIsSafeToExpandAt) {
  /*
   * Create the following code:
   * func(i64 addrspace(10)* %arg)
   * top:
   *   br label %L.ph
   * L.ph:
   *   br label %L
   * L:
   *   %phi = phi i64 [i64 0, %L.ph], [ %add, %L ]
   *   %add = add i64 %phi, 1
   *   %cond = icmp slt i64 %add, 1000
   *   br i1 %cond, label %L, label %post
   * post:
   *   ret void
   *
   */

  // Create a module with non-integral pointers in its datalayout.
  Module NIM("nonintegral", Context);
  std::string DataLayout = M.getDataLayoutStr();
  if (!DataLayout.empty())
    DataLayout += "-";
  DataLayout += "ni:10";
  NIM.setDataLayout(DataLayout);

  Type *T_int64 = Type::getInt64Ty(Context);
  Type *T_pint64 = T_int64->getPointerTo(10);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);

  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
  BasicBlock *L = BasicBlock::Create(Context, "L", F);
  BasicBlock *Post = BasicBlock::Create(Context, "post", F);

  IRBuilder<> Builder(Top);
  Builder.CreateBr(LPh);

  Builder.SetInsertPoint(LPh);
  Builder.CreateBr(L);

  Builder.SetInsertPoint(L);
  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
  auto *Add = cast<Instruction>(
      Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
  auto *Limit = ConstantInt::get(T_int64, 1000);
  auto *Cond = cast<Instruction>(
      Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
  Builder.CreateCondBr(Cond, L, Post);
  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
  Phi->addIncoming(Add, L);

  Builder.SetInsertPoint(Post);
  Builder.CreateRetVoid();

  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(Phi);
  EXPECT_TRUE(isa<SCEVAddRecExpr>(S));
  const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
  EXPECT_TRUE(AR->isAffine());
  EXPECT_FALSE(isSafeToExpandAt(AR, Top->getTerminator(), SE));
  EXPECT_FALSE(isSafeToExpandAt(AR, LPh->getTerminator(), SE));
  EXPECT_TRUE(isSafeToExpandAt(AR, L->getTerminator(), SE));
  EXPECT_TRUE(isSafeToExpandAt(AR, Post->getTerminator(), SE));
}

// Check that the SCEV expander does not reuse an existing instruction that
// carries the nuw flag when expanding an expression without that flag.
TEST_F(ScalarEvolutionsTest, SCEVExpanderNUW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *   br i1 false, label %exit, label %body
   * body:
   *   %s1 = add i64 %a, -1
   *   br label %exit
   * exit:
   *   %s2 = add nuw i64 %a, -1
   *   ret void
   */
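  // Reusing the nuw add in %exit for %s1's SCEV would be unsound: the flag
  // is a property of that particular instruction, not of the expression, so
  // the expander must emit or reuse a flag-free computation instead.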
  // Create a module.
  Module M("SCEVExpanderNUW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Body = BasicBlock::Create(Context, "body", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
  Builder.CreateCondBr(Cond, Exit, Body);

  Builder.SetInsertPoint(Body);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoUnsignedWrap(true);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(S));
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
  EXPECT_FALSE(I->hasNoUnsignedWrap());
}

// Check that the SCEV expander does not reuse an existing instruction that
// carries the nsw flag when expanding an expression without that flag.
TEST_F(ScalarEvolutionsTest, SCEVExpanderNSW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *   br i1 false, label %exit, label %body
   * body:
   *   %s1 = add i64 %a, -1
   *   br label %exit
   * exit:
   *   %s2 = add nsw i64 %a, -1
   *   ret void
   */

  // Create a module.
  Module M("SCEVExpanderNSW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Body = BasicBlock::Create(Context, "body", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
  Builder.CreateCondBr(Cond, Exit, Body);

  Builder.SetInsertPoint(Body);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoSignedWrap(true);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(S));
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
  EXPECT_FALSE(I->hasNoSignedWrap());
}

// Check that expansion does not reuse a cached SCEV -> Value mapping when
// the cached value differs from the expanded SCEV in the nuw flag.
TEST_F(ScalarEvolutionsTest, SCEVCacheNUW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *   %s1 = add i64 %a, -1
   *   %s2 = add nuw i64 %a, -1
   *   br label %exit
   * exit:
   *   ret void
   */
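  // Both adds compute %a - 1 and therefore map to the same SCEV; the test
  // ensures that the nuw-flagged instruction, although cached first, is not
  // picked as the expansion of the flag-free SCEV.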
  // Create a module.
  Module M("SCEVCacheNUW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoUnsignedWrap(true);
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  // Get S2 first to move it into the cache.
  const SCEV *SC2 = SE.getSCEV(S2);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
  // Now get S1.
  const SCEV *SC1 = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
  // Expand SC1; the expander should reuse S1, not S2, even though S2 was
  // cached first.
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
  EXPECT_FALSE(I->hasNoUnsignedWrap());
}

// Check that expansion does not reuse a cached SCEV -> Value mapping when
// the cached value differs from the expanded SCEV in the nsw flag.
TEST_F(ScalarEvolutionsTest, SCEVCacheNSW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *   %s1 = add i64 %a, -1
   *   %s2 = add nsw i64 %a, -1
   *   br label %exit
   * exit:
   *   ret void
   */

  // Create a module.
  Module M("SCEVCacheNSW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoSignedWrap(true);
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  // Get S2 first to move it into the cache.
  const SCEV *SC2 = SE.getSCEV(S2);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
  // Now get S1.
  const SCEV *SC1 = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
  // Expand SC1; the expander should reuse S1, not S2, even though S2 was
  // cached first.
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
  EXPECT_FALSE(I->hasNoSignedWrap());
}

TEST_F(ScalarEvolutionsTest, SCEVExpandInsertCanonicalIV) {
  LLVMContext C;
  SMDiagnostic Err;

  // Expand the addrec produced by GetAddRec into a loop without a canonical
  // IV. SCEVExpander will insert one.
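  // A canonical IV is a header PHI of the form {0,+,1}: it starts at 0 and
  // steps by 1 on each backedge. TestNarrowCanonicalIV below checks the
  // inserted PHI against exactly this shape.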
  auto TestNoCanonicalIV =
      [&](std::function<const SCEV *(ScalarEvolution & SE, Loop * L)>
              GetAddRec) {
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            "  br label %loop "
            "loop: "
            "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            "  %i.inc = add nsw i32 %i, 1 "
            "  %cont = icmp slt i32 %i.inc, %limit "
            "  br i1 %cont, label %loop, label %exit "
            "exit: "
            "  ret i32 %i.inc "
            "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(
            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
              auto &I = GetInstByName(F, "i");
              auto *Loop = LI.getLoopFor(I.getParent());
              EXPECT_FALSE(Loop->getCanonicalInductionVariable());

              auto *AR = GetAddRec(SE, Loop);
              unsigned ExpectedCanonicalIVWidth =
                  SE.getTypeSizeInBits(AR->getType());

              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
              auto *InsertAt = I.getNextNode();
              Exp.expandCodeFor(AR, nullptr, InsertAt);
              PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
              unsigned CanonicalIVBitWidth =
                  cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
              EXPECT_EQ(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
            });
      };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // which is narrower than the addrec type.
  // SCEVExpander will insert a canonical IV of a wider type to expand the
  // addrec.
  auto TestNarrowCanonicalIV = [&](std::function<const SCEV *(
                                       ScalarEvolution & SE, Loop * L)>
                                       GetAddRec) {
    std::unique_ptr<Module> M = parseAssemblyString(
        "define i32 @test(i32 %limit) { "
        "entry: "
        "  br label %loop "
        "loop: "
        "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
        "  %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
        "  %i.inc = add nsw i32 %i, 1 "
        "  %canonical.iv.inc = add i8 %canonical.iv, 1 "
        "  %cont = icmp slt i32 %i.inc, %limit "
        "  br i1 %cont, label %loop, label %exit "
        "exit: "
        "  ret i32 %i.inc "
        "}",
        Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
      auto &I = GetInstByName(F, "i");

      auto *LoopHeaderBB = I.getParent();
      auto *Loop = LI.getLoopFor(LoopHeaderBB);
      PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
      EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));

      auto *AR = GetAddRec(SE, Loop);

      unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
      unsigned CanonicalIVBitWidth =
          cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
      EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);

      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
      auto *InsertAt = I.getNextNode();
      Exp.expandCodeFor(AR, nullptr, InsertAt);

      // Loop over all of the PHI nodes, looking for the new canonical indvar.
      PHINode *NewCanonicalIV = nullptr;
      for (BasicBlock::iterator i = LoopHeaderBB->begin(); isa<PHINode>(i);
           ++i) {
        PHINode *PN = cast<PHINode>(i);
        if (PN == &I || PN == CanonicalIV)
          continue;
        // We expect that the only PHI added is the new canonical IV.
        EXPECT_FALSE(NewCanonicalIV);
        NewCanonicalIV = PN;
      }

      // Check that NewCanonicalIV is a canonical IV, i.e. {0,+,1}.
      BasicBlock *Incoming = nullptr, *Backedge = nullptr;
      EXPECT_TRUE(Loop->getIncomingAndBackEdge(Incoming, Backedge));
      auto *Start = NewCanonicalIV->getIncomingValueForBlock(Incoming);
      EXPECT_TRUE(isa<ConstantInt>(Start));
      EXPECT_TRUE(cast<ConstantInt>(Start)->isZero());
      auto *Next = NewCanonicalIV->getIncomingValueForBlock(Backedge);
      EXPECT_TRUE(isa<BinaryOperator>(Next));
      auto *NextBinOp = cast<BinaryOperator>(Next);
      EXPECT_EQ(NextBinOp->getOpcode(), Instruction::Add);
      EXPECT_EQ(NextBinOp->getOperand(0), NewCanonicalIV);
      auto *Step = NextBinOp->getOperand(1);
      EXPECT_TRUE(isa<ConstantInt>(Step));
      EXPECT_TRUE(cast<ConstantInt>(Step)->isOne());

      unsigned NewCanonicalIVBitWidth =
          cast<IntegerType>(NewCanonicalIV->getType())->getBitWidth();
      EXPECT_EQ(NewCanonicalIVBitWidth, ExpectedCanonicalIVWidth);
    });
  };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // of addrec width.
  // To expand the addrec, SCEVExpander should use the existing canonical IV.
  auto TestMatchingCanonicalIV =
      [&](std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec,
          unsigned ARBitWidth) {
        auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            "  br label %loop "
            "loop: "
            "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            "  %canonical.iv = phi " +
                ARBitWidthTypeStr +
                " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
                "  %i.inc = add nsw i32 %i, 1 "
                "  %canonical.iv.inc = add " +
                ARBitWidthTypeStr +
                " %canonical.iv, 1 "
                "  %cont = icmp slt i32 %i.inc, %limit "
                "  br i1 %cont, label %loop, label %exit "
                "exit: "
                "  ret i32 %i.inc "
                "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(
            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
              auto &I = GetInstByName(F, "i");
              auto &CanonicalIV = GetInstByName(F, "canonical.iv");

              auto *LoopHeaderBB = I.getParent();
              auto *Loop = LI.getLoopFor(LoopHeaderBB);
              EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
              unsigned CanonicalIVBitWidth =
                  cast<IntegerType>(CanonicalIV.getType())->getBitWidth();

              auto *AR = GetAddRec(SE, Loop);
              EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
              EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);

              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
              auto *InsertAt = I.getNextNode();
              Exp.expandCodeFor(AR, nullptr, InsertAt);

              // Loop over all of the PHI nodes, checking whether a new
              // canonical indvar was introduced.
              PHINode *NewCanonicalIV = nullptr;
              for (BasicBlock::iterator i = LoopHeaderBB->begin();
                   isa<PHINode>(i); ++i) {
                PHINode *PN = cast<PHINode>(i);
                if (PN == &I || PN == &CanonicalIV)
                  continue;
                NewCanonicalIV = PN;
              }
              EXPECT_FALSE(NewCanonicalIV);
            });
      };

  unsigned ARBitWidth = 16;
  Type *ARType = IntegerType::get(C, ARBitWidth);

  // Expand {5,+,1}.
  auto GetAR2 = [&](ScalarEvolution &SE, Loop *L) -> const SCEV * {
    return SE.getAddRecExpr(SE.getConstant(APInt(ARBitWidth, 5)),
                            SE.getOne(ARType), L, SCEV::FlagAnyWrap);
  };
  TestNoCanonicalIV(GetAR2);
  TestNarrowCanonicalIV(GetAR2);
  TestMatchingCanonicalIV(GetAR2, ARBitWidth);
}

TEST_F(ScalarEvolutionsTest, SCEVExpanderShlNSW) {
  auto checkOneCase = [this](std::string &&str) {
    LLVMContext C;
    SMDiagnostic Err;
    std::unique_ptr<Module> M = parseAssemblyString(str, Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    Function *F = M->getFunction("f");
    ASSERT_NE(F, nullptr) << "Could not find function 'f'";

    BasicBlock &Entry = F->getEntryBlock();
    LoadInst *Load = cast<LoadInst>(&Entry.front());
    BinaryOperator *And = cast<BinaryOperator>(*Load->user_begin());

    ScalarEvolution SE = buildSE(*F);
    const SCEV *AndSCEV = SE.getSCEV(And);
    EXPECT_TRUE(isa<SCEVMulExpr>(AndSCEV));
    EXPECT_TRUE(cast<SCEVMulExpr>(AndSCEV)->hasNoSignedWrap());

    SCEVExpander Exp(SE, M->getDataLayout(), "expander");
    auto *I = cast<Instruction>(Exp.expandCodeFor(AndSCEV, nullptr, And));
    EXPECT_EQ(I->getOpcode(), Instruction::Shl);
    EXPECT_FALSE(I->hasNoSignedWrap());
  };

  checkOneCase("define void @f(i16* %arrayidx) { "
               "  %1 = load i16, i16* %arrayidx "
               "  %2 = and i16 %1, -32768 "
               "  ret void "
               "} ");

  checkOneCase("define void @f(i8* %arrayidx) { "
               "  %1 = load i8, i8* %arrayidx "
               "  %2 = and i8 %1, -128 "
               "  ret void "
               "} ");
}

// Test expansion of nested addrecs in CanonicalMode.
// Expanding nested addrecs in canonical mode requires a canonical IV of a
// type wider than the type of the addrec itself. Currently, SCEVExpander
// just falls back to literal mode for nested addrecs.
TEST_F(ScalarEvolutionsTest, SCEVExpandNonAffineAddRec) {
  LLVMContext C;
  SMDiagnostic Err;

  // Expand the addrec produced by GetAddRec into a loop without a canonical
  // IV.
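  // The addrecs below are non-affine ({5,+,1,...}), so canonical-mode
  // expansion would need a canonical IV wider than the addrec type; the
  // expander instead reproduces the recurrence literally, and each test
  // verifies this by checking that SE.getSCEV of the expanded value yields
  // the original addrec.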
  auto TestNoCanonicalIV =
      [&](std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
              GetAddRec) {
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            "  br label %loop "
            "loop: "
            "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            "  %i.inc = add nsw i32 %i, 1 "
            "  %cont = icmp slt i32 %i.inc, %limit "
            "  br i1 %cont, label %loop, label %exit "
            "exit: "
            "  ret i32 %i.inc "
            "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(*M, "test",
                  [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
                    auto &I = GetInstByName(F, "i");
                    auto *Loop = LI.getLoopFor(I.getParent());
                    EXPECT_FALSE(Loop->getCanonicalInductionVariable());

                    auto *AR = GetAddRec(SE, Loop);
                    EXPECT_FALSE(AR->isAffine());

                    SCEVExpander Exp(SE, M->getDataLayout(), "expander");
                    auto *InsertAt = I.getNextNode();
                    Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
                    auto *ExpandedAR = SE.getSCEV(V);
                    // Check that the expansion happened literally.
                    EXPECT_EQ(AR, ExpandedAR);
                  });
      };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // which is narrower than the addrec type.
  auto TestNarrowCanonicalIV = [&](std::function<const SCEVAddRecExpr *(
                                       ScalarEvolution & SE, Loop * L)>
                                       GetAddRec) {
    std::unique_ptr<Module> M = parseAssemblyString(
        "define i32 @test(i32 %limit) { "
        "entry: "
        "  br label %loop "
        "loop: "
        "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
        "  %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
        "  %i.inc = add nsw i32 %i, 1 "
        "  %canonical.iv.inc = add i8 %canonical.iv, 1 "
        "  %cont = icmp slt i32 %i.inc, %limit "
        "  br i1 %cont, label %loop, label %exit "
        "exit: "
        "  ret i32 %i.inc "
        "}",
        Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
      auto &I = GetInstByName(F, "i");

      auto *LoopHeaderBB = I.getParent();
      auto *Loop = LI.getLoopFor(LoopHeaderBB);
      PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
      EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));

      auto *AR = GetAddRec(SE, Loop);
      EXPECT_FALSE(AR->isAffine());

      unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
      unsigned CanonicalIVBitWidth =
          cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
      EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);

      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
      auto *InsertAt = I.getNextNode();
      Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
      auto *ExpandedAR = SE.getSCEV(V);
      // Check that the expansion happened literally.
      EXPECT_EQ(AR, ExpandedAR);
    });
  };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // of addrec width.
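  // Even with a matching-width canonical IV available, the non-affine addrec
  // is still expanded literally; the check below only verifies that the
  // expansion round-trips through SE.getSCEV to the same addrec.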
  auto TestMatchingCanonicalIV =
      [&](std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
              GetAddRec,
          unsigned ARBitWidth) {
        auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            "  br label %loop "
            "loop: "
            "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            "  %canonical.iv = phi " +
                ARBitWidthTypeStr +
                " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
                "  %i.inc = add nsw i32 %i, 1 "
                "  %canonical.iv.inc = add " +
                ARBitWidthTypeStr +
                " %canonical.iv, 1 "
                "  %cont = icmp slt i32 %i.inc, %limit "
                "  br i1 %cont, label %loop, label %exit "
                "exit: "
                "  ret i32 %i.inc "
                "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(
            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
              auto &I = GetInstByName(F, "i");
              auto &CanonicalIV = GetInstByName(F, "canonical.iv");

              auto *LoopHeaderBB = I.getParent();
              auto *Loop = LI.getLoopFor(LoopHeaderBB);
              EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
              unsigned CanonicalIVBitWidth =
                  cast<IntegerType>(CanonicalIV.getType())->getBitWidth();

              auto *AR = GetAddRec(SE, Loop);
              EXPECT_FALSE(AR->isAffine());
              EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
              EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);

              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
              auto *InsertAt = I.getNextNode();
              Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
              auto *ExpandedAR = SE.getSCEV(V);
              // Check that the expansion happened literally.
              EXPECT_EQ(AR, ExpandedAR);
            });
      };

  unsigned ARBitWidth = 16;
  Type *ARType = IntegerType::get(C, ARBitWidth);

  // Expand {5,+,1,+,1}.
  auto GetAR3 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 3> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR3);
  TestNarrowCanonicalIV(GetAR3);
  TestMatchingCanonicalIV(GetAR3, ARBitWidth);

  // Expand {5,+,1,+,1,+,1}.
  auto GetAR4 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 4> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType),
                                        SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR4);
  TestNarrowCanonicalIV(GetAR4);
  TestMatchingCanonicalIV(GetAR4, ARBitWidth);

  // Expand {5,+,1,+,1,+,1,+,1}.
  auto GetAR5 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 5> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType),
                                        SE.getOne(ARType), SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR5);
  TestNarrowCanonicalIV(GetAR5);
  TestMatchingCanonicalIV(GetAR5, ARBitWidth);
}

} // end namespace llvm