//===- ScalarEvolutionsTest.cpp - ScalarEvolution unit tests --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"

namespace llvm {

// We use this fixture to ensure that we clean up ScalarEvolution before
// deleting the PassManager.
class ScalarEvolutionsTest : public testing::Test {
protected:
  LLVMContext Context;
  Module M;
  TargetLibraryInfoImpl TLII;
  TargetLibraryInfo TLI;

  std::unique_ptr<AssumptionCache> AC;
  std::unique_ptr<DominatorTree> DT;
  std::unique_ptr<LoopInfo> LI;

  ScalarEvolutionsTest() : M("", Context), TLII(), TLI(TLII) {}

  ScalarEvolution buildSE(Function &F) {
    AC.reset(new AssumptionCache(F));
    DT.reset(new DominatorTree(F));
    LI.reset(new LoopInfo(*DT));
    return ScalarEvolution(F, TLI, *AC, *DT, *LI);
  }

  void runWithSE(
      Module &M, StringRef FuncName,
      function_ref<void(Function &F, LoopInfo &LI, ScalarEvolution &SE)> Test) {
    auto *F = M.getFunction(FuncName);
    ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
    ScalarEvolution SE = buildSE(*F);
    Test(*F, *LI, SE);
  }

  static std::optional<APInt> computeConstantDifference(ScalarEvolution &SE,
                                                        const SCEV *LHS,
                                                        const SCEV *RHS) {
    return SE.computeConstantDifference(LHS, RHS);
  }

  static bool matchURem(ScalarEvolution &SE, const SCEV *Expr, const SCEV *&LHS,
                        const SCEV *&RHS) {
    return SE.matchURem(Expr, LHS, RHS);
  }

  static bool isImpliedCond(
      ScalarEvolution &SE, ICmpInst::Predicate Pred, const SCEV *LHS,
      const SCEV *RHS, ICmpInst::Predicate FoundPred, const SCEV *FoundLHS,
      const SCEV *FoundRHS) {
    return SE.isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
  }
};

TEST_F(ScalarEvolutionsTest, SCEVUnknownRAUW) {
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
                                        std::vector<Type *>(), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
  ReturnInst::Create(Context, nullptr, BB);

  Type *Ty = Type::getInt1Ty(Context);
  Constant *Init = Constant::getNullValue(Ty);
  Value *V0 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage, Init, "V0");
  Value *V1 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage, Init, "V1");
  Value *V2 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage, Init, "V2");

  ScalarEvolution SE = buildSE(*F);

  const SCEV *S0 = SE.getSCEV(V0);
  const SCEV *S1 = SE.getSCEV(V1);
  const SCEV *S2 = SE.getSCEV(V2);

  const SCEV *P0 = SE.getAddExpr(S0,
                                 SE.getConstant(S0->getType(), 2));
  const SCEV *P1 = SE.getAddExpr(S1, SE.getConstant(S0->getType(), 2));
  const SCEV *P2 = SE.getAddExpr(S2, SE.getConstant(S0->getType(), 2));

  auto *M0 = cast<SCEVAddExpr>(P0);
  auto *M1 = cast<SCEVAddExpr>(P1);
  auto *M2 = cast<SCEVAddExpr>(P2);

  EXPECT_EQ(cast<SCEVConstant>(M0->getOperand(0))->getValue()->getZExtValue(),
            2u);
  EXPECT_EQ(cast<SCEVConstant>(M1->getOperand(0))->getValue()->getZExtValue(),
            2u);
  EXPECT_EQ(cast<SCEVConstant>(M2->getOperand(0))->getValue()->getZExtValue(),
            2u);

  // Before the RAUWs, these are all pointing to separate values.
  EXPECT_EQ(cast<SCEVUnknown>(M0->getOperand(1))->getValue(), V0);
  EXPECT_EQ(cast<SCEVUnknown>(M1->getOperand(1))->getValue(), V1);
  EXPECT_EQ(cast<SCEVUnknown>(M2->getOperand(1))->getValue(), V2);

  // Do some RAUWs.
  V2->replaceAllUsesWith(V1);
  V1->replaceAllUsesWith(V0);

  // After the RAUWs, these should all be pointing to V0.
  EXPECT_EQ(cast<SCEVUnknown>(M0->getOperand(1))->getValue(), V0);
  EXPECT_EQ(cast<SCEVUnknown>(M1->getOperand(1))->getValue(), V0);
  EXPECT_EQ(cast<SCEVUnknown>(M2->getOperand(1))->getValue(), V0);
}

TEST_F(ScalarEvolutionsTest, SimplifiedPHI) {
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
                                        std::vector<Type *>(), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
  BranchInst::Create(LoopBB, EntryBB);
  BranchInst::Create(LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)),
                     LoopBB);
  ReturnInst::Create(Context, nullptr, ExitBB);
  auto *Ty = Type::getInt32Ty(Context);
  auto *PN = PHINode::Create(Ty, 2, "", &*LoopBB->begin());
  PN->addIncoming(Constant::getNullValue(Ty), EntryBB);
  PN->addIncoming(UndefValue::get(Ty), LoopBB);
  ScalarEvolution SE = buildSE(*F);
  auto *S1 = SE.getSCEV(PN);
  auto *S2 = SE.getSCEV(PN);
  auto *ZeroConst = SE.getConstant(Ty, 0);

  // At some point, only the first call to getSCEV returned the simplified
  // SCEVConstant and later calls just returned a SCEVUnknown referencing the
  // PHI node.
  EXPECT_EQ(S1, ZeroConst);
  EXPECT_EQ(S1, S2);
}


static Instruction *getInstructionByName(Function &F, StringRef Name) {
  for (auto &I : instructions(F))
    if (I.getName() == Name)
      return &I;
  llvm_unreachable("Expected to find instruction!");
}

static Value *getArgByName(Function &F, StringRef Name) {
  for (auto &Arg : F.args())
    if (Arg.getName() == Name)
      return &Arg;
  llvm_unreachable("Expected to find argument!");
}
TEST_F(ScalarEvolutionsTest, CommutativeExprOperandOrder) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
      " "
      "@var_0 = external global i32, align 4"
      "@var_1 = external global i32, align 4"
      "@var_2 = external global i32, align 4"
      " "
      "declare i32 @unknown(i32, i32, i32)"
      " "
      "define void @f_1(i8* nocapture %arr, i32 %n, i32* %A, i32* %B) "
      "    local_unnamed_addr { "
      "entry: "
      "  %entrycond = icmp sgt i32 %n, 0 "
      "  br i1 %entrycond, label %loop.ph, label %for.end "
      " "
      "loop.ph: "
      "  %a = load i32, i32* %A, align 4 "
      "  %b = load i32, i32* %B, align 4 "
      "  %mul = mul nsw i32 %b, %a "
      "  %iv0.init = getelementptr inbounds i8, i8* %arr, i32 %mul "
      "  br label %loop "
      " "
      "loop: "
      "  %iv0 = phi i8* [ %iv0.inc, %loop ], [ %iv0.init, %loop.ph ] "
      "  %iv1 = phi i32 [ %iv1.inc, %loop ], [ 0, %loop.ph ] "
      "  %conv = trunc i32 %iv1 to i8 "
      "  store i8 %conv, i8* %iv0, align 1 "
      "  %iv0.inc = getelementptr inbounds i8, i8* %iv0, i32 %b "
      "  %iv1.inc = add nuw nsw i32 %iv1, 1 "
      "  %exitcond = icmp eq i32 %iv1.inc, %n "
      "  br i1 %exitcond, label %for.end.loopexit, label %loop "
      " "
      "for.end.loopexit: "
      "  br label %for.end "
      " "
      "for.end: "
      "  ret void "
      "} "
      " "
      "define void @f_2(i32* %X, i32* %Y, i32* %Z) { "
      "  %x = load i32, i32* %X "
      "  %y = load i32, i32* %Y "
      "  %z = load i32, i32* %Z "
      "  ret void "
      "} "
      " "
      "define void @f_3() { "
      "  %x = load i32, i32* @var_0"
      "  %y = load i32, i32* @var_1"
      "  %z = load i32, i32* @var_2"
      "  ret void"
      "} "
      " "
      "define void @f_4(i32 %a, i32 %b, i32 %c) { "
      "  %x = call i32 @unknown(i32 %a, i32 %b, i32 %c)"
      "  %y = call i32 @unknown(i32 %b, i32 %c, i32 %a)"
      "  %z = call i32 @unknown(i32 %c, i32 %a, i32 %b)"
      "  ret void"
      "} "
      ,
      Err, C);

  assert(M && "Could not parse module?");
  assert(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "f_1", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *IV0 = getInstructionByName(F, "iv0");
    auto *IV0Inc = getInstructionByName(F, "iv0.inc");

    auto *FirstExprForIV0 = SE.getSCEV(IV0);
    auto *FirstExprForIV0Inc = SE.getSCEV(IV0Inc);
    auto *SecondExprForIV0 = SE.getSCEV(IV0);

    EXPECT_TRUE(isa<SCEVAddRecExpr>(FirstExprForIV0));
    EXPECT_TRUE(isa<SCEVAddRecExpr>(FirstExprForIV0Inc));
    EXPECT_TRUE(isa<SCEVAddRecExpr>(SecondExprForIV0));
  });

  auto CheckCommutativeMulExprs = [&](ScalarEvolution &SE, const SCEV *A,
                                      const SCEV *B, const SCEV *C) {
    EXPECT_EQ(SE.getMulExpr(A, B), SE.getMulExpr(B, A));
    EXPECT_EQ(SE.getMulExpr(B, C), SE.getMulExpr(C, B));
    EXPECT_EQ(SE.getMulExpr(A, C), SE.getMulExpr(C, A));

    SmallVector<const SCEV *, 3> Ops0 = {A, B, C};
    SmallVector<const SCEV *, 3> Ops1 = {A, C, B};
    SmallVector<const SCEV *, 3> Ops2 = {B, A, C};
    SmallVector<const SCEV *, 3> Ops3 = {B, C, A};
    SmallVector<const SCEV *, 3> Ops4 = {C, B, A};
    SmallVector<const SCEV *, 3> Ops5 = {C, A, B};

    auto *Mul0 = SE.getMulExpr(Ops0);
    auto *Mul1 = SE.getMulExpr(Ops1);
    auto *Mul2 = SE.getMulExpr(Ops2);
    auto *Mul3 = SE.getMulExpr(Ops3);
    auto *Mul4 = SE.getMulExpr(Ops4);
    auto *Mul5 = SE.getMulExpr(Ops5);

    EXPECT_EQ(Mul0, Mul1) << "Expected " << *Mul0 << " == " << *Mul1;
    EXPECT_EQ(Mul1, Mul2) << "Expected " << *Mul1 << " == " << *Mul2;
    EXPECT_EQ(Mul2, Mul3) << "Expected " << *Mul2 << " == " << *Mul3;
    EXPECT_EQ(Mul3, Mul4) << "Expected " << *Mul3 << " == " << *Mul4;
    EXPECT_EQ(Mul4, Mul5) << "Expected " << *Mul4 << " == " << *Mul5;
  };

  for (StringRef FuncName : {"f_2", "f_3", "f_4"})
    runWithSE(
        *M, FuncName, [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
          CheckCommutativeMulExprs(SE, SE.getSCEV(getInstructionByName(F, "x")),
                                   SE.getSCEV(getInstructionByName(F, "y")),
                                   SE.getSCEV(getInstructionByName(F, "z")));
        });
}

TEST_F(ScalarEvolutionsTest, CompareSCEVComplexity) {
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "bb1", F);
  BranchInst::Create(LoopBB, EntryBB);

  auto *Ty = Type::getInt32Ty(Context);
  SmallVector<Instruction*, 8> Muls(8), Acc(8), NextAcc(8);

  Acc[0] = PHINode::Create(Ty, 2, "", LoopBB);
  Acc[1] = PHINode::Create(Ty, 2, "", LoopBB);
  Acc[2] = PHINode::Create(Ty, 2, "", LoopBB);
  Acc[3] = PHINode::Create(Ty, 2, "", LoopBB);
  Acc[4] = PHINode::Create(Ty, 2, "", LoopBB);
  Acc[5] = PHINode::Create(Ty, 2, "", LoopBB);
  Acc[6] = PHINode::Create(Ty, 2, "", LoopBB);
  Acc[7] = PHINode::Create(Ty, 2, "", LoopBB);

  for (int i = 0; i < 20; i++) {
    Muls[0] = BinaryOperator::CreateMul(Acc[0], Acc[0], "", LoopBB);
    NextAcc[0] = BinaryOperator::CreateAdd(Muls[0], Acc[4], "", LoopBB);
    Muls[1] = BinaryOperator::CreateMul(Acc[1], Acc[1], "", LoopBB);
    NextAcc[1] = BinaryOperator::CreateAdd(Muls[1], Acc[5], "", LoopBB);
    Muls[2] = BinaryOperator::CreateMul(Acc[2], Acc[2], "", LoopBB);
    NextAcc[2] = BinaryOperator::CreateAdd(Muls[2], Acc[6], "", LoopBB);
    Muls[3] = BinaryOperator::CreateMul(Acc[3], Acc[3], "", LoopBB);
    NextAcc[3] = BinaryOperator::CreateAdd(Muls[3], Acc[7], "", LoopBB);

    Muls[4] = BinaryOperator::CreateMul(Acc[4], Acc[4], "", LoopBB);
    NextAcc[4] = BinaryOperator::CreateAdd(Muls[4], Acc[0], "", LoopBB);
    Muls[5] = BinaryOperator::CreateMul(Acc[5], Acc[5], "", LoopBB);
    NextAcc[5] = BinaryOperator::CreateAdd(Muls[5], Acc[1], "", LoopBB);
    Muls[6] = BinaryOperator::CreateMul(Acc[6], Acc[6], "", LoopBB);
    NextAcc[6] = BinaryOperator::CreateAdd(Muls[6], Acc[2], "", LoopBB);
    Muls[7] = BinaryOperator::CreateMul(Acc[7], Acc[7], "", LoopBB);
    NextAcc[7] = BinaryOperator::CreateAdd(Muls[7], Acc[3], "", LoopBB);
    Acc = NextAcc;
  }

  auto II = LoopBB->begin();
  for (int i = 0; i < 8; i++) {
    PHINode *Phi = cast<PHINode>(&*II++);
    Phi->addIncoming(Acc[i], LoopBB);
    Phi->addIncoming(UndefValue::get(Ty), EntryBB);
  }

  BasicBlock *ExitBB = BasicBlock::Create(Context, "bb2", F);
  BranchInst::Create(LoopBB, ExitBB,
                     UndefValue::get(Type::getInt1Ty(Context)),
                     LoopBB);

  Acc[0] = BinaryOperator::CreateAdd(Acc[0], Acc[1], "", ExitBB);
  Acc[1] = BinaryOperator::CreateAdd(Acc[2], Acc[3], "", ExitBB);
  Acc[2] = BinaryOperator::CreateAdd(Acc[4], Acc[5], "", ExitBB);
  Acc[3] = BinaryOperator::CreateAdd(Acc[6], Acc[7], "", ExitBB);
  Acc[0] = BinaryOperator::CreateAdd(Acc[0], Acc[1], "", ExitBB);
  Acc[1] = BinaryOperator::CreateAdd(Acc[2], Acc[3], "", ExitBB);
  Acc[0] = BinaryOperator::CreateAdd(Acc[0], Acc[1], "", ExitBB);

  ReturnInst::Create(Context, nullptr, ExitBB);

  ScalarEvolution SE = buildSE(*F);

  EXPECT_NE(nullptr, SE.getSCEV(Acc[0]));
}

TEST_F(ScalarEvolutionsTest, CompareValueComplexity) {
  IntegerType *IntPtrTy = M.getDataLayout().getIntPtrType(Context);
  PointerType *IntPtrPtrTy = PointerType::getUnqual(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {IntPtrTy, IntPtrTy}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);

  Value *X = &*F->arg_begin();
  Value *Y = &*std::next(F->arg_begin());

  const int ValueDepth = 10;
  for (int i = 0; i < ValueDepth; i++) {
    X = new LoadInst(IntPtrTy, new IntToPtrInst(X, IntPtrPtrTy, "", EntryBB),
                     "",
                     /*isVolatile*/ false, EntryBB);
    Y = new LoadInst(IntPtrTy, new IntToPtrInst(Y, IntPtrPtrTy, "", EntryBB),
                     "",
                     /*isVolatile*/ false, EntryBB);
  }

  auto *MulA = BinaryOperator::CreateMul(X, Y, "", EntryBB);
  auto *MulB = BinaryOperator::CreateMul(Y, X, "", EntryBB);
  ReturnInst::Create(Context, nullptr, EntryBB);

  // This test isn't checking for correctness. Today making A and B resolve to
  // the same SCEV would require deeper searching in CompareValueComplexity,
  // which will slow down compilation. However, this test can fail (with LLVM's
  // behavior still being correct) if we ever have a smarter
  // CompareValueComplexity that is both fast and more accurate.

  ScalarEvolution SE = buildSE(*F);
  auto *A = SE.getSCEV(MulA);
  auto *B = SE.getSCEV(MulB);
  EXPECT_NE(A, B);
}

TEST_F(ScalarEvolutionsTest, SCEVAddExpr) {
  Type *Ty32 = Type::getInt32Ty(Context);
  Type *ArgTys[] = {Type::getInt64Ty(Context), Ty32, Ty32, Ty32, Ty32, Ty32};

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), ArgTys, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);

  Argument *A1 = &*F->arg_begin();
  Argument *A2 = &*(std::next(F->arg_begin()));
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);

  Instruction *Trunc = CastInst::CreateTruncOrBitCast(A1, Ty32, "", EntryBB);
  Instruction *Mul1 = BinaryOperator::CreateMul(Trunc, A2, "", EntryBB);
  Instruction *Add1 = BinaryOperator::CreateAdd(Mul1, Trunc, "", EntryBB);
  Mul1 = BinaryOperator::CreateMul(Add1, Trunc, "", EntryBB);
  Instruction *Add2 = BinaryOperator::CreateAdd(Mul1, Add1, "", EntryBB);
  // FIXME: The size of this is arbitrary and doesn't seem to change the
  // result, but SCEV will do quadratic work for these so a large number here
  // will be extremely slow. We should revisit what and how this is testing
  // SCEV.
  for (int i = 0; i < 10; i++) {
    Mul1 = BinaryOperator::CreateMul(Add2, Add1, "", EntryBB);
    Add1 = Add2;
    Add2 = BinaryOperator::CreateAdd(Mul1, Add1, "", EntryBB);
  }

  ReturnInst::Create(Context, nullptr, EntryBB);
  ScalarEvolution SE = buildSE(*F);
  EXPECT_NE(nullptr, SE.getSCEV(Mul1));

  Argument *A3 = &*(std::next(F->arg_begin(), 2));
  Argument *A4 = &*(std::next(F->arg_begin(), 3));
  Argument *A5 = &*(std::next(F->arg_begin(), 4));
  Argument *A6 = &*(std::next(F->arg_begin(), 5));

  auto *AddWithNUW = cast<SCEVAddExpr>(SE.getAddExpr(
      SE.getAddExpr(SE.getSCEV(A2), SE.getSCEV(A3), SCEV::FlagNUW),
      SE.getConstant(APInt(/*numBits=*/32, 5)), SCEV::FlagNUW));
  EXPECT_EQ(AddWithNUW->getNumOperands(), 3u);
  EXPECT_EQ(AddWithNUW->getNoWrapFlags(), SCEV::FlagNUW);

  auto *AddWithAnyWrap =
      SE.getAddExpr(SE.getSCEV(A3), SE.getSCEV(A4), SCEV::FlagAnyWrap);
  auto *AddWithAnyWrapNUW = cast<SCEVAddExpr>(
      SE.getAddExpr(AddWithAnyWrap, SE.getSCEV(A5), SCEV::FlagNUW));
  EXPECT_EQ(AddWithAnyWrapNUW->getNumOperands(), 3u);
  EXPECT_EQ(AddWithAnyWrapNUW->getNoWrapFlags(), SCEV::FlagAnyWrap);

  auto *AddWithNSW = SE.getAddExpr(
      SE.getSCEV(A2), SE.getConstant(APInt(32, 99)), SCEV::FlagNSW);
  auto *AddWithNSW_NUW = cast<SCEVAddExpr>(
      SE.getAddExpr(AddWithNSW, SE.getSCEV(A5), SCEV::FlagNUW));
  EXPECT_EQ(AddWithNSW_NUW->getNumOperands(), 3u);
  EXPECT_EQ(AddWithNSW_NUW->getNoWrapFlags(), SCEV::FlagAnyWrap);

  auto *AddWithNSWNUW =
      SE.getAddExpr(SE.getSCEV(A2), SE.getSCEV(A4),
                    ScalarEvolution::setFlags(SCEV::FlagNUW, SCEV::FlagNSW));
  auto *AddWithNSWNUW_NUW = cast<SCEVAddExpr>(
      SE.getAddExpr(AddWithNSWNUW, SE.getSCEV(A5), SCEV::FlagNUW));
  EXPECT_EQ(AddWithNSWNUW_NUW->getNumOperands(), 3u);
  EXPECT_EQ(AddWithNSWNUW_NUW->getNoWrapFlags(), SCEV::FlagNUW);

  auto *AddWithNSW_NSWNUW = cast<SCEVAddExpr>(
      SE.getAddExpr(AddWithNSW, SE.getSCEV(A6),
                    ScalarEvolution::setFlags(SCEV::FlagNUW, SCEV::FlagNSW)));
  EXPECT_EQ(AddWithNSW_NSWNUW->getNumOperands(), 3u);
  EXPECT_EQ(AddWithNSW_NSWNUW->getNoWrapFlags(), SCEV::FlagAnyWrap);
}

static Instruction &GetInstByName(Function &F, StringRef Name) {
  for (auto &I : instructions(F))
    if (I.getName() == Name)
      return I;
  llvm_unreachable("Could not find instructions!");
}

TEST_F(ScalarEvolutionsTest, SCEVNormalization) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
      " "
      "@var_0 = external global i32, align 4"
      "@var_1 = external global i32, align 4"
      "@var_2 = external global i32, align 4"
      " "
      "declare i32 @unknown(i32, i32, i32)"
      " "
      "define void @f_1(i8* nocapture %arr, i32 %n, i32* %A, i32* %B) "
      "    local_unnamed_addr { "
      "entry: "
      "  br label %loop.ph "
      " "
      "loop.ph: "
      "  br label %loop "
      " "
      "loop: "
      "  %iv0 = phi i32 [ %iv0.inc, %loop ], [ 0, %loop.ph ] "
      "  %iv1 = phi i32 [ %iv1.inc, %loop ], [ -2147483648, %loop.ph ] "
      "  %iv0.inc = add i32 %iv0, 1 "
      "  %iv1.inc = add i32 %iv1, 3 "
      "  br i1 undef, label %for.end.loopexit, label %loop "
      " "
      "for.end.loopexit: "
      "  ret void "
      "} "
      " "
      "define void @f_2(i32 %a, i32 %b, i32 %c, i32 %d) "
      "    local_unnamed_addr { "
      "entry: "
      "  br label %loop_0 "
      " "
      "loop_0: "
      "  br i1 undef, label %loop_0, label "
      "%loop_1 "
      " "
      "loop_1: "
      "  br i1 undef, label %loop_2, label %loop_1 "
      " "
      " "
      "loop_2: "
      "  br i1 undef, label %end, label %loop_2 "
      " "
      "end: "
      "  ret void "
      "} "
      ,
      Err, C);

  assert(M && "Could not parse module?");
  assert(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "f_1", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto &I0 = GetInstByName(F, "iv0");
    auto &I1 = *I0.getNextNode();

    auto *S0 = cast<SCEVAddRecExpr>(SE.getSCEV(&I0));
    PostIncLoopSet Loops;
    Loops.insert(S0->getLoop());
    auto *N0 = normalizeForPostIncUse(S0, Loops, SE);
    auto *D0 = denormalizeForPostIncUse(N0, Loops, SE);
    EXPECT_EQ(S0, D0) << *S0 << " " << *D0;

    auto *S1 = cast<SCEVAddRecExpr>(SE.getSCEV(&I1));
    Loops.clear();
    Loops.insert(S1->getLoop());
    auto *N1 = normalizeForPostIncUse(S1, Loops, SE);
    auto *D1 = denormalizeForPostIncUse(N1, Loops, SE);
    EXPECT_EQ(S1, D1) << *S1 << " " << *D1;
  });

  runWithSE(*M, "f_2", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *L2 = *LI.begin();
    auto *L1 = *std::next(LI.begin());
    auto *L0 = *std::next(LI.begin(), 2);

    auto GetAddRec = [&SE](const Loop *L, std::initializer_list<const SCEV *> Ops) {
      SmallVector<const SCEV *, 4> OpsCopy(Ops);
      return SE.getAddRecExpr(OpsCopy, L, SCEV::FlagAnyWrap);
    };

    auto GetAdd = [&SE](std::initializer_list<const SCEV *> Ops) {
      SmallVector<const SCEV *, 4> OpsCopy(Ops);
      return SE.getAddExpr(OpsCopy, SCEV::FlagAnyWrap);
    };

    // We first populate the Exprs vector with a few "interesting" SCEV
    // expressions, and then we go through the list and assert that each
    // expression in it has an invertible normalization.

    std::vector<const SCEV *> Exprs;
    {
      const SCEV *V0 = SE.getSCEV(&*F.arg_begin());
      const SCEV *V1 = SE.getSCEV(&*std::next(F.arg_begin(), 1));
      const SCEV *V2 = SE.getSCEV(&*std::next(F.arg_begin(), 2));
      const SCEV *V3 = SE.getSCEV(&*std::next(F.arg_begin(), 3));

      Exprs.push_back(GetAddRec(L0, {V0}));             // 0
      Exprs.push_back(GetAddRec(L0, {V0, V1}));         // 1
      Exprs.push_back(GetAddRec(L0, {V0, V1, V2}));     // 2
      Exprs.push_back(GetAddRec(L0, {V0, V1, V2, V3})); // 3

      Exprs.push_back(
          GetAddRec(L1, {Exprs[1], Exprs[2], Exprs[3], Exprs[0]})); // 4
      Exprs.push_back(
          GetAddRec(L1, {Exprs[1], Exprs[2], Exprs[0], Exprs[3]})); // 5
      Exprs.push_back(
          GetAddRec(L1, {Exprs[1], Exprs[3], Exprs[3], Exprs[1]})); // 6

      Exprs.push_back(GetAdd({Exprs[6], Exprs[3], V2})); // 7

      Exprs.push_back(
          GetAddRec(L2, {Exprs[4], Exprs[3], Exprs[3], Exprs[5]})); // 8

      Exprs.push_back(
          GetAddRec(L2, {Exprs[4], Exprs[6], Exprs[7], Exprs[3], V0})); // 9
    }

    std::vector<PostIncLoopSet> LoopSets;
    for (int i = 0; i < 8; i++) {
      LoopSets.emplace_back();
      if (i & 1)
        LoopSets.back().insert(L0);
      if (i & 2)
        LoopSets.back().insert(L1);
      if (i & 4)
        LoopSets.back().insert(L2);
    }

    for (const auto &LoopSet : LoopSets)
      for (auto *S : Exprs) {
        {
          auto *N = llvm::normalizeForPostIncUse(S, LoopSet, SE);
          auto *D = llvm::denormalizeForPostIncUse(N, LoopSet, SE);

          // Normalizing and then denormalizing had better give us back the
          // same value.
          EXPECT_EQ(S, D) << "S = " << *S << " D = " << *D << " N = " << *N;
        }
        {
          auto *D = llvm::denormalizeForPostIncUse(S, LoopSet, SE);
          auto *N = llvm::normalizeForPostIncUse(D, LoopSet, SE);

          // Denormalizing and then normalizing had better give us back the
          // same value.
          EXPECT_EQ(S, N) << "S = " << *S << " N = " << *N;
        }
      }
  });
}

// Check that the call to getZeroExtendExpr does not take exponential time.
TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExpr) {
  LLVMContext C;
  SMDiagnostic Err;

  // Generate a function like below:
  // define void @foo() {
  // entry:
  //   br label %for.cond
  //
  // for.cond:
  //   %0 = phi i64 [ 100, %entry ], [ %dec, %for.inc ]
  //   %cmp = icmp sgt i64 %0, 90
  //   br i1 %cmp, label %for.inc, label %for.cond1
  //
  // for.inc:
  //   %dec = add nsw i64 %0, -1
  //   br label %for.cond
  //
  // for.cond1:
  //   %1 = phi i64 [ 100, %for.cond ], [ %dec5, %for.inc2 ]
  //   %cmp3 = icmp sgt i64 %1, 90
  //   br i1 %cmp3, label %for.inc2, label %for.cond4
  //
  // for.inc2:
  //   %dec5 = add nsw i64 %1, -1
  //   br label %for.cond1
  //
  // ......
  //
  // for.cond89:
  //   %19 = phi i64 [ 100, %for.cond84 ], [ %dec94, %for.inc92 ]
  //   %cmp93 = icmp sgt i64 %19, 90
  //   br i1 %cmp93, label %for.inc92, label %for.end
  //
  // for.inc92:
  //   %dec94 = add nsw i64 %19, -1
  //   br label %for.cond89
  //
  // for.end:
  //   %gep = getelementptr i8, i8* null, i64 %dec
  //   %gep6 = getelementptr i8, i8* %gep, i64 %dec5
  //   ......
  //   %gep95 = getelementptr i8, i8* %gep91, i64 %dec94
  //   ret void
  // }
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), {}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", M);

  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *CondBB = BasicBlock::Create(Context, "for.cond", F);
  BasicBlock *EndBB = BasicBlock::Create(Context, "for.end", F);
  BranchInst::Create(CondBB, EntryBB);
  BasicBlock *PrevBB = EntryBB;

  Type *I64Ty = Type::getInt64Ty(Context);
  Type *I8Ty = Type::getInt8Ty(Context);
  Type *I8PtrTy = PointerType::getUnqual(Context);
  Value *Accum = Constant::getNullValue(I8PtrTy);
  int Iters = 20;
  for (int i = 0; i < Iters; i++) {
    BasicBlock *IncBB = BasicBlock::Create(Context, "for.inc", F, EndBB);
    auto *PN = PHINode::Create(I64Ty, 2, "", CondBB);
    PN->addIncoming(ConstantInt::get(Context, APInt(64, 100)), PrevBB);
    auto *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SGT, PN,
                                ConstantInt::get(Context, APInt(64, 90)), "cmp",
                                CondBB);
    BasicBlock *NextBB;
    if (i != Iters - 1)
      NextBB = BasicBlock::Create(Context, "for.cond", F, EndBB);
    else
      NextBB = EndBB;
    BranchInst::Create(IncBB, NextBB, Cmp, CondBB);
    auto *Dec = BinaryOperator::CreateNSWAdd(
        PN, ConstantInt::get(Context, APInt(64, -1)), "dec", IncBB);
    PN->addIncoming(Dec, IncBB);
    BranchInst::Create(CondBB, IncBB);

    Accum = GetElementPtrInst::Create(I8Ty, Accum, PN, "gep", EndBB);

    PrevBB = CondBB;
    CondBB = NextBB;
  }
  ReturnInst::Create(Context, nullptr, EndBB);
  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(Accum);
  S = SE.getLosslessPtrToIntExpr(S);
  Type *I128Ty = Type::getInt128Ty(Context);
  SE.getZeroExtendExpr(S, I128Ty);
}

// Make sure that SCEV invalidates exit limits after invalidating
// the values it depends on when we forget a loop.
TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetLoop) {
  /*
   * Create the following code:
   * func(i64 addrspace(10)* %arg)
   * top:
   *  br label %L.ph
   * L.ph:
   *  br label %L
   * L:
   *  %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
   *  %add = add i64 %phi2, 1
   *  %cond = icmp slt i64 %add, 1000; then becomes 2000.
   *  br i1 %cond, label %post, label %L2
   * post:
   *  ret void
   *
   */

  // Create a module with non-integral pointers in its datalayout.
  Module NIM("nonintegral", Context);
  std::string DataLayout = M.getDataLayoutStr();
  if (!DataLayout.empty())
    DataLayout += "-";
  DataLayout += "ni:10";
  NIM.setDataLayout(DataLayout);

  Type *T_int64 = Type::getInt64Ty(Context);
  Type *T_pint64 = PointerType::get(Context, 10);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);

  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
  BasicBlock *L = BasicBlock::Create(Context, "L", F);
  BasicBlock *Post = BasicBlock::Create(Context, "post", F);

  IRBuilder<> Builder(Top);
  Builder.CreateBr(LPh);

  Builder.SetInsertPoint(LPh);
  Builder.CreateBr(L);

  Builder.SetInsertPoint(L);
  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
  auto *Add = cast<Instruction>(
      Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
  auto *Limit = ConstantInt::get(T_int64, 1000);
  auto *Cond = cast<Instruction>(
      Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
  auto *Br = cast<Instruction>(Builder.CreateCondBr(Cond, L, Post));
  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
  Phi->addIncoming(Add, L);

  Builder.SetInsertPoint(Post);
  Builder.CreateRetVoid();

  ScalarEvolution SE = buildSE(*F);
  auto *Loop = LI->getLoopFor(L);
  const SCEV *EC = SE.getBackedgeTakenCount(Loop);
  EXPECT_FALSE(isa<SCEVCouldNotCompute>(EC));
  EXPECT_TRUE(isa<SCEVConstant>(EC));
  EXPECT_EQ(cast<SCEVConstant>(EC)->getAPInt().getLimitedValue(), 999u);

  // The add recurrence {5,+,1} does not correspond to any PHI in the IR, and
  // that is relevant to this test.
  auto *Five = SE.getConstant(APInt(/*numBits=*/64, 5));
  auto *AR =
      SE.getAddRecExpr(Five, SE.getOne(T_int64), Loop, SCEV::FlagAnyWrap);
  const SCEV *ARAtLoopExit = SE.getSCEVAtScope(AR, nullptr);
  EXPECT_FALSE(isa<SCEVCouldNotCompute>(ARAtLoopExit));
  EXPECT_TRUE(isa<SCEVConstant>(ARAtLoopExit));
  EXPECT_EQ(cast<SCEVConstant>(ARAtLoopExit)->getAPInt().getLimitedValue(),
            1004u);

  SE.forgetLoop(Loop);
  Br->eraseFromParent();
  Cond->eraseFromParent();

  Builder.SetInsertPoint(L);
  auto *NewCond = Builder.CreateICmp(
      ICmpInst::ICMP_SLT, Add, ConstantInt::get(T_int64, 2000), "new.cond");
  Builder.CreateCondBr(NewCond, L, Post);
  const SCEV *NewEC = SE.getBackedgeTakenCount(Loop);
  EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewEC));
  EXPECT_TRUE(isa<SCEVConstant>(NewEC));
  EXPECT_EQ(cast<SCEVConstant>(NewEC)->getAPInt().getLimitedValue(), 1999u);
  const SCEV *NewARAtLoopExit = SE.getSCEVAtScope(AR, nullptr);
  EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewARAtLoopExit));
  EXPECT_TRUE(isa<SCEVConstant>(NewARAtLoopExit));
  EXPECT_EQ(cast<SCEVConstant>(NewARAtLoopExit)->getAPInt().getLimitedValue(),
            2004u);
}

// Make sure that SCEV invalidates exit limits after invalidating the values it
// depends on when we forget a value.
TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetValue) {
  /*
   * Create the following code:
   * func(i64 addrspace(10)* %arg)
   * top:
   *  br label %L.ph
   * L.ph:
   *  %load = load i64 addrspace(10)* %arg
   *  br label %L
   * L:
   *  %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
   *  %add = add i64 %phi2, 1
   *  %cond = icmp slt i64 %add, %load ; then becomes 2000.
   *  br i1 %cond, label %post, label %L2
   * post:
   *  ret void
   *
   */

  // Create a module with non-integral pointers in its datalayout.
  Module NIM("nonintegral", Context);
  std::string DataLayout = M.getDataLayoutStr();
  if (!DataLayout.empty())
    DataLayout += "-";
  DataLayout += "ni:10";
  NIM.setDataLayout(DataLayout);

  Type *T_int64 = Type::getInt64Ty(Context);
  Type *T_pint64 = PointerType::get(Context, 10);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);

  Argument *Arg = &*F->arg_begin();

  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
  BasicBlock *L = BasicBlock::Create(Context, "L", F);
  BasicBlock *Post = BasicBlock::Create(Context, "post", F);

  IRBuilder<> Builder(Top);
  Builder.CreateBr(LPh);

  Builder.SetInsertPoint(LPh);
  auto *Load = cast<Instruction>(Builder.CreateLoad(T_int64, Arg, "load"));
  Builder.CreateBr(L);

  Builder.SetInsertPoint(L);
  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
  auto *Add = cast<Instruction>(
      Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
  auto *Cond = cast<Instruction>(
      Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Load, "cond"));
  auto *Br = cast<Instruction>(Builder.CreateCondBr(Cond, L, Post));
  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
  Phi->addIncoming(Add, L);

  Builder.SetInsertPoint(Post);
  Builder.CreateRetVoid();

  ScalarEvolution SE = buildSE(*F);
  auto *Loop = LI->getLoopFor(L);
  const SCEV *EC = SE.getBackedgeTakenCount(Loop);
  EXPECT_FALSE(isa<SCEVCouldNotCompute>(EC));
  EXPECT_FALSE(isa<SCEVConstant>(EC));

  SE.forgetValue(Load);
  Br->eraseFromParent();
  Cond->eraseFromParent();
  Load->eraseFromParent();

  Builder.SetInsertPoint(L);
  auto *NewCond = Builder.CreateICmp(
      ICmpInst::ICMP_SLT, Add, ConstantInt::get(T_int64, 2000), "new.cond");
  Builder.CreateCondBr(NewCond, L, Post);
  const SCEV *NewEC = SE.getBackedgeTakenCount(Loop);
  EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewEC));
  EXPECT_TRUE(isa<SCEVConstant>(NewEC));
  EXPECT_EQ(cast<SCEVConstant>(NewEC)->getAPInt().getLimitedValue(), 1999u);
}

TEST_F(ScalarEvolutionsTest, SCEVAddRecFromPHIwithLargeConstants) {
  // Reference: https://reviews.llvm.org/D37265
  // Make sure that SCEV does not blow up when constructing an AddRec
  // with predicates for a phi with the update pattern:
  //  (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
  // when either the initial value of the Phi or the InvariantAccum is a
  // constant that is too large to fit in an ix but is zero when truncated to
  // ix.
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
  Function *F =
      Function::Create(FTy, Function::ExternalLinkage, "addrecphitest", M);

  /*
    Create IR:
    entry:
     br label %loop
    loop:
     %0 = phi i64 [-9223372036854775808, %entry], [%3, %loop]
     %1 = shl i64 %0, 32
     %2 = ashr exact i64 %1, 32
     %3 = add i64 %2, -9223372036854775808
     br i1 undef, label %exit, label %loop
    exit:
     ret void
  */
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);

  // entry:
  BranchInst::Create(LoopBB, EntryBB);
  // loop:
  auto *MinInt64 =
      ConstantInt::get(Context, APInt(64, 0x8000000000000000U, true));
  auto *Int64_32 = ConstantInt::get(Context, APInt(64, 32));
  auto *Br = BranchInst::Create(
      LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
  auto *Phi = PHINode::Create(Type::getInt64Ty(Context), 2, "", Br);
  auto *Shl = BinaryOperator::CreateShl(Phi, Int64_32, "", Br);
  auto *AShr = BinaryOperator::CreateExactAShr(Shl, Int64_32, "", Br);
  auto *Add = BinaryOperator::CreateAdd(AShr, MinInt64, "", Br);
  Phi->addIncoming(MinInt64, EntryBB);
  Phi->addIncoming(Add, LoopBB);
  // exit:
  ReturnInst::Create(Context, nullptr, ExitBB);

  // Make sure that SCEV doesn't blow up
  ScalarEvolution SE = buildSE(*F);
  const SCEV *Expr = SE.getSCEV(Phi);
  EXPECT_NE(nullptr, Expr);
  EXPECT_TRUE(isa<SCEVUnknown>(Expr));
  auto Result = SE.createAddRecFromPHIWithCasts(cast<SCEVUnknown>(Expr));
}

TEST_F(ScalarEvolutionsTest, SCEVAddRecFromPHIwithLargeConstantAccum) {
  // Make sure that SCEV does not blow up when constructing an AddRec
  // with predicates for a phi with the update pattern:
  //  (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
  // when the InvariantAccum is a constant that is too large to fit in an
  // ix but is zero when truncated to ix, and the initial value of the
  // phi is not a constant.
  Type *Int32Ty = Type::getInt32Ty(Context);
  SmallVector<Type *, 1> Types;
  Types.push_back(Int32Ty);
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), Types, false);
  Function *F =
      Function::Create(FTy, Function::ExternalLinkage, "addrecphitest", M);

  /*
    Create IR:
    define @addrecphitest(i32)
    entry:
     br label %loop
    loop:
     %1 = phi i32 [%0, %entry], [%4, %loop]
     %2 = shl i32 %1, 16
     %3 = ashr exact i32 %2, 16
     %4 = add i32 %3, -2147483648
     br i1 undef, label %exit, label %loop
    exit:
     ret void
  */
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);

  // entry:
  BranchInst::Create(LoopBB, EntryBB);
  // loop:
  auto *MinInt32 = ConstantInt::get(Context, APInt(32, 0x80000000U, true));
  auto *Int32_16 = ConstantInt::get(Context, APInt(32, 16));
  auto *Br = BranchInst::Create(
      LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
  auto *Phi = PHINode::Create(Int32Ty, 2, "", Br);
  auto *Shl = BinaryOperator::CreateShl(Phi, Int32_16, "", Br);
  auto *AShr = BinaryOperator::CreateExactAShr(Shl, Int32_16, "", Br);
  auto *Add = BinaryOperator::CreateAdd(AShr, MinInt32, "", Br);
  auto *Arg = &*(F->arg_begin());
  Phi->addIncoming(Arg, EntryBB);
  Phi->addIncoming(Add, LoopBB);
  // exit:
  ReturnInst::Create(Context, nullptr, ExitBB);

  // Make sure that SCEV doesn't blow up
  ScalarEvolution SE = buildSE(*F);
  const SCEV *Expr = SE.getSCEV(Phi);
  EXPECT_NE(nullptr, Expr);
  EXPECT_TRUE(isa<SCEVUnknown>(Expr));
  auto Result = SE.createAddRecFromPHIWithCasts(cast<SCEVUnknown>(Expr));
}

TEST_F(ScalarEvolutionsTest, SCEVFoldSumOfTruncs) {
  // Verify that the following SCEV gets folded to a zero:
  // (-1 * (trunc i64 (-1 * %0) to i32)) + (-1 * (trunc i64 %0 to i32))
  Type *ArgTy = Type::getInt64Ty(Context);
  Type *Int32Ty = Type::getInt32Ty(Context);
  SmallVector<Type *, 1> Types;
  Types.push_back(ArgTy);
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), Types, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
  ReturnInst::Create(Context, nullptr, BB);

  ScalarEvolution SE = buildSE(*F);

  auto *Arg = &*(F->arg_begin());
  const auto *ArgSCEV = SE.getSCEV(Arg);

  // Build the SCEV
  const auto *A0 = SE.getNegativeSCEV(ArgSCEV);
  const auto *A1 = SE.getTruncateExpr(A0, Int32Ty);
  const auto *A = SE.getNegativeSCEV(A1);

  const auto *B0 = SE.getTruncateExpr(ArgSCEV, Int32Ty);
  const auto *B = SE.getNegativeSCEV(B0);

  const auto *Expr = SE.getAddExpr(A, B);
  // Verify that the SCEV was folded to 0
  const auto *ZeroConst = SE.getConstant(Int32Ty, 0);
  EXPECT_EQ(Expr, ZeroConst);
}

// Check logic of SCEV expression size computation.
TEST_F(ScalarEvolutionsTest, SCEVComputeExpressionSize) {
  /*
   * Create the following code:
   * void func(i64 %a, i64 %b)
   * entry:
   *  %s1 = add i64 %a, 1
   *  %s2 = udiv i64 %s1, %b
   *  br label %exit
   * exit:
   *  ret
   */

  // Create a module.
  Module M("SCEVComputeExpressionSize", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), { T_int64, T_int64 }, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *A = &*F->arg_begin();
  Argument *B = &*std::next(F->arg_begin());
  ConstantInt *C = ConstantInt::get(Context, APInt(64, 1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(A, C, "s1"));
  auto *S2 = cast<Instruction>(Builder.CreateUDiv(S1, B, "s2"));
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  Builder.CreateRetVoid();

  ScalarEvolution SE = buildSE(*F);
  // Get S2 first to move it to cache.
  const SCEV *AS = SE.getSCEV(A);
  const SCEV *BS = SE.getSCEV(B);
  const SCEV *CS = SE.getSCEV(C);
  const SCEV *S1S = SE.getSCEV(S1);
  const SCEV *S2S = SE.getSCEV(S2);
  EXPECT_EQ(AS->getExpressionSize(), 1u);
  EXPECT_EQ(BS->getExpressionSize(), 1u);
  EXPECT_EQ(CS->getExpressionSize(), 1u);
  EXPECT_EQ(S1S->getExpressionSize(), 3u);
  EXPECT_EQ(S2S->getExpressionSize(), 5u);
}

TEST_F(ScalarEvolutionsTest, SCEVLoopDecIntrinsic) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "define void @foo(i32 %N) { "
      "entry: "
      "  %cmp3 = icmp sgt i32 %N, 0 "
      "  br i1 %cmp3, label %for.body, label %for.cond.cleanup "
      "for.cond.cleanup: "
      "  ret void "
      "for.body: "
      "  %i.04 = phi i32 [ %inc, %for.body ], [ 100, %entry ] "
      "  %inc = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %i.04, i32 1) "
      "  %exitcond = icmp ne i32 %inc, 0 "
      "  br i1 %exitcond, label %for.cond.cleanup, label %for.body "
      "} "
      "declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) ",
      Err, C);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "foo", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *ScevInc = SE.getSCEV(getInstructionByName(F, "inc"));
    EXPECT_TRUE(isa<SCEVAddRecExpr>(ScevInc));
  });
}

TEST_F(ScalarEvolutionsTest, SCEVComputeConstantDifference) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "define void @foo(i32 %sz, i32 %pp) { "
      "entry: "
      "  %v0 = add i32 %pp, 0 "
      "  %v3 = add i32 %pp, 3 "
      "  br label %loop.body "
      "loop.body: "
      "  %iv = phi i32 [ %iv.next, %loop.body ], [ 0, %entry ] "
      "  %xa = add nsw i32 %iv, %v0 "
      "  %yy = add nsw i32 %iv, %v3 "
      "  %xb = sub nsw i32 %yy, 3 "
      "  %iv.next = add nsw i32 %iv, 1 "
      "  %cmp = icmp sle i32 %iv.next, %sz "
      "  br i1 %cmp, label %loop.body, label %exit "
      "exit: "
      "  ret void "
      "} ",
      Err, C);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *ScevV0 = SE.getSCEV(getInstructionByName(F, "v0")); // %pp
    auto *ScevV3 = SE.getSCEV(getInstructionByName(F, "v3")); // (3 + %pp)
    auto *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {0,+,1}
    auto *ScevXA =
        SE.getSCEV(getInstructionByName(F, "xa")); // {%pp,+,1}
    auto *ScevYY = SE.getSCEV(getInstructionByName(F, "yy")); // {(3 + %pp),+,1}
    auto *ScevXB = SE.getSCEV(getInstructionByName(F, "xb")); // {%pp,+,1}
    auto *ScevIVNext = SE.getSCEV(getInstructionByName(F, "iv.next")); // {1,+,1}

    auto diff = [&SE](const SCEV *LHS, const SCEV *RHS) -> std::optional<int> {
      auto ConstantDiffOrNone = computeConstantDifference(SE, LHS, RHS);
      if (!ConstantDiffOrNone)
        return std::nullopt;

      auto ExtDiff = ConstantDiffOrNone->getSExtValue();
      int Diff = ExtDiff;
      assert(Diff == ExtDiff && "Integer overflow");
      return Diff;
    };

    EXPECT_EQ(diff(ScevV3, ScevV0), 3);
    EXPECT_EQ(diff(ScevV0, ScevV3), -3);
    EXPECT_EQ(diff(ScevV0, ScevV0), 0);
    EXPECT_EQ(diff(ScevV3, ScevV3), 0);
    EXPECT_EQ(diff(ScevIV, ScevIV), 0);
    EXPECT_EQ(diff(ScevXA, ScevXB), 0);
    EXPECT_EQ(diff(ScevXA, ScevYY), -3);
    EXPECT_EQ(diff(ScevYY, ScevXB), 3);
    EXPECT_EQ(diff(ScevIV, ScevIVNext), -1);
    EXPECT_EQ(diff(ScevIVNext, ScevIV), 1);
    EXPECT_EQ(diff(ScevIVNext, ScevIVNext), 0);
    EXPECT_EQ(diff(ScevV0, ScevIV), std::nullopt);
    EXPECT_EQ(diff(ScevIVNext, ScevV3), std::nullopt);
    EXPECT_EQ(diff(ScevYY, ScevV3), std::nullopt);
  });
}

TEST_F(ScalarEvolutionsTest, SCEVrewriteUnknowns) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "define void @foo(i32 %i) { "
      "entry: "
      "  %cmp3 = icmp ult i32 %i, 16 "
      "  br i1 %cmp3, label %loop.body, label %exit "
      "loop.body: "
      "  %iv = phi i32 [ %iv.next, %loop.body ], [ %i, %entry ] "
      "  %iv.next = add nsw i32 %iv, 1 "
      "  %cmp = icmp eq i32 %iv.next, 16 "
      "  br i1 %cmp, label %exit, label %loop.body "
      "exit: "
      "  ret void "
      "} ",
      Err, C);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {%i,+,1}
    auto *ScevI = SE.getSCEV(getArgByName(F, "i")); // %i

    ValueToSCEVMapTy RewriteMap;
    RewriteMap[cast<SCEVUnknown>(ScevI)->getValue()] =
        SE.getUMinExpr(ScevI, SE.getConstant(ScevI->getType(), 17));
    auto *WithUMin = SCEVParameterRewriter::rewrite(ScevIV, SE, RewriteMap);

    EXPECT_NE(WithUMin, ScevIV);
    auto *AR = dyn_cast<SCEVAddRecExpr>(WithUMin);
    EXPECT_TRUE(AR);
    EXPECT_EQ(AR->getStart(),
              SE.getUMinExpr(ScevI, SE.getConstant(ScevI->getType(), 17)));
    EXPECT_EQ(AR->getStepRecurrence(SE),
              cast<SCEVAddRecExpr>(ScevIV)->getStepRecurrence(SE));
  });
}

TEST_F(ScalarEvolutionsTest, SCEVAddNUW) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString("define void @foo(i32 %x) { "
                                                  "  ret void "
                                                  "} ",
                                                  Err, C);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *X = SE.getSCEV(getArgByName(F, "x"));
    auto *One = SE.getOne(X->getType());
    auto *Sum = SE.getAddExpr(X, One, SCEV::FlagNUW);
    EXPECT_TRUE(SE.isKnownPredicate(ICmpInst::ICMP_UGE, Sum, X));
    EXPECT_TRUE(SE.isKnownPredicate(ICmpInst::ICMP_UGT, Sum, X));
  });
}

TEST_F(ScalarEvolutionsTest, SCEVgetRanges) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "define void @foo(i32 %i) { "
      "entry: "
      "  br label %loop.body "
      "loop.body: "
      "  %iv = phi i32 [ %iv.next, %loop.body ], [ 0, %entry ] "
      "  %iv.next = add nsw i32 %iv, 1 "
      "  %cmp = icmp eq i32 %iv.next, 16 "
      "  br i1 %cmp, label %exit, label %loop.body "
      "exit: "
      "  ret void "
      "} ",
      Err, C);

  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {0,+,1}
    auto *ScevI = SE.getSCEV(getArgByName(F, "i"));
    EXPECT_EQ(SE.getUnsignedRange(ScevIV).getLower(), 0);
    EXPECT_EQ(SE.getUnsignedRange(ScevIV).getUpper(), 16);

    auto *Add = SE.getAddExpr(ScevI, ScevIV);
    ValueToSCEVMapTy RewriteMap;
    RewriteMap[cast<SCEVUnknown>(ScevI)->getValue()] =
        SE.getUMinExpr(ScevI, SE.getConstant(ScevI->getType(), 17));
    auto *AddWithUMin = SCEVParameterRewriter::rewrite(Add, SE, RewriteMap);
    EXPECT_EQ(SE.getUnsignedRange(AddWithUMin).getLower(), 0);
    EXPECT_EQ(SE.getUnsignedRange(AddWithUMin).getUpper(), 33);
  });
}

TEST_F(ScalarEvolutionsTest, SCEVgetExitLimitForGuardedLoop) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "define void @foo(i32 %i) { "
      "entry: "
      "  %cmp3 = icmp ult i32 %i, 16 "
      "  br i1 %cmp3, label %loop.body, label %exit "
      "loop.body: "
      "  %iv = phi i32 [ %iv.next, %loop.body ], [ %i, %entry ] "
      "  %iv.next = add nsw i32 %iv, 1 "
      "  %cmp = icmp eq i32 %iv.next, 16 "
      "  br i1 %cmp, label %exit, label %loop.body "
      "exit: "
      "  ret void "
      "} ",
      Err, C);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {0,+,1}
    const Loop *L = cast<SCEVAddRecExpr>(ScevIV)->getLoop();

    const SCEV *BTC = SE.getBackedgeTakenCount(L);
    EXPECT_FALSE(isa<SCEVConstant>(BTC));
    const SCEV *MaxBTC = SE.getConstantMaxBackedgeTakenCount(L);
    EXPECT_EQ(cast<SCEVConstant>(MaxBTC)->getAPInt(), 15);
  });
}

TEST_F(ScalarEvolutionsTest, ImpliedViaAddRecStart) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "define void @foo(i32* %p) { "
      "entry: "
      "  %x = load i32, i32* %p, !range !0 "
      "  br label %loop "
      "loop: "
      "  %iv = phi i32 [ %x, %entry], [%iv.next, %backedge] "
      "  %ne.check = icmp ne i32 %iv, 0 "
      "  br i1 %ne.check, label %backedge, label %exit "
      "backedge: "
      "  %iv.next = add i32 %iv, -1 "
      "  br label %loop "
      "exit:"
      "  ret void "
      "} "
      "!0 = !{i32 0, i32 2147483647}",
      Err, C);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *X = SE.getSCEV(getInstructionByName(F, "x"));
    auto *Context = getInstructionByName(F, "iv.next");
    EXPECT_TRUE(SE.isKnownPredicateAt(ICmpInst::ICMP_NE, X,
                                      SE.getZero(X->getType()), Context));
  });
}

TEST_F(ScalarEvolutionsTest, UnsignedIsImpliedViaOperations) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M =
      parseAssemblyString("define void @foo(i32* %p1, i32* %p2) { "
                          "entry: "
                          "  %x = load i32, i32* %p1, !range !0 "
                          "  %cond = icmp ne i32 %x, 0 "
                          "  br i1 %cond, label %guarded, label %exit "
                          "guarded: "
                          "  %y = add i32 %x, -1 "
                          "  ret void "
                          "exit: "
                          "  ret void "
                          "} "
                          "!0 = !{i32 0, i32 2147483647}",
                          Err, C);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *X = SE.getSCEV(getInstructionByName(F, "x"));
    auto *Y = SE.getSCEV(getInstructionByName(F, "y"));
    auto *Guarded = getInstructionByName(F, "y")->getParent();
    ASSERT_TRUE(Guarded);
    EXPECT_TRUE(
        SE.isBasicBlockEntryGuardedByCond(Guarded, ICmpInst::ICMP_ULT, Y, X));
    EXPECT_TRUE(
        SE.isBasicBlockEntryGuardedByCond(Guarded, ICmpInst::ICMP_UGT, X, Y));
  });
}

TEST_F(ScalarEvolutionsTest, ProveImplicationViaNarrowing) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "define i32 @foo(i32 %start, i32* %q) { "
      "entry: "
      "  %wide.start = zext i32 %start to i64 "
      "  br label %loop "
      "loop: "
      "  %wide.iv = phi i64 [%wide.start, %entry], [%wide.iv.next, %backedge] "
      "  %iv = phi i32 [%start, %entry], [%iv.next, %backedge] "
      "  %cond = icmp eq i64 %wide.iv, 0 "
      "  br i1 %cond, label %exit, label %backedge "
      "backedge: "
      "  %iv.next = add i32 %iv, -1 "
      "  %index = zext i32 %iv.next to i64 "
      "  %load.addr = getelementptr i32, i32* %q, i64 %index "
      "  %stop = load i32, i32* %load.addr "
      "  %loop.cond = icmp eq i32 %stop, 0 "
      "  %wide.iv.next = add nsw i64 %wide.iv, -1 "
      "  br i1 %loop.cond, label %loop, label %failure "
      "exit: "
      "  ret i32 0 "
      "failure: "
      "  unreachable "
      "} ",
      Err, C);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *IV = SE.getSCEV(getInstructionByName(F, "iv"));
    auto *Zero = SE.getZero(IV->getType());
    auto *Backedge = getInstructionByName(F, "iv.next")->getParent();
    ASSERT_TRUE(Backedge);
    (void)IV;
    (void)Zero;
    // FIXME: This can only be proved when the option
    // scalar-evolution-use-expensive-range-sharpening is turned on, which it
    // currently is not by default. Enable the check once it is on by default.
    // EXPECT_TRUE(SE.isBasicBlockEntryGuardedByCond(Backedge,
    //                                               ICmpInst::ICMP_UGT,
    //                                               IV, Zero));
  });
}

TEST_F(ScalarEvolutionsTest, ImpliedCond) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "define void @foo(i32 %len) { "
      "entry: "
      "  br label %loop "
      "loop: "
      "  %iv = phi i32 [ 0, %entry], [%iv.next, %loop] "
      "  %iv.next = add nsw i32 %iv, 1 "
      "  %cmp = icmp slt i32 %iv, %len "
      "  br i1 %cmp, label %loop, label %exit "
      "exit:"
      "  ret void "
      "}",
      Err, C);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    Instruction *IV = getInstructionByName(F, "iv");
    Type *Ty = IV->getType();
    const SCEV *Zero = SE.getZero(Ty);
    const SCEV *MinusOne = SE.getMinusOne(Ty);
    // {0,+,1}<nuw><nsw>
    const SCEV *AddRec_0_1 = SE.getSCEV(IV);
    // {0,+,-1}<nw>
    const SCEV *AddRec_0_N1 = SE.getNegativeSCEV(AddRec_0_1);

    // {0,+,1}<nuw><nsw> > 0 -> {0,+,-1}<nw> < 0
    EXPECT_TRUE(isImpliedCond(SE, ICmpInst::ICMP_SLT, AddRec_0_N1, Zero,
                              ICmpInst::ICMP_SGT, AddRec_0_1, Zero));
    // {0,+,-1}<nw> < -1 -> {0,+,1}<nuw><nsw> > 0
    EXPECT_TRUE(isImpliedCond(SE, ICmpInst::ICMP_SGT, AddRec_0_1, Zero,
                              ICmpInst::ICMP_SLT, AddRec_0_N1, MinusOne));
  });
}

TEST_F(ScalarEvolutionsTest, MatchURem) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
      " "
      "define void @test(i32 %a, i32 %b, i16 %c, i64 %d) {"
      "entry: "
      "  %rem1 = urem i32 %a, 2"
      "  %rem2 = urem i32 %a, 5"
      "  %rem3 = urem i32 %a, %b"
      "  %c.ext = zext i16 %c to i32"
      "  %rem4 = urem i32 %c.ext, 2"
      "  %ext = zext i32 %rem4 to i64"
      "  %rem5 = urem i64 %d, 17179869184"
      "  ret void "
      "} ",
      Err, C);

  assert(M && "Could not parse module?");
  assert(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    for (auto *N : {"rem1", "rem2", "rem3", "rem5"}) {
      auto *URemI = getInstructionByName(F, N);
      auto *S = SE.getSCEV(URemI);
      const SCEV *LHS, *RHS;
      EXPECT_TRUE(matchURem(SE, S, LHS, RHS));
      EXPECT_EQ(LHS, SE.getSCEV(URemI->getOperand(0)));
      EXPECT_EQ(RHS, SE.getSCEV(URemI->getOperand(1)));
      EXPECT_EQ(LHS->getType(), S->getType());
      EXPECT_EQ(RHS->getType(), S->getType());
    }

    // Check the case where the urem operand is zero-extended. Make sure the
    // match results are extended to the size of the input expression.
    auto *Ext = getInstructionByName(F, "ext");
    auto *URem1 = getInstructionByName(F, "rem4");
    auto *S = SE.getSCEV(Ext);
    const SCEV *LHS, *RHS;
    EXPECT_TRUE(matchURem(SE, S, LHS, RHS));
    EXPECT_NE(LHS, SE.getSCEV(URem1->getOperand(0)));
    // RHS and URem1->getOperand(1) have different widths, so compare the
    // integer values.
    EXPECT_EQ(cast<SCEVConstant>(RHS)->getValue()->getZExtValue(),
              cast<SCEVConstant>(SE.getSCEV(URem1->getOperand(1)))
                  ->getValue()
                  ->getZExtValue());
    EXPECT_EQ(LHS->getType(), S->getType());
    EXPECT_EQ(RHS->getType(), S->getType());
  });
}

TEST_F(ScalarEvolutionsTest, SCEVUDivFloorCeiling) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString("define void @foo() { "
                                                  "  ret void "
                                                  "} ",
                                                  Err, C);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    // Check that SCEV's udiv and udiv-ceil handling produce the correct
    // results for all 8-bit operand values. Division by zero is deliberately
    // excluded.
    for (unsigned N = 0; N < 256; N++)
      for (unsigned D = 1; D < 256; D++) {
        APInt NInt(8, N);
        APInt DInt(8, D);
        using namespace llvm::APIntOps;
        APInt FloorInt = RoundingUDiv(NInt, DInt, APInt::Rounding::DOWN);
        APInt CeilingInt = RoundingUDiv(NInt, DInt, APInt::Rounding::UP);
        auto *NS = SE.getConstant(NInt);
        auto *DS = SE.getConstant(DInt);
        auto *FloorS = cast<SCEVConstant>(SE.getUDivExpr(NS, DS));
        auto *CeilingS = cast<SCEVConstant>(SE.getUDivCeilSCEV(NS, DS));
        ASSERT_TRUE(FloorS->getAPInt() == FloorInt);
        ASSERT_TRUE(CeilingS->getAPInt() == CeilingInt);
      }
  });
}

TEST_F(ScalarEvolutionsTest, CheckGetPowerOfTwo) {
  Module M("CheckGetPowerOfTwo", Context);
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), {}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", M);
  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  IRBuilder<> Builder(Entry);
  Builder.CreateRetVoid();
  ScalarEvolution SE = buildSE(*F);

  for (unsigned short i = 0; i < 64; ++i)
    EXPECT_TRUE(
        dyn_cast<SCEVConstant>(SE.getPowerOfTwo(Type::getInt64Ty(Context), i))
            ->getValue()
            ->equalsInt(1ULL << i));
}

TEST_F(ScalarEvolutionsTest, ApplyLoopGuards) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "declare void @llvm.assume(i1)\n"
      "define void @test(i32 %num) {\n"
      "entry:\n"
      "  %u = urem i32 %num, 4\n"
      "  %cmp = icmp eq i32 %u, 0\n"
      "  tail call void @llvm.assume(i1 %cmp)\n"
      "  %cmp.1 = icmp ugt i32 %num, 0\n"
      "  tail call void @llvm.assume(i1 %cmp.1)\n"
      "  br label %for.body\n"
      "for.body:\n"
      "  %i.010 = phi i32 [ 0, %entry ], [ %inc, %for.body ]\n"
      "  %inc = add nuw nsw i32 %i.010, 1\n"
      "  %cmp2 = icmp ult i32 %inc, %num\n"
      "  br i1 %cmp2, label %for.body, label %exit\n"
      "exit:\n"
      "  ret void\n"
      "}\n",
      Err, C);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "test", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *TCScev = SE.getSCEV(getArgByName(F, "num"));
    auto *ApplyLoopGuardsTC = SE.applyLoopGuards(TCScev, *LI.begin());
    // Assert that the new TC is (4 * ((4 umax %num) /u 4))
    APInt Four(32, 4);
    auto *Constant4 = SE.getConstant(Four);
    auto *Max = SE.getUMaxExpr(TCScev, Constant4);
    auto *Mul = SE.getMulExpr(SE.getUDivExpr(Max, Constant4), Constant4);
    ASSERT_TRUE(Mul == ApplyLoopGuardsTC);
  });
}

TEST_F(ScalarEvolutionsTest, ForgetValueWithOverflowInst) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32) "
      "define void @foo(i32 %i) { "
      "entry: "
      "  br label %loop.body "
      "loop.body: "
      "  %iv = phi i32 [ %iv.next, %loop.body ], [ 0, %entry ] "
      "  %iv.next = add nsw i32 %iv, 1 "
      "  %call = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %iv, i32 -2) "
      "  %extractvalue = extractvalue {i32, i1} %call, 0 "
      "  %cmp = icmp eq i32 %iv.next, 16 "
      "  br i1 %cmp, label %exit, label %loop.body "
      "exit: "
      "  ret void "
      "} ",
      Err, C);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *ExtractValue = getInstructionByName(F, "extractvalue");
    auto *IV = getInstructionByName(F, "iv");

    auto *ExtractValueScev = SE.getSCEV(ExtractValue);
    EXPECT_NE(ExtractValueScev, nullptr);

    SE.forgetValue(IV);
    auto *ExtractValueScevForgotten = SE.getExistingSCEV(ExtractValue);
    EXPECT_EQ(ExtractValueScevForgotten, nullptr);
  });
}

} // end namespace llvm