//===- MVETailPredication.cpp - MVE Tail Predication ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Armv8.1m introduced MVE, M-Profile Vector Extension, and low-overhead
/// branches to help accelerate DSP applications. These two extensions,
/// combined with a new form of predication called tail-predication, can be used
/// to provide implicit vector predication within a low-overhead loop.
/// This is implicit because the predicate of active/inactive lanes is
/// calculated by hardware, and thus does not need to be explicitly passed
/// to vector instructions. The instructions responsible for this are the
/// DLSTP and WLSTP instructions, which setup a tail-predicated loop and
/// the total number of data elements processed by the loop. The loop-end
/// LETP instruction is responsible for decrementing and setting the remaining
/// elements to be processed and generating the mask of active lanes.
///
/// The HardwareLoops pass inserts intrinsics identifying loops that the
/// backend will attempt to convert into a low-overhead loop. The vectorizer is
/// responsible for generating a vectorized loop in which the lanes are
/// predicated upon the iteration counter. This pass looks at these predicated
/// vector loops, that are targets for low-overhead loops, and prepares them
/// for code generation. Once the vectorizer has produced a masked loop,
/// there's a couple of final forms:
/// - A tail-predicated loop, with implicit predication.
/// - A loop containing multiple VCTP instructions, predicating multiple VPT
///   blocks of instructions operating on different vector types.
32 /// 33 /// This pass: 34 /// 1) Checks if the predicates of the masked load/store instructions are 35 /// generated by intrinsic @llvm.get.active.lanes(). This intrinsic consumes 36 /// the Backedge Taken Count (BTC) of the scalar loop as its second argument, 37 /// which we extract to set up the number of elements processed by the loop. 38 /// 2) Intrinsic @llvm.get.active.lanes() is then replaced by the MVE target 39 /// specific VCTP intrinsic to represent the effect of tail predication. 40 /// This will be picked up by the ARM Low-overhead loop pass, which performs 41 /// the final transformation to a DLSTP or WLSTP tail-predicated loop. 42 43 #include "ARM.h" 44 #include "ARMSubtarget.h" 45 #include "llvm/Analysis/LoopInfo.h" 46 #include "llvm/Analysis/LoopPass.h" 47 #include "llvm/Analysis/ScalarEvolution.h" 48 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 49 #include "llvm/Analysis/TargetTransformInfo.h" 50 #include "llvm/CodeGen/TargetPassConfig.h" 51 #include "llvm/IR/IRBuilder.h" 52 #include "llvm/IR/Instructions.h" 53 #include "llvm/IR/IntrinsicsARM.h" 54 #include "llvm/IR/PatternMatch.h" 55 #include "llvm/InitializePasses.h" 56 #include "llvm/Support/Debug.h" 57 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 58 #include "llvm/Transforms/Utils/LoopUtils.h" 59 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" 60 61 using namespace llvm; 62 63 #define DEBUG_TYPE "mve-tail-predication" 64 #define DESC "Transform predicated vector loops to use MVE tail predication" 65 66 static cl::opt<bool> 67 ForceTailPredication("force-mve-tail-predication", cl::Hidden, cl::init(false), 68 cl::desc("Force MVE tail-predication even if it might be " 69 "unsafe (e.g. 
possible overflow in loop " 70 "counters)")); 71 72 cl::opt<bool> 73 DisableTailPredication("disable-mve-tail-predication", cl::Hidden, 74 cl::init(true), 75 cl::desc("Disable MVE Tail Predication")); 76 namespace { 77 78 class MVETailPredication : public LoopPass { 79 SmallVector<IntrinsicInst*, 4> MaskedInsts; 80 Loop *L = nullptr; 81 LoopInfo *LI = nullptr; 82 const DataLayout *DL; 83 DominatorTree *DT = nullptr; 84 ScalarEvolution *SE = nullptr; 85 TargetTransformInfo *TTI = nullptr; 86 TargetLibraryInfo *TLI = nullptr; 87 bool ClonedVCTPInExitBlock = false; 88 IntrinsicInst *ActiveLaneMask = nullptr; 89 FixedVectorType *VecTy = nullptr; 90 91 public: 92 static char ID; 93 94 MVETailPredication() : LoopPass(ID) { } 95 96 void getAnalysisUsage(AnalysisUsage &AU) const override { 97 AU.addRequired<ScalarEvolutionWrapperPass>(); 98 AU.addRequired<LoopInfoWrapperPass>(); 99 AU.addRequired<TargetPassConfig>(); 100 AU.addRequired<TargetTransformInfoWrapperPass>(); 101 AU.addRequired<DominatorTreeWrapperPass>(); 102 AU.addRequired<TargetLibraryInfoWrapperPass>(); 103 AU.addPreserved<LoopInfoWrapperPass>(); 104 AU.setPreservesCFG(); 105 } 106 107 bool runOnLoop(Loop *L, LPPassManager&) override; 108 109 private: 110 /// Perform the relevant checks on the loop and convert if possible. 111 bool TryConvert(Value *TripCount); 112 113 /// Return whether this is a vectorized loop, that contains masked 114 /// load/stores. 115 bool IsPredicatedVectorLoop(); 116 117 /// Perform checks on the arguments of @llvm.get.active.lane.mask 118 /// intrinsic: check if the first is a loop induction variable, and for the 119 /// the second check that no overflow can occur in the expression that use 120 /// this backedge-taken count. 121 bool IsSafeActiveMask(Value *TripCount, FixedVectorType *VecTy); 122 123 /// Insert the intrinsic to represent the effect of tail predication. 
124 void InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask, Value *TripCount, 125 FixedVectorType *VecTy, 126 DenseMap<Instruction *, Instruction *> &NewPredicates); 127 128 /// Rematerialize the iteration count in exit blocks, which enables 129 /// ARMLowOverheadLoops to better optimise away loop update statements inside 130 /// hardware-loops. 131 void RematerializeIterCount(); 132 133 /// If it is not safe to lower @llvm.get.active.lane.mask to a VCTP, it needs 134 /// to be lowered to an icmp. 135 void RevertActiveLaneMask(); 136 }; 137 138 } // end namespace 139 140 static bool IsDecrement(Instruction &I) { 141 auto *Call = dyn_cast<IntrinsicInst>(&I); 142 if (!Call) 143 return false; 144 145 Intrinsic::ID ID = Call->getIntrinsicID(); 146 return ID == Intrinsic::loop_decrement_reg; 147 } 148 149 static bool IsMasked(Instruction *I) { 150 auto *Call = dyn_cast<IntrinsicInst>(I); 151 if (!Call) 152 return false; 153 154 Intrinsic::ID ID = Call->getIntrinsicID(); 155 // TODO: Support gather/scatter expand/compress operations. 156 return ID == Intrinsic::masked_store || ID == Intrinsic::masked_load; 157 } 158 159 void MVETailPredication::RematerializeIterCount() { 160 SmallVector<WeakTrackingVH, 16> DeadInsts; 161 SCEVExpander Rewriter(*SE, *DL, "mvetp"); 162 ReplaceExitVal ReplaceExitValue = AlwaysRepl; 163 164 formLCSSARecursively(*L, *DT, LI, SE); 165 rewriteLoopExitValues(L, LI, TLI, SE, TTI, Rewriter, DT, ReplaceExitValue, 166 DeadInsts); 167 } 168 169 void MVETailPredication::RevertActiveLaneMask() { 170 if (!ActiveLaneMask) 171 return; 172 173 int VectorWidth = VecTy->getElementCount().Min; 174 IRBuilder<> Builder(ActiveLaneMask->getParent()->getFirstNonPHI()); 175 176 // 1. Create the vector induction step. 
This %induction will be the LHS of 177 // the icmp: 178 // 179 // %splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0 180 // %splat = shufflevector <4 x i32> %splatinsert, <4 x i32> undef, <4 x i32> 0 181 // %induction = add <4 x i32> %splat, <i32 0, i32 1, i32 2, i32 3> 182 // 183 Value *Index = ActiveLaneMask->getOperand(0); 184 Value *SplatIndex = 185 Builder.CreateVectorSplat(VectorWidth, Index, "lane.mask"); 186 187 SmallVector<Constant *, 8> Indices; 188 for (int i = 0; i < VectorWidth; ++i) 189 Indices.push_back(ConstantInt::get(Index->getType(), i)); 190 191 Constant *CV = ConstantVector::get(Indices); 192 Value *Induction = Builder.CreateAdd(SplatIndex, CV, "lane.mask.induction"); 193 194 LLVM_DEBUG(dbgs() << "ARM TP: New index: " << *SplatIndex << "\n"; 195 dbgs() << "ARM TP: New Induction: " << *Induction << "\n"); 196 197 // 2. In the Preheader, first look if the splat BTC already exists. Find this 198 // %splat, which will be the RHS of the icmp: 199 // 200 // %TC.minus.1 = add i32 %N, -1 201 // %splatinsert = insertelement <4 x i32> undef, i32 %TC.minus.1, i32 0 202 // %splat = shufflevector <4 x i32> %splatinsert, <4 x i32> undef, <16 x i32> 0 203 // 204 auto *Preheader = L->getLoopPreheader(); 205 auto *BTC = ActiveLaneMask->getOperand(1); 206 Value *SplatBTC = nullptr; 207 208 if (auto *C = dyn_cast<ConstantInt>(BTC)) { 209 Builder.SetInsertPoint(Preheader->getTerminator()); 210 SplatBTC = Builder.CreateVectorSplat(VectorWidth, C); 211 LLVM_DEBUG(dbgs() << "ARM TCP: New splat BTC: " << *SplatBTC << "\n"); 212 } else { 213 Instruction *InsertElem; 214 for (auto &V : *Preheader) { 215 InsertElem = dyn_cast<InsertElementInst>(&V); 216 if (!InsertElem) 217 continue; 218 ConstantInt *CI = dyn_cast<ConstantInt>(InsertElem->getOperand(2)); 219 if (!CI) 220 continue; 221 if (InsertElem->getOperand(1) != BTC || CI->getSExtValue() != 0) 222 continue; 223 if ((SplatBTC = dyn_cast<ShuffleVectorInst>(*InsertElem->users().begin()))) 224 break; 225 } 
226 } 227 // Or create the splat BTC if it doesn't exist. 228 if (!SplatBTC) { 229 Builder.SetInsertPoint(Preheader->getTerminator()); 230 Value *Undef = 231 UndefValue::get(FixedVectorType::get(BTC->getType(), VectorWidth)); 232 Value *Insert = Builder.CreateInsertElement(Undef, 233 BTC, Builder.getInt32(0), "insert.btc"); 234 Value *Zero = ConstantInt::get(Insert->getType(), 0); 235 SplatBTC = Builder.CreateShuffleVector (Insert, Undef, Zero, "splat.btc"); 236 LLVM_DEBUG(dbgs() << "ARM TCP: New splat BTC: " << *SplatBTC << "\n"); 237 } 238 239 Builder.SetInsertPoint(ActiveLaneMask); 240 Value *ICmp = Builder.CreateICmp(ICmpInst::ICMP_ULE, Induction, SplatBTC); 241 LLVM_DEBUG(dbgs() << "ARM TP: New compare: " << *ICmp << "\n"); 242 ActiveLaneMask->replaceAllUsesWith(ICmp); 243 ActiveLaneMask->eraseFromParent(); 244 } 245 246 bool MVETailPredication::runOnLoop(Loop *L, LPPassManager&) { 247 if (skipLoop(L) || DisableTailPredication) 248 return false; 249 250 MaskedInsts.clear(); 251 Function &F = *L->getHeader()->getParent(); 252 auto &TPC = getAnalysis<TargetPassConfig>(); 253 auto &TM = TPC.getTM<TargetMachine>(); 254 auto *ST = &TM.getSubtarget<ARMSubtarget>(F); 255 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 256 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 257 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 258 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 259 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 260 TLI = TLIP ? &TLIP->getTLI(*L->getHeader()->getParent()) : nullptr; 261 DL = &L->getHeader()->getModule()->getDataLayout(); 262 this->L = L; 263 ActiveLaneMask = nullptr; 264 265 // The MVE and LOB extensions are combined to enable tail-predication, but 266 // there's nothing preventing us from generating VCTP instructions for v8.1m. 
267 if (!ST->hasMVEIntegerOps() || !ST->hasV8_1MMainlineOps()) { 268 LLVM_DEBUG(dbgs() << "ARM TP: Not a v8.1m.main+mve target.\n"); 269 return false; 270 } 271 272 BasicBlock *Preheader = L->getLoopPreheader(); 273 if (!Preheader) 274 return false; 275 276 auto FindLoopIterations = [](BasicBlock *BB) -> IntrinsicInst* { 277 for (auto &I : *BB) { 278 auto *Call = dyn_cast<IntrinsicInst>(&I); 279 if (!Call) 280 continue; 281 282 Intrinsic::ID ID = Call->getIntrinsicID(); 283 if (ID == Intrinsic::set_loop_iterations || 284 ID == Intrinsic::test_set_loop_iterations) 285 return cast<IntrinsicInst>(&I); 286 } 287 return nullptr; 288 }; 289 290 // Look for the hardware loop intrinsic that sets the iteration count. 291 IntrinsicInst *Setup = FindLoopIterations(Preheader); 292 293 // The test.set iteration could live in the pre-preheader. 294 if (!Setup) { 295 if (!Preheader->getSinglePredecessor()) 296 return false; 297 Setup = FindLoopIterations(Preheader->getSinglePredecessor()); 298 if (!Setup) 299 return false; 300 } 301 302 // Search for the hardware loop intrinic that decrements the loop counter. 303 IntrinsicInst *Decrement = nullptr; 304 for (auto *BB : L->getBlocks()) { 305 for (auto &I : *BB) { 306 if (IsDecrement(I)) { 307 Decrement = cast<IntrinsicInst>(&I); 308 break; 309 } 310 } 311 } 312 313 if (!Decrement) 314 return false; 315 316 ClonedVCTPInExitBlock = false; 317 LLVM_DEBUG(dbgs() << "ARM TP: Running on Loop: " << *L << *Setup << "\n" 318 << *Decrement << "\n"); 319 320 if (TryConvert(Setup->getArgOperand(0))) { 321 if (ClonedVCTPInExitBlock) 322 RematerializeIterCount(); 323 return true; 324 } else 325 RevertActiveLaneMask(); 326 327 LLVM_DEBUG(dbgs() << "ARM TP: Can't tail-predicate this loop.\n"); 328 return false; 329 } 330 331 static FixedVectorType *getVectorType(IntrinsicInst *I) { 332 unsigned TypeOp = I->getIntrinsicID() == Intrinsic::masked_load ? 
0 : 1; 333 auto *PtrTy = cast<PointerType>(I->getOperand(TypeOp)->getType()); 334 auto *VecTy = cast<FixedVectorType>(PtrTy->getElementType()); 335 assert(VecTy && "No scalable vectors expected here"); 336 return VecTy; 337 } 338 339 bool MVETailPredication::IsPredicatedVectorLoop() { 340 // Check that the loop contains at least one masked load/store intrinsic. 341 // We only support 'normal' vector instructions - other than masked 342 // load/stores. 343 for (auto *BB : L->getBlocks()) { 344 for (auto &I : *BB) { 345 if (IsMasked(&I)) { 346 FixedVectorType *VecTy = getVectorType(cast<IntrinsicInst>(&I)); 347 unsigned Lanes = VecTy->getNumElements(); 348 unsigned ElementWidth = VecTy->getScalarSizeInBits(); 349 // MVE vectors are 128-bit, but don't support 128 x i1. 350 // TODO: Can we support vectors larger than 128-bits? 351 unsigned MaxWidth = TTI->getRegisterBitWidth(true); 352 if (Lanes * ElementWidth > MaxWidth || Lanes == MaxWidth) 353 return false; 354 MaskedInsts.push_back(cast<IntrinsicInst>(&I)); 355 } else if (auto *Int = dyn_cast<IntrinsicInst>(&I)) { 356 if (Int->getIntrinsicID() == Intrinsic::fma) 357 continue; 358 for (auto &U : Int->args()) { 359 if (isa<VectorType>(U->getType())) 360 return false; 361 } 362 } 363 } 364 } 365 366 return !MaskedInsts.empty(); 367 } 368 369 // Look through the exit block to see whether there's a duplicate predicate 370 // instruction. This can happen when we need to perform a select on values 371 // from the last and previous iteration. Instead of doing a straight 372 // replacement of that predicate with the vctp, clone the vctp and place it 373 // in the block. This means that the VPR doesn't have to be live into the 374 // exit block which should make it easier to convert this loop into a proper 375 // tail predicated loop. 
376 static bool Cleanup(DenseMap<Instruction*, Instruction*> &NewPredicates, 377 SetVector<Instruction*> &MaybeDead, Loop *L) { 378 BasicBlock *Exit = L->getUniqueExitBlock(); 379 if (!Exit) { 380 LLVM_DEBUG(dbgs() << "ARM TP: can't find loop exit block\n"); 381 return false; 382 } 383 384 bool ClonedVCTPInExitBlock = false; 385 386 for (auto &Pair : NewPredicates) { 387 Instruction *OldPred = Pair.first; 388 Instruction *NewPred = Pair.second; 389 390 for (auto &I : *Exit) { 391 if (I.isSameOperationAs(OldPred)) { 392 Instruction *PredClone = NewPred->clone(); 393 PredClone->insertBefore(&I); 394 I.replaceAllUsesWith(PredClone); 395 MaybeDead.insert(&I); 396 ClonedVCTPInExitBlock = true; 397 LLVM_DEBUG(dbgs() << "ARM TP: replacing: "; I.dump(); 398 dbgs() << "ARM TP: with: "; PredClone->dump()); 399 break; 400 } 401 } 402 } 403 404 // Drop references and add operands to check for dead. 405 SmallPtrSet<Instruction*, 4> Dead; 406 while (!MaybeDead.empty()) { 407 auto *I = MaybeDead.front(); 408 MaybeDead.remove(I); 409 if (I->hasNUsesOrMore(1)) 410 continue; 411 412 for (auto &U : I->operands()) 413 if (auto *OpI = dyn_cast<Instruction>(U)) 414 MaybeDead.insert(OpI); 415 416 Dead.insert(I); 417 } 418 419 for (auto *I : Dead) { 420 LLVM_DEBUG(dbgs() << "ARM TP: removing dead insn: "; I->dump()); 421 I->eraseFromParent(); 422 } 423 424 for (auto I : L->blocks()) 425 DeleteDeadPHIs(I); 426 427 return ClonedVCTPInExitBlock; 428 } 429 430 // The active lane intrinsic has this form: 431 // 432 // @llvm.get.active.lane.mask(IV, BTC) 433 // 434 // Here we perform checks that this intrinsic behaves as expected, 435 // which means: 436 // 437 // 1) The element count, which is calculated with BTC + 1, cannot overflow. 
// 2) The element count needs to be sufficiently large that the decrement of
//    element counter doesn't overflow, which means that we need to prove:
//        ceil(ElementCount / VectorWidth) >= TripCount
//    by rounding up ElementCount up:
//        ((ElementCount + (VectorWidth - 1)) / VectorWidth
//    and evaluate if expression isKnownNonNegative:
//        (((ElementCount + (VectorWidth - 1)) / VectorWidth) - TripCount
// 3) The IV must be an induction phi with an increment equal to the
//    vector width.
bool MVETailPredication::IsSafeActiveMask(Value *TripCount,
                                          FixedVectorType *VecTy) {
  // 1) Test whether entry to the loop is protected by a conditional
  // BTC + 1 < 0. In other words, if the scalar trip count overflows,
  // becomes negative, we shouldn't enter the loop and creating
  // tripcount expression BTC + 1 is not safe. So, check that BTC
  // isn't max. This is evaluated in unsigned, because the semantics
  // of @get.active.lane.mask is a ULE comparison.
  int VectorWidth = VecTy->getNumElements();
  auto *BackedgeTakenCount = ActiveLaneMask->getOperand(1);
  auto *BTC = SE->getSCEV(BackedgeTakenCount);

  if (!llvm::cannotBeMaxInLoop(BTC, L, *SE, false /*Signed*/) &&
      !ForceTailPredication) {
    LLVM_DEBUG(dbgs() << "ARM TP: Overflow possible, BTC can be max: ";
               BTC->dump());
    return false;
  }

  // 2) Prove that the sub expression is non-negative, i.e. it doesn't overflow:
  //
  //      (((ElementCount + (VectorWidth - 1)) / VectorWidth) - TripCount
  //
  // 2.1) First prove overflow can't happen in:
  //
  //      ElementCount + (VectorWidth - 1)
  //
  // Because of a lack of context, it is difficult to get a useful bounds on
  // this expression. But since ElementCount uses the same variables as the
  // TripCount (TC), for which we can find meaningful value ranges, we use that
  // instead and assert that:
  //
  //      upperbound(TC) <= UINT_MAX - VectorWidth
  //
  auto *TC = SE->getSCEV(TripCount);
  unsigned SizeInBits = TripCount->getType()->getScalarSizeInBits();
  // ~0 truncated to SizeInBits gives the unsigned max of the trip-count type.
  auto Diff = APInt(SizeInBits, ~0) - APInt(SizeInBits, VectorWidth);
  uint64_t MaxMinusVW = Diff.getZExtValue();
  uint64_t UpperboundTC = SE->getSignedRange(TC).getUpper().getZExtValue();

  if (UpperboundTC > MaxMinusVW && !ForceTailPredication) {
    LLVM_DEBUG(dbgs() << "ARM TP: Overflow possible in tripcount rounding:\n";
               dbgs() << "upperbound(TC) <= UINT_MAX - VectorWidth\n";
               dbgs() << UpperboundTC << " <= " << MaxMinusVW << "== false\n";);
    return false;
  }

  // 2.2) Make sure overflow doesn't happen in final expression:
  //  (((ElementCount + (VectorWidth - 1)) / VectorWidth) - TripCount,
  //  To do this, compare the full ranges of these subexpressions:
  //
  //      Range(Ceil) <= Range(TC)
  //
  // where Ceil = ElementCount + (VW-1) / VW. If Ceil and TC are runtime
  // values (and not constants), we have to compensate for the lowerbound value
  // range to be off by 1. The reason is that BTC lives in the preheader in
  // this form:
  //
  //     %trip.count.minus = add nsw nuw i32 %N, -1
  //
  // For the loop to be executed, %N has to be >= 1 and as a result the value
  // range of %trip.count.minus has a lower bound of 0. Value %TC has this form:
  //
  //     %5 = add nuw nsw i32 %4, 1
  //     call void @llvm.set.loop.iterations.i32(i32 %5)
  //
  // where %5 is some expression using %N, which needs to have a lower bound of
  // 1. Thus, if the ranges of Ceil and TC are not a single constant but a set,
  // we first add 0 to TC such that we can do the <= comparison on both sets.
  //
  auto *One = SE->getOne(TripCount->getType());
  // ElementCount = BTC + 1
  auto *ElementCount = SE->getAddExpr(BTC, One);
  // Tmp = ElementCount + (VW-1)
  auto *ECPlusVWMinus1 = SE->getAddExpr(ElementCount,
      SE->getSCEV(ConstantInt::get(TripCount->getType(), VectorWidth - 1)));
  // Ceil = ElementCount + (VW-1) / VW
  auto *Ceil = SE->getUDivExpr(ECPlusVWMinus1,
      SE->getSCEV(ConstantInt::get(TripCount->getType(), VectorWidth)));

  ConstantRange RangeCeil = SE->getSignedRange(Ceil);
  ConstantRange RangeTC = SE->getSignedRange(TC);
  if (!RangeTC.isSingleElement()) {
    // Extend TC's range down to 0 so the containment check below compares
    // like-for-like (see the comment above).
    auto ZeroRange =
        ConstantRange(APInt(TripCount->getType()->getScalarSizeInBits(), 0));
    RangeTC = RangeTC.unionWith(ZeroRange);
  }
  if (!RangeTC.contains(RangeCeil) && !ForceTailPredication) {
    LLVM_DEBUG(dbgs() << "ARM TP: Overflow possible in sub\n");
    return false;
  }

  // 3) Find out if IV is an induction phi. Note that we can't use Loop
  // helpers here to get the induction variable, because the hardware loop is
  // no longer in loopsimplify form, and also the hwloop intrinsic uses a
  // different counter. Using SCEV, we check that the induction is of the
  // form i = i + 4, where the increment must be equal to the VectorWidth.
  auto *IV = ActiveLaneMask->getOperand(0);
  auto *IVExpr = SE->getSCEV(IV);
  auto *AddExpr = dyn_cast<SCEVAddRecExpr>(IVExpr);
  if (!AddExpr) {
    LLVM_DEBUG(dbgs() << "ARM TP: induction not an add expr: "; IVExpr->dump());
    return false;
  }
  // Check that this AddRec is associated with this loop.
  if (AddExpr->getLoop() != L) {
    LLVM_DEBUG(dbgs() << "ARM TP: phi not part of this loop\n");
    return false;
  }
  auto *Step = dyn_cast<SCEVConstant>(AddExpr->getOperand(1));
  if (!Step) {
    LLVM_DEBUG(dbgs() << "ARM TP: induction step is not a constant: ";
               AddExpr->getOperand(1)->dump());
    return false;
  }
  auto StepValue = Step->getValue()->getSExtValue();
  if (VectorWidth == StepValue)
    return true;

  LLVM_DEBUG(dbgs() << "ARM TP: step value " << StepValue << " doesn't match "
             "vector width : " << VectorWidth << "\n");

  return false;
}

// Materialize NumElements in the preheader block.
static Value *getNumElements(BasicBlock *Preheader, Value *BTC) {
  // First, check the preheader if it not already exist:
  //
  // preheader:
  //    %BTC = add i32 %N, -1
  //    ..
  // vector.body:
  //
  // if %BTC already exists. We don't need to emit %NumElems = %BTC + 1,
  // but instead can just return %N.
  for (auto &I : *Preheader) {
    if (I.getOpcode() != Instruction::Add || &I != BTC)
      continue;
    ConstantInt *MinusOne = nullptr;
    if (!(MinusOne = dyn_cast<ConstantInt>(I.getOperand(1))))
      continue;
    if (MinusOne->getSExtValue() == -1) {
      LLVM_DEBUG(dbgs() << "ARM TP: Found num elems: " << I << "\n");
      return I.getOperand(0);
    }
  }

  // But we do need to materialise BTC if it is not already there,
  // e.g. if it is a constant.
  IRBuilder<> Builder(Preheader->getTerminator());
  Value *NumElements = Builder.CreateAdd(BTC,
      ConstantInt::get(BTC->getType(), 1), "num.elements");
  LLVM_DEBUG(dbgs() << "ARM TP: Created num elems: " << *NumElements << "\n");
  return NumElements;
}

// Insert the VCTP intrinsic that replaces \p ActiveLaneMask, plus the
// elements-remaining phi that feeds it. Records the old->new predicate
// mapping in \p NewPredicates for the later exit-block cleanup.
void MVETailPredication::InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask,
    Value *TripCount, FixedVectorType *VecTy,
    DenseMap<Instruction*, Instruction*> &NewPredicates) {
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  Module *M = L->getHeader()->getModule();
  Type *Ty = IntegerType::get(M->getContext(), 32);

  // The backedge-taken count in @llvm.get.active.lane.mask, its 2nd operand,
  // is one less than the trip count. So we need to find or create
  // %num.elements = %BTC + 1 in the preheader.
  Value *BTC = ActiveLaneMask->getOperand(1);
  Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());
  Value *NumElements = getNumElements(L->getLoopPreheader(), BTC);

  // Insert a phi to count the number of elements processed by the loop.
  Builder.SetInsertPoint(L->getHeader()->getFirstNonPHI());
  PHINode *Processed = Builder.CreatePHI(Ty, 2);
  Processed->addIncoming(NumElements, L->getLoopPreheader());

  // Replace @llvm.get.active.lane.mask() with the ARM specific VCTP intrinsic,
  // and thus represent the effect of tail predication.
  Builder.SetInsertPoint(ActiveLaneMask);
  ConstantInt *Factor =
      ConstantInt::get(cast<IntegerType>(Ty), VecTy->getNumElements());

  Intrinsic::ID VCTPID;
  switch (VecTy->getNumElements()) {
  default:
    llvm_unreachable("unexpected number of lanes");
  case 4:  VCTPID = Intrinsic::arm_mve_vctp32; break;
  case 8:  VCTPID = Intrinsic::arm_mve_vctp16; break;
  case 16: VCTPID = Intrinsic::arm_mve_vctp8; break;

    // FIXME: vctp64 currently not supported because the predicate
    // vector wants to be <2 x i1>, but v2i1 is not a legal MVE
    // type, so problems happen at isel time.
    // Intrinsic::arm_mve_vctp64 exists for ACLE intrinsics
    // purposes, but takes a v4i1 instead of a v2i1.
  }
  Function *VCTP = Intrinsic::getDeclaration(M, VCTPID);
  Value *VCTPCall = Builder.CreateCall(VCTP, Processed);
  ActiveLaneMask->replaceAllUsesWith(VCTPCall);
  NewPredicates[ActiveLaneMask] = cast<Instruction>(VCTPCall);

  // Add the incoming value to the new phi.
  // TODO: This add likely already exists in the loop.
  Value *Remaining = Builder.CreateSub(Processed, Factor);
  Processed->addIncoming(Remaining, L->getLoopLatch());
  LLVM_DEBUG(dbgs() << "ARM TP: Insert processed elements phi: "
             << *Processed << "\n"
             << "ARM TP: Inserted VCTP: " << *VCTPCall << "\n");
}

// Top-level conversion driver: find the active lane masks feeding the masked
// load/stores, prove each is safe, and replace them with VCTPs.
bool MVETailPredication::TryConvert(Value *TripCount) {
  if (!IsPredicatedVectorLoop()) {
    LLVM_DEBUG(dbgs() << "ARM TP: no masked instructions in loop.\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "ARM TP: Found predicated vector loop.\n");

  SetVector<Instruction*> Predicates;
  DenseMap<Instruction*, Instruction*> NewPredicates;

  // Walk through the masked intrinsics and try to find whether the predicate
  // operand is generated by intrinsic @llvm.get.active.lane.mask().
  for (auto *I : MaskedInsts) {
    // The mask is operand 2 of a masked load, operand 3 of a masked store.
    unsigned PredOp = I->getIntrinsicID() == Intrinsic::masked_load ? 2 : 3;
    auto *Predicate = dyn_cast<Instruction>(I->getArgOperand(PredOp));
    if (!Predicate || Predicates.count(Predicate))
      continue;

    ActiveLaneMask = dyn_cast<IntrinsicInst>(Predicate);
    if (!ActiveLaneMask ||
        ActiveLaneMask->getIntrinsicID() != Intrinsic::get_active_lane_mask)
      continue;

    Predicates.insert(Predicate);
    LLVM_DEBUG(dbgs() << "ARM TP: Found active lane mask: "
               << *ActiveLaneMask << "\n");

    VecTy = getVectorType(I);
    if (!IsSafeActiveMask(TripCount, VecTy)) {
      LLVM_DEBUG(dbgs() << "ARM TP: Not safe to insert VCTP.\n");
      return false;
    }
    LLVM_DEBUG(dbgs() << "ARM TP: Safe to insert VCTP.\n");
    InsertVCTPIntrinsic(ActiveLaneMask, TripCount, VecTy, NewPredicates);
  }

  // Now clean up.
  ClonedVCTPInExitBlock = Cleanup(NewPredicates, Predicates, L);
  return true;
}

Pass *llvm::createMVETailPredicationPass() {
  return new MVETailPredication();
}

char MVETailPredication::ID = 0;

INITIALIZE_PASS_BEGIN(MVETailPredication, DEBUG_TYPE, DESC, false, false)
INITIALIZE_PASS_END(MVETailPredication, DEBUG_TYPE, DESC, false, false)