//===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements the OpenMPIRBuilder class, which is used as a
/// convenient way to create LLVM instructions for OpenMP directives.
///
//===----------------------------------------------------------------------===//

#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Error.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CodeExtractor.h"
#include "llvm/Transforms/Utils/LoopPeel.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

#include <sstream>

#define DEBUG_TYPE "openmp-ir-builder"

using namespace llvm;
using namespace omp;

static cl::opt<bool>
    OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden,
                         cl::desc("Use optimistic attributes describing "
                                  "'as-if' properties of runtime calls."),
                         cl::init(false));

static cl::opt<double> UnrollThresholdFactor(
    "openmp-ir-builder-unroll-threshold-factor", cl::Hidden,
    cl::desc("Factor for the unroll threshold to account for code "
             "simplifications still taking place"),
    cl::init(1.5));

void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) {
  LLVMContext &Ctx = Fn.getContext();

  // Get the function's current attributes.
  auto Attrs = Fn.getAttributes();
  auto FnAttrs = Attrs.getFnAttrs();
  auto RetAttrs = Attrs.getRetAttrs();
  SmallVector<AttributeSet, 4> ArgAttrs;
  for (size_t ArgNo = 0; ArgNo < Fn.arg_size(); ++ArgNo)
    ArgAttrs.emplace_back(Attrs.getParamAttrs(ArgNo));

#define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet;
#include "llvm/Frontend/OpenMP/OMPKinds.def"

  // Add attributes to the function declaration.
  switch (FnID) {
#define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets)                \
  case Enum:                                                                   \
    FnAttrs = FnAttrs.addAttributes(Ctx, FnAttrSet);                           \
    RetAttrs = RetAttrs.addAttributes(Ctx, RetAttrSet);                        \
    for (size_t ArgNo = 0; ArgNo < ArgAttrSets.size(); ++ArgNo)                \
      ArgAttrs[ArgNo] =                                                        \
          ArgAttrs[ArgNo].addAttributes(Ctx, ArgAttrSets[ArgNo]);              \
    Fn.setAttributes(AttributeList::get(Ctx, FnAttrs, RetAttrs, ArgAttrs));    \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  default:
    // Attributes are optional.
    break;
  }
}

FunctionCallee
OpenMPIRBuilder::getOrCreateRuntimeFunction(Module &M, RuntimeFunction FnID) {
  FunctionType *FnTy = nullptr;
  Function *Fn = nullptr;

  // Try to find the declaration in the module first.
  switch (FnID) {
#define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...)                          \
  case Enum:                                                                   \
    FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__},        \
                             IsVarArg);                                        \
    Fn = M.getFunction(Str);                                                   \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  }

  if (!Fn) {
    // Create a new declaration if we need one.
    switch (FnID) {
#define OMP_RTL(Enum, Str, ...)                                                \
  case Enum:                                                                   \
    Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M);         \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
    }

    // Add information if the runtime function takes a callback function.
    if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) {
      if (!Fn->hasMetadata(LLVMContext::MD_callback)) {
        LLVMContext &Ctx = Fn->getContext();
        MDBuilder MDB(Ctx);
        // Annotate the callback behavior of the runtime function:
        //  - The callback callee is argument number 2 (microtask).
        //  - The first two arguments of the callback callee are unknown (-1).
        //  - All variadic arguments to the runtime function are passed to the
        //    callback callee.
        Fn->addMetadata(
            LLVMContext::MD_callback,
            *MDNode::get(Ctx, {MDB.createCallbackEncoding(
                                  2, {-1, -1}, /* VarArgsArePassed */ true)}));
      }
    }

    LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName()
                      << " with type " << *Fn->getFunctionType() << "\n");
    addAttributes(FnID, *Fn);

  } else {
    LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName()
                      << " with type " << *Fn->getFunctionType() << "\n");
  }

  assert(Fn && "Failed to create OpenMP runtime function");

  // Cast the function to the expected type if necessary.
  Constant *C = ConstantExpr::getBitCast(Fn, FnTy->getPointerTo());
  return {FnTy, C};
}
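// For illustration, the !callback annotation added above typically renders in
// textual IR roughly as follows (a sketch; the metadata node numbers vary):
//
//   declare !callback !0 void @__kmpc_fork_call(%struct.ident_t*, i32, ...)
//   !0 = !{!1}
//   !1 = !{i64 2, i64 -1, i64 -1, i1 true}
//
// i.e., the callee is at argument index 2, its first two arguments are
// unknown, and all variadic arguments are forwarded to the callback.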
Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) {
  FunctionCallee RTLFn = getOrCreateRuntimeFunction(M, FnID);
  auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee());
  assert(Fn && "Failed to create OpenMP runtime function pointer");
  return Fn;
}

void OpenMPIRBuilder::initialize() { initializeTypes(M); }

void OpenMPIRBuilder::finalize(Function *Fn, bool AllowExtractorSinking) {
  SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
  SmallVector<BasicBlock *, 32> Blocks;
  SmallVector<OutlineInfo, 16> DeferredOutlines;
  for (OutlineInfo &OI : OutlineInfos) {
    // Skip functions that have not been finalized yet; this may happen with
    // nested function generation.
    if (Fn && OI.getFunction() != Fn) {
      DeferredOutlines.push_back(OI);
      continue;
    }

    ParallelRegionBlockSet.clear();
    Blocks.clear();
    OI.collectBlocks(ParallelRegionBlockSet, Blocks);

    Function *OuterFn = OI.getFunction();
    CodeExtractorAnalysisCache CEAC(*OuterFn);
    CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
                            /* AggregateArgs */ false,
                            /* BlockFrequencyInfo */ nullptr,
                            /* BranchProbabilityInfo */ nullptr,
                            /* AssumptionCache */ nullptr,
                            /* AllowVarArgs */ true,
                            /* AllowAlloca */ true,
                            /* Suffix */ ".omp_par");

    LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n");
    LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName()
                      << " Exit: " << OI.ExitBB->getName() << "\n");
    assert(Extractor.isEligible() &&
           "Expected OpenMP outlining to be possible!");

    Function *OutlinedFn = Extractor.extractCodeRegion(CEAC);

    LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n");
    LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n");
    assert(OutlinedFn->getReturnType()->isVoidTy() &&
           "OpenMP outlined functions should not return a value!");

    // For compatibility with the clang CG we move the outlined function after
    // the one with the parallel region.
    OutlinedFn->removeFromParent();
    M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn);

    // Remove the artificial entry introduced by the extractor right away; we
    // made our own entry block after all.
    {
      BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock();
      assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB);
      assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry);
      if (AllowExtractorSinking) {
        // Move instructions from the to-be-deleted ArtificialEntry to the
        // entry basic block of the parallel region. CodeExtractor may have
        // sunk allocas/bitcasts for values that are solely used in the
        // outlined region and do not escape.
        assert(!ArtificialEntry.empty() &&
               "Expected instructions to sink in the outlined region");
        for (BasicBlock::iterator It = ArtificialEntry.begin(),
                                  End = ArtificialEntry.end();
             It != End;) {
          Instruction &I = *It;
          It++;

          if (I.isTerminator())
            continue;

          I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt());
        }
      }
      OI.EntryBB->moveBefore(&ArtificialEntry);
      ArtificialEntry.eraseFromParent();
    }
    assert(&OutlinedFn->getEntryBlock() == OI.EntryBB);
    assert(OutlinedFn && OutlinedFn->getNumUses() == 1);

    // Run a user callback, e.g. to add attributes.
    if (OI.PostOutlineCB)
      OI.PostOutlineCB(*OutlinedFn);
  }

  // Remove work items that have been completed.
  OutlineInfos = std::move(DeferredOutlines);
}

OpenMPIRBuilder::~OpenMPIRBuilder() {
  assert(OutlineInfos.empty() && "There must be no outstanding outlinings");
}

GlobalValue *OpenMPIRBuilder::createGlobalFlag(unsigned Value, StringRef Name) {
  IntegerType *I32Ty = Type::getInt32Ty(M.getContext());
  auto *GV =
      new GlobalVariable(M, I32Ty,
                         /* isConstant = */ true, GlobalValue::WeakODRLinkage,
                         ConstantInt::get(I32Ty, Value), Name);

  return GV;
}

Value *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr,
                                         IdentFlag LocFlags,
                                         unsigned Reserve2Flags) {
  // Enable "C-mode".
  LocFlags |= OMP_IDENT_FLAG_KMPC;

  Value *&Ident =
      IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}];
  if (!Ident) {
    Constant *I32Null = ConstantInt::getNullValue(Int32);
    Constant *IdentData[] = {
        I32Null, ConstantInt::get(Int32, uint32_t(LocFlags)),
        ConstantInt::get(Int32, Reserve2Flags), I32Null, SrcLocStr};
    Constant *Initializer =
        ConstantStruct::get(OpenMPIRBuilder::Ident, IdentData);

    // Look for existing encoding of the location + flags, not needed but
    // minimizes the difference to the existing solution while we transition.
    for (GlobalVariable &GV : M.getGlobalList())
      if (GV.getValueType() == OpenMPIRBuilder::Ident && GV.hasInitializer())
        if (GV.getInitializer() == Initializer)
          Ident = &GV;

    if (!Ident) {
      auto *GV = new GlobalVariable(
          M, OpenMPIRBuilder::Ident,
          /* isConstant = */ true, GlobalValue::PrivateLinkage, Initializer, "",
          nullptr, GlobalValue::NotThreadLocal,
          M.getDataLayout().getDefaultGlobalsAddressSpace());
      GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
      GV->setAlignment(Align(8));
      Ident = GV;
    }
  }

  return Builder.CreatePointerCast(Ident, IdentPtr);
}
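// An ident_t location created above typically prints as (a sketch; the string
// operand and flag value depend on the call site):
//
//   @0 = private unnamed_addr constant %struct.ident_t
//        { i32 0, i32 2, i32 0, i32 0, i8* getelementptr ... }, align 8
//
// where the second field holds the IdentFlag bits (2 == OMP_IDENT_FLAG_KMPC).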
Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr) {
  Constant *&SrcLocStr = SrcLocStrMap[LocStr];
  if (!SrcLocStr) {
    Constant *Initializer =
        ConstantDataArray::getString(M.getContext(), LocStr);

    // Look for existing encoding of the location, not needed but minimizes the
    // difference to the existing solution while we transition.
    for (GlobalVariable &GV : M.getGlobalList())
      if (GV.isConstant() && GV.hasInitializer() &&
          GV.getInitializer() == Initializer)
        return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr);

    SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "",
                                              /* AddressSpace */ 0, &M);
  }
  return SrcLocStr;
}

Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName,
                                                StringRef FileName,
                                                unsigned Line,
                                                unsigned Column) {
  SmallString<128> Buffer;
  Buffer.push_back(';');
  Buffer.append(FileName);
  Buffer.push_back(';');
  Buffer.append(FunctionName);
  Buffer.push_back(';');
  Buffer.append(std::to_string(Line));
  Buffer.push_back(';');
  Buffer.append(std::to_string(Column));
  Buffer.push_back(';');
  Buffer.push_back(';');
  return getOrCreateSrcLocStr(Buffer.str());
}

Constant *OpenMPIRBuilder::getOrCreateDefaultSrcLocStr() {
  return getOrCreateSrcLocStr(";unknown;unknown;0;0;;");
}

Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(DebugLoc DL, Function *F) {
  DILocation *DIL = DL.get();
  if (!DIL)
    return getOrCreateDefaultSrcLocStr();
  StringRef FileName = M.getName();
  if (DIFile *DIF = DIL->getFile())
    if (Optional<StringRef> Source = DIF->getSource())
      FileName = *Source;
  StringRef Function = DIL->getScope()->getSubprogram()->getName();
  if (Function.empty() && F)
    Function = F->getName();
  return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(),
                              DIL->getColumn());
}

Constant *
OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc) {
  return getOrCreateSrcLocStr(Loc.DL, Loc.IP.getBlock()->getParent());
}
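// The strings built above follow the libomp convention
// ";<file>;<function>;<line>;<column>;;", e.g. ";t.c;main;3;10;;"; when no
// debug location is available the default ";unknown;unknown;0;0;;" is used.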
"omp_global_thread_num"); 361 } 362 363 OpenMPIRBuilder::InsertPointTy 364 OpenMPIRBuilder::createBarrier(const LocationDescription &Loc, Directive DK, 365 bool ForceSimpleCall, bool CheckCancelFlag) { 366 if (!updateToLocation(Loc)) 367 return Loc.IP; 368 return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag); 369 } 370 371 OpenMPIRBuilder::InsertPointTy 372 OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind, 373 bool ForceSimpleCall, bool CheckCancelFlag) { 374 // Build call __kmpc_cancel_barrier(loc, thread_id) or 375 // __kmpc_barrier(loc, thread_id); 376 377 IdentFlag BarrierLocFlags; 378 switch (Kind) { 379 case OMPD_for: 380 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR; 381 break; 382 case OMPD_sections: 383 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS; 384 break; 385 case OMPD_single: 386 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE; 387 break; 388 case OMPD_barrier: 389 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL; 390 break; 391 default: 392 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL; 393 break; 394 } 395 396 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); 397 Value *Args[] = {getOrCreateIdent(SrcLocStr, BarrierLocFlags), 398 getOrCreateThreadID(getOrCreateIdent(SrcLocStr))}; 399 400 // If we are in a cancellable parallel region, barriers are cancellation 401 // points. 402 // TODO: Check why we would force simple calls or to ignore the cancel flag. 403 bool UseCancelBarrier = 404 !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel); 405 406 Value *Result = 407 Builder.CreateCall(getOrCreateRuntimeFunctionPtr( 408 UseCancelBarrier ? OMPRTL___kmpc_cancel_barrier 409 : OMPRTL___kmpc_barrier), 410 Args); 411 412 if (UseCancelBarrier && CheckCancelFlag) 413 emitCancelationCheckImpl(Result, OMPD_parallel); 414 415 return Builder.saveIP(); 416 } 417 418 OpenMPIRBuilder::InsertPointTy 419 OpenMPIRBuilder::createCancel(const LocationDescription &Loc, 420 Value *IfCondition, 421 omp::Directive CanceledDirective) { 422 if (!updateToLocation(Loc)) 423 return Loc.IP; 424 425 // LLVM utilities like blocks with terminators. 426 auto *UI = Builder.CreateUnreachable(); 427 428 Instruction *ThenTI = UI, *ElseTI = nullptr; 429 if (IfCondition) 430 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI); 431 Builder.SetInsertPoint(ThenTI); 432 433 Value *CancelKind = nullptr; 434 switch (CanceledDirective) { 435 #define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value) \ 436 case DirectiveEnum: \ 437 CancelKind = Builder.getInt32(Value); \ 438 break; 439 #include "llvm/Frontend/OpenMP/OMPKinds.def" 440 default: 441 llvm_unreachable("Unknown cancel kind!"); 442 } 443 444 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); 445 Value *Ident = getOrCreateIdent(SrcLocStr); 446 Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind}; 447 Value *Result = Builder.CreateCall( 448 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args); 449 auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) { 450 if (CanceledDirective == OMPD_parallel) { 451 IRBuilder<>::InsertPointGuard IPG(Builder); 452 Builder.restoreIP(IP); 453 createBarrier(LocationDescription(Builder.saveIP(), Loc.DL), 454 omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false, 455 /* CheckCancelFlag */ false); 456 } 457 }; 458 459 // The actual cancel logic is shared with others, e.g., cancel_barriers. 
OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createCancel(const LocationDescription &Loc,
                              Value *IfCondition,
                              omp::Directive CanceledDirective) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  // LLVM utilities expect blocks to have terminators.
  auto *UI = Builder.CreateUnreachable();

  Instruction *ThenTI = UI, *ElseTI = nullptr;
  if (IfCondition)
    SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
  Builder.SetInsertPoint(ThenTI);

  Value *CancelKind = nullptr;
  switch (CanceledDirective) {
#define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value)                       \
  case DirectiveEnum:                                                          \
    CancelKind = Builder.getInt32(Value);                                      \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  default:
    llvm_unreachable("Unknown cancel kind!");
  }

  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
  Value *Ident = getOrCreateIdent(SrcLocStr);
  Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind};
  Value *Result = Builder.CreateCall(
      getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args);
  auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) {
    if (CanceledDirective == OMPD_parallel) {
      IRBuilder<>::InsertPointGuard IPG(Builder);
      Builder.restoreIP(IP);
      createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
                    omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false,
                    /* CheckCancelFlag */ false);
    }
  };

  // The actual cancel logic is shared with others, e.g., cancel_barriers.
  emitCancelationCheckImpl(Result, CanceledDirective, ExitCB);

  // Update the insertion point and remove the terminator we introduced.
  Builder.SetInsertPoint(UI->getParent());
  UI->eraseFromParent();

  return Builder.saveIP();
}

void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag,
                                               omp::Directive CanceledDirective,
                                               FinalizeCallbackTy ExitCB) {
  assert(isLastFinalizationInfoCancellable(CanceledDirective) &&
         "Unexpected cancellation!");

  // For a cancel barrier we create two new blocks.
  BasicBlock *BB = Builder.GetInsertBlock();
  BasicBlock *NonCancellationBlock;
  if (Builder.GetInsertPoint() == BB->end()) {
    // TODO: This branch will not be needed once we moved to the
    // OpenMPIRBuilder codegen completely.
    NonCancellationBlock = BasicBlock::Create(
        BB->getContext(), BB->getName() + ".cont", BB->getParent());
  } else {
    NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint());
    BB->getTerminator()->eraseFromParent();
    Builder.SetInsertPoint(BB);
  }
  BasicBlock *CancellationBlock = BasicBlock::Create(
      BB->getContext(), BB->getName() + ".cncl", BB->getParent());

  // Jump to them based on the return value.
  Value *Cmp = Builder.CreateIsNull(CancelFlag);
  Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock,
                       /* TODO weight */ nullptr, nullptr);

  // From the cancellation block we finalize all variables and go to the
  // post finalization block that is known to the FiniCB callback.
  Builder.SetInsertPoint(CancellationBlock);
  if (ExitCB)
    ExitCB(Builder.saveIP());
  auto &FI = FinalizationStack.back();
  FI.FiniCB(Builder.saveIP());

  // The continuation block is where code generation continues.
  Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin());
}
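// The check above materializes control flow of roughly this shape (a sketch):
//
//   %cmp = icmp eq i32 %cancel_flag, 0
//   br i1 %cmp, label %cont, label %cncl
// cncl:
//   ; run finalization callbacks, then leave the region
// cont:
//   ; normal code generation continues here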
IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel(
    const LocationDescription &Loc, InsertPointTy OuterAllocaIP,
    BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
    FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads,
    omp::ProcBindKind ProcBind, bool IsCancellable) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
  Value *Ident = getOrCreateIdent(SrcLocStr);
  Value *ThreadID = getOrCreateThreadID(Ident);

  if (NumThreads) {
    // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads)
    Value *Args[] = {
        Ident, ThreadID,
        Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)};
    Builder.CreateCall(
        getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args);
  }

  if (ProcBind != OMP_PROC_BIND_default) {
    // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind)
    Value *Args[] = {
        Ident, ThreadID,
        ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)};
    Builder.CreateCall(
        getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args);
  }

  BasicBlock *InsertBB = Builder.GetInsertBlock();
  Function *OuterFn = InsertBB->getParent();

  // Save the outer alloca block because the insertion iterator may get
  // invalidated and we still need this later.
  BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock();

  // Vector to remember instructions we used only during the modeling but which
  // we want to delete at the end.
  SmallVector<Instruction *, 4> ToBeDeleted;

  // Change the location to the outer alloca insertion point to create and
  // initialize the allocas we pass into the parallel region.
  Builder.restoreIP(OuterAllocaIP);
  AllocaInst *TIDAddr = Builder.CreateAlloca(Int32, nullptr, "tid.addr");
  AllocaInst *ZeroAddr = Builder.CreateAlloca(Int32, nullptr, "zero.addr");

  // If there is an if condition we actually use the TIDAddr and ZeroAddr in
  // the program, otherwise we only need them for modeling purposes to get the
  // associated arguments in the outlined function. In the former case,
  // initialize the allocas properly; in the latter case, delete them later.
  if (IfCondition) {
    Builder.CreateStore(Constant::getNullValue(Int32), TIDAddr);
    Builder.CreateStore(Constant::getNullValue(Int32), ZeroAddr);
  } else {
    ToBeDeleted.push_back(TIDAddr);
    ToBeDeleted.push_back(ZeroAddr);
  }

  // Create an artificial insertion point that will also ensure the blocks we
  // are about to split do not become degenerate.
  auto *UI = new UnreachableInst(Builder.getContext(), InsertBB);

  Instruction *ThenTI = UI, *ElseTI = nullptr;
  if (IfCondition)
    SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);

  BasicBlock *ThenBB = ThenTI->getParent();
  BasicBlock *PRegEntryBB = ThenBB->splitBasicBlock(ThenTI, "omp.par.entry");
  BasicBlock *PRegBodyBB =
      PRegEntryBB->splitBasicBlock(ThenTI, "omp.par.region");
  BasicBlock *PRegPreFiniBB =
      PRegBodyBB->splitBasicBlock(ThenTI, "omp.par.pre_finalize");
  BasicBlock *PRegExitBB =
      PRegPreFiniBB->splitBasicBlock(ThenTI, "omp.par.exit");

  auto FiniCBWrapper = [&](InsertPointTy IP) {
    // Hide "open-ended" blocks from the given FiniCB by setting the right jump
    // target to the region exit block.
    if (IP.getBlock()->end() == IP.getPoint()) {
      IRBuilder<>::InsertPointGuard IPG(Builder);
      Builder.restoreIP(IP);
      Instruction *I = Builder.CreateBr(PRegExitBB);
      IP = InsertPointTy(I->getParent(), I->getIterator());
    }
    assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 &&
           IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB &&
           "Unexpected insertion point for finalization call!");
    return FiniCB(IP);
  };

  FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable});

  // Generate the privatization allocas in the block that will become the entry
  // of the outlined function.
  Builder.SetInsertPoint(PRegEntryBB->getTerminator());
  InsertPointTy InnerAllocaIP = Builder.saveIP();

  AllocaInst *PrivTIDAddr =
      Builder.CreateAlloca(Int32, nullptr, "tid.addr.local");
  Instruction *PrivTID = Builder.CreateLoad(Int32, PrivTIDAddr, "tid");

  // Add some fake uses for OpenMP provided arguments.
  ToBeDeleted.push_back(Builder.CreateLoad(Int32, TIDAddr, "tid.addr.use"));
  Instruction *ZeroAddrUse =
      Builder.CreateLoad(Int32, ZeroAddr, "zero.addr.use");
  ToBeDeleted.push_back(ZeroAddrUse);

  // ThenBB
  //   |
  //   V
  // PRegionEntryBB         <- Privatization allocas are placed here.
  //   |
  //   V
  // PRegionBodyBB          <- BodyGen is invoked here.
  //   |
  //   V
  // PRegPreFiniBB          <- The block we will start finalization from.
  //   |
  //   V
  // PRegionExitBB          <- A common exit to simplify block collection.
  //

  LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n");

  // Let the caller create the body.
  assert(BodyGenCB && "Expected body generation callback!");
  InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin());
  BodyGenCB(InnerAllocaIP, CodeGenIP, *PRegPreFiniBB);

  LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n");

  FunctionCallee RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call);
  if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
    if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
      llvm::LLVMContext &Ctx = F->getContext();
      MDBuilder MDB(Ctx);
      // Annotate the callback behavior of the __kmpc_fork_call:
      //  - The callback callee is argument number 2 (microtask).
      //  - The first two arguments of the callback callee are unknown (-1).
      //  - All variadic arguments to the __kmpc_fork_call are passed to the
      //    callback callee.
      F->addMetadata(
          llvm::LLVMContext::MD_callback,
          *llvm::MDNode::get(
              Ctx, {MDB.createCallbackEncoding(2, {-1, -1},
                                               /* VarArgsArePassed */ true)}));
    }
  }

  OutlineInfo OI;
  OI.PostOutlineCB = [=](Function &OutlinedFn) {
    // Add some known attributes.
    OutlinedFn.addParamAttr(0, Attribute::NoAlias);
    OutlinedFn.addParamAttr(1, Attribute::NoAlias);
    OutlinedFn.addFnAttr(Attribute::NoUnwind);
    OutlinedFn.addFnAttr(Attribute::NoRecurse);

    assert(OutlinedFn.arg_size() >= 2 &&
           "Expected at least tid and bounded tid as arguments");
    unsigned NumCapturedVars =
        OutlinedFn.arg_size() - /* tid & bounded tid */ 2;

    CallInst *CI = cast<CallInst>(OutlinedFn.user_back());
    CI->getParent()->setName("omp_parallel");
    Builder.SetInsertPoint(CI);

    // Build call __kmpc_fork_call(Ident, n, microtask, var1, .., varn);
    Value *ForkCallArgs[] = {
        Ident, Builder.getInt32(NumCapturedVars),
        Builder.CreateBitCast(&OutlinedFn, ParallelTaskPtr)};

    SmallVector<Value *, 16> RealArgs;
    RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs));
    RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end());

    Builder.CreateCall(RTLFn, RealArgs);

    LLVM_DEBUG(dbgs() << "With fork_call placed: "
                      << *Builder.GetInsertBlock()->getParent() << "\n");

    InsertPointTy ExitIP(PRegExitBB, PRegExitBB->end());

    // Initialize the local TID stack location with the argument value.
    Builder.SetInsertPoint(PrivTID);
    Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin();
    Builder.CreateStore(Builder.CreateLoad(Int32, OutlinedAI), PrivTIDAddr);

    // If no "if" clause was present we do not need the call created during
    // outlining, otherwise we reuse it in the serialized parallel region.
    if (!ElseTI) {
      CI->eraseFromParent();
    } else {

      // If an "if" clause was present we are now generating the serialized
      // version into the "else" branch.
      Builder.SetInsertPoint(ElseTI);

      // Build call __kmpc_serialized_parallel(&Ident, GTid);
      Value *SerializedParallelCallArgs[] = {Ident, ThreadID};
      Builder.CreateCall(
          getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_serialized_parallel),
          SerializedParallelCallArgs);

      // OutlinedFn(&GTid, &zero, CapturedStruct);
      CI->removeFromParent();
      Builder.Insert(CI);

      // __kmpc_end_serialized_parallel(&Ident, GTid);
      Value *EndArgs[] = {Ident, ThreadID};
      Builder.CreateCall(
          getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_serialized_parallel),
          EndArgs);

      LLVM_DEBUG(dbgs() << "With serialized parallel region: "
                        << *Builder.GetInsertBlock()->getParent() << "\n");
    }

    for (Instruction *I : ToBeDeleted)
      I->eraseFromParent();
  };
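  // Once outlining has run, the callback above leaves a call site of roughly
  // this shape behind (a sketch; the captured arguments vary per region):
  //
  //   call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...)
  //       @__kmpc_fork_call(%struct.ident_t* @1, i32 %num.captured,
  //                         void (i32*, i32*, ...)* %omp.par.outlined, ...)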
  // Adjust the finalization stack, verify the adjustment, and call the
  // finalize function a last time to finalize values between the pre-fini
  // block and the exit block if we left the parallel "the normal way".
  auto FiniInfo = FinalizationStack.pop_back_val();
  (void)FiniInfo;
  assert(FiniInfo.DK == OMPD_parallel &&
         "Unexpected finalization stack state!");

  Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator();

  InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator());
  FiniCB(PreFiniIP);

  OI.EntryBB = PRegEntryBB;
  OI.ExitBB = PRegExitBB;

  SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
  SmallVector<BasicBlock *, 32> Blocks;
  OI.collectBlocks(ParallelRegionBlockSet, Blocks);

  // Ensure a single exit node for the outlined region by creating one.
  // We might have multiple incoming edges to the exit now due to finalizations,
  // e.g., cancel calls that cause the control flow to leave the region.
  BasicBlock *PRegOutlinedExitBB = PRegExitBB;
  PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt());
  PRegOutlinedExitBB->setName("omp.par.outlined.exit");
  Blocks.push_back(PRegOutlinedExitBB);

  CodeExtractorAnalysisCache CEAC(*OuterFn);
  CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
                          /* AggregateArgs */ false,
                          /* BlockFrequencyInfo */ nullptr,
                          /* BranchProbabilityInfo */ nullptr,
                          /* AssumptionCache */ nullptr,
                          /* AllowVarArgs */ true,
                          /* AllowAlloca */ true,
                          /* Suffix */ ".omp_par");

  // Find inputs to, outputs from the code region.
  BasicBlock *CommonExit = nullptr;
  SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands;
  Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit);
  Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands);

  LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n");

  FunctionCallee TIDRTLFn =
      getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num);

  auto PrivHelper = [&](Value &V) {
    if (&V == TIDAddr || &V == ZeroAddr)
      return;

    SetVector<Use *> Uses;
    for (Use &U : V.uses())
      if (auto *UserI = dyn_cast<Instruction>(U.getUser()))
        if (ParallelRegionBlockSet.count(UserI->getParent()))
          Uses.insert(&U);

    // __kmpc_fork_call expects extra arguments as pointers. If the input
    // already has a pointer type, everything is fine. Otherwise, store the
    // value onto stack and load it back inside the to-be-outlined region. This
    // will ensure only the pointer will be passed to the function.
    // FIXME: if there are more than 15 trailing arguments, they must be
    // additionally packed in a struct.
    Value *Inner = &V;
    if (!V.getType()->isPointerTy()) {
      IRBuilder<>::InsertPointGuard Guard(Builder);
      LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n");

      Builder.restoreIP(OuterAllocaIP);
      Value *Ptr =
          Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded");

      // Store to stack at end of the block that currently branches to the
      // entry block of the to-be-outlined region.
      Builder.SetInsertPoint(InsertBB,
                             InsertBB->getTerminator()->getIterator());
      Builder.CreateStore(&V, Ptr);

      // Load back next to allocations in the to-be-outlined region.
      Builder.restoreIP(InnerAllocaIP);
      Inner = Builder.CreateLoad(V.getType(), Ptr);
    }

    Value *ReplacementValue = nullptr;
    CallInst *CI = dyn_cast<CallInst>(&V);
    if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) {
      ReplacementValue = PrivTID;
    } else {
      Builder.restoreIP(
          PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue));
      assert(ReplacementValue &&
             "Expected copy/create callback to set replacement value!");
      if (ReplacementValue == &V)
        return;
    }

    for (Use *UPtr : Uses)
      UPtr->set(ReplacementValue);
  };

  // Reset the inner alloca insertion as it will be used for loading the values
  // wrapped into pointers before passing them into the to-be-outlined region.
  // Configure it to insert immediately after the fake use of the zero address
  // so that the loads are available in the generated body and so that the
  // OpenMP-related values (thread ID and zero address pointers) remain leading
  // in the argument list.
  InnerAllocaIP = IRBuilder<>::InsertPoint(
      ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator());

  // Reset the outer alloca insertion point to the entry of the relevant block
  // in case it was invalidated.
  OuterAllocaIP = IRBuilder<>::InsertPoint(
      OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt());

  for (Value *Input : Inputs) {
    LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n");
    PrivHelper(*Input);
  }
  LLVM_DEBUG({
    for (Value *Output : Outputs)
      LLVM_DEBUG(dbgs() << "Captured output: " << *Output << "\n");
  });
  assert(Outputs.empty() &&
         "OpenMP outlining should not produce live-out values!");

  LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n");
  LLVM_DEBUG({
    for (auto *BB : Blocks)
      dbgs() << " PBR: " << BB->getName() << "\n";
  });

  // Register the outlined info.
  addOutlineInfo(std::move(OI));

  InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end());
  UI->eraseFromParent();

  return AfterIP;
}
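// Typical use from a frontend looks roughly like this (a sketch; assumes an
// initialized OpenMPIRBuilder OMPB and suitable callbacks):
//
//   auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
//                        BasicBlock &ContinuationBB) { /* emit body */ };
//   Builder.restoreIP(OMPB.createParallel(
//       Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, /*IfCondition=*/nullptr,
//       /*NumThreads=*/nullptr, OMP_PROC_BIND_default,
//       /*IsCancellable=*/false));
//
// The actual outlining into the microtask happens later, in finalize().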
void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) {
  // Build call void __kmpc_flush(ident_t *loc)
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
  Value *Args[] = {getOrCreateIdent(SrcLocStr)};

  Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args);
}

void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) {
  if (!updateToLocation(Loc))
    return;
  emitFlush(Loc);
}

void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) {
  // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
  // global_tid);
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
  Value *Ident = getOrCreateIdent(SrcLocStr);
  Value *Args[] = {Ident, getOrCreateThreadID(Ident)};

  // Ignore return result until untied tasks are supported.
  Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait),
                     Args);
}

void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) {
  if (!updateToLocation(Loc))
    return;
  emitTaskwaitImpl(Loc);
}

void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) {
  // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
  Value *Ident = getOrCreateIdent(SrcLocStr);
  Constant *I32Null = ConstantInt::getNullValue(Int32);
  Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null};

  Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield),
                     Args);
}

void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) {
  if (!updateToLocation(Loc))
    return;
  emitTaskyieldImpl(Loc);
}

OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSections(
    const LocationDescription &Loc, InsertPointTy AllocaIP,
    ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB,
    FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  auto FiniCBWrapper = [&](InsertPointTy IP) {
    if (IP.getBlock()->end() != IP.getPoint())
      return FiniCB(IP);
    // This must be done, otherwise any nested constructs using
    // FinalizeOMPRegion will fail because that function requires the
    // Finalization Basic Block to have a terminator, which is already removed
    // by EmitOMPRegionBody.
    // IP is currently at the cancellation block.
    // We need to backtrack to the condition block to fetch
    // the exit block and create a branch from the cancellation
    // block to the exit block.
    IRBuilder<>::InsertPointGuard IPG(Builder);
    Builder.restoreIP(IP);
    auto *CaseBB = IP.getBlock()->getSinglePredecessor();
    auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
    auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
    Instruction *I = Builder.CreateBr(ExitBB);
    IP = InsertPointTy(I->getParent(), I->getIterator());
    return FiniCB(IP);
  };

  FinalizationStack.push_back({FiniCBWrapper, OMPD_sections, IsCancellable});

  // Each section is emitted as a switch case.
  // Each finalization callback is handled from clang.EmitOMPSectionDirective()
  // -> OMP.createSection() which generates the IR for each section.
  // Iterate through all sections and emit a switch construct:
  // switch (IV) {
  //   case 0:
  //     <SectionStmt[0]>;
  //     break;
  // ...
  //   case <NumSection> - 1:
  //     <SectionStmt[<NumSection> - 1]>;
  //     break;
  // }
  // ...
  // section_loop.after:
  //   <FiniCB>;
  auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) {
    auto *CurFn = CodeGenIP.getBlock()->getParent();
    auto *ForIncBB = CodeGenIP.getBlock()->getSingleSuccessor();
    auto *ForExitBB = CodeGenIP.getBlock()
                          ->getSinglePredecessor()
                          ->getTerminator()
                          ->getSuccessor(1);
    SwitchInst *SwitchStmt = Builder.CreateSwitch(IndVar, ForIncBB);
    Builder.restoreIP(CodeGenIP);
    unsigned CaseNumber = 0;
    for (auto SectionCB : SectionCBs) {
      auto *CaseBB = BasicBlock::Create(M.getContext(),
                                        "omp_section_loop.body.case", CurFn);
      SwitchStmt->addCase(Builder.getInt32(CaseNumber), CaseBB);
      Builder.SetInsertPoint(CaseBB);
      SectionCB(InsertPointTy(), Builder.saveIP(), *ForExitBB);
      CaseNumber++;
    }
    // Remove the existing terminator from the body BB since there can be no
    // terminators after switch/case.
    CodeGenIP.getBlock()->getTerminator()->eraseFromParent();
  };
  // Loop body ends here.
  // LowerBound, UpperBound, and Stride for createCanonicalLoop.
  Type *I32Ty = Type::getInt32Ty(M.getContext());
  Value *LB = ConstantInt::get(I32Ty, 0);
  Value *UB = ConstantInt::get(I32Ty, SectionCBs.size());
  Value *ST = ConstantInt::get(I32Ty, 1);
  llvm::CanonicalLoopInfo *LoopInfo = createCanonicalLoop(
      Loc, LoopBodyGenCB, LB, UB, ST, true, false, AllocaIP, "section_loop");
  Builder.SetInsertPoint(AllocaIP.getBlock()->getTerminator());
  AllocaIP = Builder.saveIP();
  InsertPointTy AfterIP =
      applyStaticWorkshareLoop(Loc.DL, LoopInfo, AllocaIP, !IsNowait);
  BasicBlock *LoopAfterBB = AfterIP.getBlock();
  Instruction *SplitPos = LoopAfterBB->getTerminator();
  if (!isa_and_nonnull<BranchInst>(SplitPos))
    SplitPos = new UnreachableInst(Builder.getContext(), LoopAfterBB);
  // ExitBB after LoopAfterBB because LoopAfterBB is used for FinalizationCB,
  // which requires a BB with a branch.
  BasicBlock *ExitBB =
      LoopAfterBB->splitBasicBlock(SplitPos, "omp_sections.end");
  SplitPos->eraseFromParent();

  // Apply the finalization callback in LoopAfterBB.
  auto FiniInfo = FinalizationStack.pop_back_val();
  assert(FiniInfo.DK == OMPD_sections &&
         "Unexpected finalization stack state!");
  Builder.SetInsertPoint(LoopAfterBB->getTerminator());
  FiniInfo.FiniCB(Builder.saveIP());
  Builder.SetInsertPoint(ExitBB);

  return Builder.saveIP();
}
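// Net effect (a sketch): the sections construct becomes a workshared canonical
// loop over the section indices [0, NumSections), whose body is a switch that
// dispatches each index to its section; the static workshare schedule then
// distributes the sections across the threads of the team.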
OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createSection(const LocationDescription &Loc,
                               BodyGenCallbackTy BodyGenCB,
                               FinalizeCallbackTy FiniCB) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  auto FiniCBWrapper = [&](InsertPointTy IP) {
    if (IP.getBlock()->end() != IP.getPoint())
      return FiniCB(IP);
    // This must be done, otherwise any nested constructs using
    // FinalizeOMPRegion will fail because that function requires the
    // Finalization Basic Block to have a terminator, which is already removed
    // by EmitOMPRegionBody.
    // IP is currently at the cancellation block.
    // We need to backtrack to the condition block to fetch
    // the exit block and create a branch from the cancellation
    // block to the exit block.
    IRBuilder<>::InsertPointGuard IPG(Builder);
    Builder.restoreIP(IP);
    auto *CaseBB = Loc.IP.getBlock();
    auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
    auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
    Instruction *I = Builder.CreateBr(ExitBB);
    IP = InsertPointTy(I->getParent(), I->getIterator());
    return FiniCB(IP);
  };

  Directive OMPD = Directive::OMPD_sections;
  // Since we are using the Finalization Callback here, HasFinalize
  // and IsCancellable have to be true.
  return EmitOMPInlinedRegion(OMPD, nullptr, nullptr, BodyGenCB, FiniCBWrapper,
                              /*Conditional*/ false, /*hasFinalize*/ true,
                              /*IsCancellable*/ true);
}

/// Create a function with a unique name and a "void (i8*, i8*)" signature in
/// the given module and return it.
Function *getFreshReductionFunc(Module &M) {
  Type *VoidTy = Type::getVoidTy(M.getContext());
  Type *Int8PtrTy = Type::getInt8PtrTy(M.getContext());
  auto *FuncTy =
      FunctionType::get(VoidTy, {Int8PtrTy, Int8PtrTy}, /* IsVarArg */ false);
  return Function::Create(FuncTy, GlobalVariable::InternalLinkage,
                          M.getDataLayout().getDefaultGlobalsAddressSpace(),
                          ".omp.reduction.func", &M);
}

OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createReductions(
    const LocationDescription &Loc, InsertPointTy AllocaIP,
    ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait) {
  for (const ReductionInfo &RI : ReductionInfos) {
    (void)RI;
    assert(RI.Variable && "expected non-null variable");
    assert(RI.PrivateVariable && "expected non-null private variable");
    assert(RI.ReductionGen && "expected non-null reduction generator callback");
    assert(RI.Variable->getType() == RI.PrivateVariable->getType() &&
           "expected variables and their private equivalents to have the same "
           "type");
    assert(RI.Variable->getType()->isPointerTy() &&
           "expected variables to be pointers");
  }

  if (!updateToLocation(Loc))
    return InsertPointTy();

  BasicBlock *InsertBlock = Loc.IP.getBlock();
  BasicBlock *ContinuationBlock =
      InsertBlock->splitBasicBlock(Loc.IP.getPoint(), "reduce.finalize");
  InsertBlock->getTerminator()->eraseFromParent();

  // Create and populate array of type-erased pointers to private reduction
  // values.
  unsigned NumReductions = ReductionInfos.size();
  Type *RedArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumReductions);
  Builder.restoreIP(AllocaIP);
  Value *RedArray = Builder.CreateAlloca(RedArrayTy, nullptr, "red.array");

  Builder.SetInsertPoint(InsertBlock, InsertBlock->end());

  for (auto En : enumerate(ReductionInfos)) {
    unsigned Index = En.index();
    const ReductionInfo &RI = En.value();
    Value *RedArrayElemPtr = Builder.CreateConstInBoundsGEP2_64(
        RedArrayTy, RedArray, 0, Index, "red.array.elem." + Twine(Index));
    Value *Casted =
        Builder.CreateBitCast(RI.PrivateVariable, Builder.getInt8PtrTy(),
                              "private.red.var." + Twine(Index) + ".casted");
    Builder.CreateStore(Casted, RedArrayElemPtr);
  }

  // Emit a call to the runtime function that orchestrates the reduction.
  // Declare the reduction function in the process.
  Function *Func = Builder.GetInsertBlock()->getParent();
  Module *Module = Func->getParent();
  Value *RedArrayPtr =
      Builder.CreateBitCast(RedArray, Builder.getInt8PtrTy(), "red.array.ptr");
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
  bool CanGenerateAtomic =
      llvm::all_of(ReductionInfos, [](const ReductionInfo &RI) {
        return RI.AtomicReductionGen;
      });
  Value *Ident = getOrCreateIdent(
      SrcLocStr, CanGenerateAtomic ? IdentFlag::OMP_IDENT_FLAG_ATOMIC_REDUCE
                                   : IdentFlag(0));
  Value *ThreadId = getOrCreateThreadID(Ident);
  Constant *NumVariables = Builder.getInt32(NumReductions);
  const DataLayout &DL = Module->getDataLayout();
  unsigned RedArrayByteSize = DL.getTypeStoreSize(RedArrayTy);
  Constant *RedArraySize = Builder.getInt64(RedArrayByteSize);
  Function *ReductionFunc = getFreshReductionFunc(*Module);
  Value *Lock = getOMPCriticalRegionLock(".reduction");
  Function *ReduceFunc = getOrCreateRuntimeFunctionPtr(
      IsNoWait ? RuntimeFunction::OMPRTL___kmpc_reduce_nowait
               : RuntimeFunction::OMPRTL___kmpc_reduce);
  CallInst *ReduceCall =
      Builder.CreateCall(ReduceFunc,
                         {Ident, ThreadId, NumVariables, RedArraySize,
                          RedArrayPtr, ReductionFunc, Lock},
                         "reduce");

  // Create final reduction entry blocks for the atomic and non-atomic case.
  // Emit IR that dispatches control flow to one of the blocks based on the
  // reduction supporting the atomic mode.
  BasicBlock *NonAtomicRedBlock =
      BasicBlock::Create(Module->getContext(), "reduce.switch.nonatomic", Func);
  BasicBlock *AtomicRedBlock =
      BasicBlock::Create(Module->getContext(), "reduce.switch.atomic", Func);
  SwitchInst *Switch =
      Builder.CreateSwitch(ReduceCall, ContinuationBlock, /* NumCases */ 2);
  Switch->addCase(Builder.getInt32(1), NonAtomicRedBlock);
  Switch->addCase(Builder.getInt32(2), AtomicRedBlock);

  // Populate the non-atomic reduction using the elementwise reduction function.
  // This loads the elements from the global and private variables and reduces
  // them before storing back the result to the global variable.
  Builder.SetInsertPoint(NonAtomicRedBlock);
  for (auto En : enumerate(ReductionInfos)) {
    const ReductionInfo &RI = En.value();
    Type *ValueType = RI.ElementType;
    Value *RedValue = Builder.CreateLoad(ValueType, RI.Variable,
                                         "red.value." + Twine(En.index()));
    Value *PrivateRedValue =
        Builder.CreateLoad(ValueType, RI.PrivateVariable,
                           "red.private.value." + Twine(En.index()));
    Value *Reduced;
    Builder.restoreIP(
        RI.ReductionGen(Builder.saveIP(), RedValue, PrivateRedValue, Reduced));
    if (!Builder.GetInsertBlock())
      return InsertPointTy();
    Builder.CreateStore(Reduced, RI.Variable);
  }
  Function *EndReduceFunc = getOrCreateRuntimeFunctionPtr(
      IsNoWait ? RuntimeFunction::OMPRTL___kmpc_end_reduce_nowait
               : RuntimeFunction::OMPRTL___kmpc_end_reduce);
  Builder.CreateCall(EndReduceFunc, {Ident, ThreadId, Lock});
  Builder.CreateBr(ContinuationBlock);

  // Populate the atomic reduction using the atomic elementwise reduction
  // function. There are no loads/stores here because they will be happening
  // inside the atomic elementwise reduction.
  Builder.SetInsertPoint(AtomicRedBlock);
  if (CanGenerateAtomic) {
    for (const ReductionInfo &RI : ReductionInfos) {
      Builder.restoreIP(RI.AtomicReductionGen(Builder.saveIP(), RI.ElementType,
                                              RI.Variable, RI.PrivateVariable));
      if (!Builder.GetInsertBlock())
        return InsertPointTy();
    }
    Builder.CreateBr(ContinuationBlock);
  } else {
    Builder.CreateUnreachable();
  }

  // Populate the outlined reduction function using the elementwise reduction
  // function. Partial values are extracted from the type-erased array of
  // pointers to private variables.
  BasicBlock *ReductionFuncBlock =
      BasicBlock::Create(Module->getContext(), "", ReductionFunc);
  Builder.SetInsertPoint(ReductionFuncBlock);
  Value *LHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(0),
                                             RedArrayTy->getPointerTo());
  Value *RHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(1),
                                             RedArrayTy->getPointerTo());
  for (auto En : enumerate(ReductionInfos)) {
    const ReductionInfo &RI = En.value();
    Value *LHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
        RedArrayTy, LHSArrayPtr, 0, En.index());
    Value *LHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), LHSI8PtrPtr);
    Value *LHSPtr = Builder.CreateBitCast(LHSI8Ptr, RI.Variable->getType());
    Value *LHS = Builder.CreateLoad(RI.ElementType, LHSPtr);
    Value *RHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
        RedArrayTy, RHSArrayPtr, 0, En.index());
    Value *RHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), RHSI8PtrPtr);
    Value *RHSPtr =
        Builder.CreateBitCast(RHSI8Ptr, RI.PrivateVariable->getType());
    Value *RHS = Builder.CreateLoad(RI.ElementType, RHSPtr);
    Value *Reduced;
    Builder.restoreIP(RI.ReductionGen(Builder.saveIP(), LHS, RHS, Reduced));
    if (!Builder.GetInsertBlock())
      return InsertPointTy();
    Builder.CreateStore(Reduced, LHSPtr);
  }
  Builder.CreateRetVoid();

  Builder.SetInsertPoint(ContinuationBlock);
  return Builder.saveIP();
}
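// The dispatch generated above follows the libomp reduction protocol (a
// sketch):
//
//   %r = call i32 @__kmpc_reduce(...)
//   switch i32 %r [ 1 -> non-atomic path, closed by @__kmpc_end_reduce
//                   2 -> atomic path ]
//   default: another thread already combined the values; continue.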
OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createMaster(const LocationDescription &Loc,
                              BodyGenCallbackTy BodyGenCB,
                              FinalizeCallbackTy FiniCB) {

  if (!updateToLocation(Loc))
    return Loc.IP;

  Directive OMPD = Directive::OMPD_master;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
  Value *Ident = getOrCreateIdent(SrcLocStr);
  Value *ThreadId = getOrCreateThreadID(Ident);
  Value *Args[] = {Ident, ThreadId};

  Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master);
  Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);

  Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master);
  Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);

  return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
                              /*Conditional*/ true, /*hasFinalize*/ true);
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createMasked(const LocationDescription &Loc,
                              BodyGenCallbackTy BodyGenCB,
                              FinalizeCallbackTy FiniCB, Value *Filter) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  Directive OMPD = Directive::OMPD_masked;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
  Value *Ident = getOrCreateIdent(SrcLocStr);
  Value *ThreadId = getOrCreateThreadID(Ident);
  Value *Args[] = {Ident, ThreadId, Filter};
  Value *ArgsEnd[] = {Ident, ThreadId};

  Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_masked);
  Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);

  Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_masked);
  Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, ArgsEnd);

  return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
                              /*Conditional*/ true, /*hasFinalize*/ true);
}
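// Both constructs lower to a conditionally guarded inlined region (a sketch):
//
//   %r = call i32 @__kmpc_master(%struct.ident_t* @0, i32 %tid)
//   %guard = icmp ne i32 %r, 0
//   br i1 %guard, label %body, label %exit
// body:
//   ; region body, closed by @__kmpc_end_master(%struct.ident_t* @0, i32 %tid)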
CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton(
    DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore,
    BasicBlock *PostInsertBefore, const Twine &Name) {
  Module *M = F->getParent();
  LLVMContext &Ctx = M->getContext();
  Type *IndVarTy = TripCount->getType();

  // Create the basic block structure.
  BasicBlock *Preheader =
      BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore);
  BasicBlock *Header =
      BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore);
  BasicBlock *Cond =
      BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore);
  BasicBlock *Body =
      BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore);
  BasicBlock *Latch =
      BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore);
  BasicBlock *Exit =
      BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore);
  BasicBlock *After =
      BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore);

  // Use specified DebugLoc for new instructions.
  Builder.SetCurrentDebugLocation(DL);

  Builder.SetInsertPoint(Preheader);
  Builder.CreateBr(Header);

  Builder.SetInsertPoint(Header);
  PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv");
  IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader);
  Builder.CreateBr(Cond);

  Builder.SetInsertPoint(Cond);
  Value *Cmp =
      Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp");
  Builder.CreateCondBr(Cmp, Body, Exit);

  Builder.SetInsertPoint(Body);
  Builder.CreateBr(Latch);

  Builder.SetInsertPoint(Latch);
  Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1),
                                  "omp_" + Name + ".next", /*HasNUW=*/true);
  Builder.CreateBr(Header);
  IndVarPHI->addIncoming(Next, Latch);

  Builder.SetInsertPoint(Exit);
  Builder.CreateBr(After);

  // Remember and return the canonical control flow.
  LoopInfos.emplace_front();
  CanonicalLoopInfo *CL = &LoopInfos.front();

  CL->Header = Header;
  CL->Cond = Cond;
  CL->Latch = Latch;
  CL->Exit = Exit;

#ifndef NDEBUG
  CL->assertOK();
#endif
  return CL;
}
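// The resulting control flow is (a sketch):
//
//   preheader -> header -> cond --(iv ult tripcount)--> body -> inc -> header
//                             \-------------------------------> exit -> after
//
// with the induction variable as a header PHI starting at 0 and stepping by 1.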
CanonicalLoopInfo *
OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc,
                                     LoopBodyGenCallbackTy BodyGenCB,
                                     Value *TripCount, const Twine &Name) {
  BasicBlock *BB = Loc.IP.getBlock();
  BasicBlock *NextBB = BB->getNextNode();

  CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(),
                                             NextBB, NextBB, Name);
  BasicBlock *After = CL->getAfter();

  // If location is not set, don't connect the loop.
  if (updateToLocation(Loc)) {
    // Split the loop at the insertion point: Branch to the preheader and move
    // every following instruction to after the loop (the After BB). Also, the
    // new successor is the loop's after block.
    Builder.CreateBr(CL->getPreheader());
    After->getInstList().splice(After->begin(), BB->getInstList(),
                                Builder.GetInsertPoint(), BB->end());
    After->replaceSuccessorsPhiUsesWith(BB, After);
  }

  // Emit the body content. We do it after connecting the loop to the CFG to
  // avoid that the callback encounters degenerate BBs.
  BodyGenCB(CL->getBodyIP(), CL->getIndVar());

#ifndef NDEBUG
  CL->assertOK();
#endif
  return CL;
}

CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop(
    const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
    Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
    InsertPointTy ComputeIP, const Twine &Name) {

  // Consider the following difficulties (assuming 8-bit signed integers):
  // * Adding \p Step to the loop counter which passes \p Stop may overflow:
  //     DO I = 1, 100, 50
  // * A \p Step of INT_MIN cannot be normalized to a positive direction:
  //     DO I = 100, 0, -128

  // Start, Stop and Step must be of the same integer type.
  auto *IndVarTy = cast<IntegerType>(Start->getType());
  assert(IndVarTy == Stop->getType() && "Stop type mismatch");
  assert(IndVarTy == Step->getType() && "Step type mismatch");

  LocationDescription ComputeLoc =
      ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc;
  updateToLocation(ComputeLoc);

  ConstantInt *Zero = ConstantInt::get(IndVarTy, 0);
  ConstantInt *One = ConstantInt::get(IndVarTy, 1);

  // Like Step, but always positive.
  Value *Incr = Step;

  // Distance between Start and Stop; always positive.
  Value *Span;

  // Condition for whether no iterations are executed at all, e.g. because
  // UB < LB.
  Value *ZeroCmp;

  if (IsSigned) {
    // Ensure that increment is positive. If not, negate and invert LB and UB.
    Value *IsNeg = Builder.CreateICmpSLT(Step, Zero);
    Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step);
    Value *LB = Builder.CreateSelect(IsNeg, Stop, Start);
    Value *UB = Builder.CreateSelect(IsNeg, Start, Stop);
    Span = Builder.CreateSub(UB, LB, "", false, true);
    ZeroCmp = Builder.CreateICmp(
        InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB);
  } else {
    Span = Builder.CreateSub(Stop, Start, "", true);
    ZeroCmp = Builder.CreateICmp(
        InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start);
  }

  Value *CountIfLooping;
  if (InclusiveStop) {
    CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One);
  } else {
    // Avoid incrementing past stop since it could overflow.
    Value *CountIfTwo = Builder.CreateAdd(
        Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One);
    Value *OneCmp = Builder.CreateICmp(
        InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Span, Incr);
    CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo);
  }
  Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping,
                                          "omp_" + Name + ".tripcount");

  auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) {
    Builder.restoreIP(CodeGenIP);
    Value *Span = Builder.CreateMul(IV, Step);
    Value *IndVar = Builder.CreateAdd(Span, Start);
    BodyGenCB(Builder.saveIP(), IndVar);
  };
  LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP();
  return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name);
}

// Returns an LLVM function to call for initializing loop bounds using OpenMP
// static scheduling depending on `Ty`. Only i32 and i64 are supported by the
// runtime. Always interpret integers as unsigned similarly to
// CanonicalLoopInfo.
static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M,
                                                  OpenMPIRBuilder &OMPBuilder) {
  unsigned Bitwidth = Ty->getIntegerBitWidth();
  if (Bitwidth == 32)
    return OMPBuilder.getOrCreateRuntimeFunction(
        M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u);
  if (Bitwidth == 64)
    return OMPBuilder.getOrCreateRuntimeFunction(
        M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u);
  llvm_unreachable("unknown OpenMP loop iterator bitwidth");
}

// Sets the number of loop iterations to the given value. This value must be
// valid in the condition block (i.e., defined in the preheader) and is
// interpreted as an unsigned integer.
void setCanonicalLoopTripCount(CanonicalLoopInfo *CLI, Value *TripCount) {
  Instruction *CmpI = &CLI->getCond()->front();
  assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
  CmpI->setOperand(1, TripCount);
  CLI->assertOK();
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
                                          InsertPointTy AllocaIP,
                                          bool NeedsBarrier, Value *Chunk) {
  assert(CLI->isValid() && "Requires a valid canonical loop");

  // Set up the source location value for OpenMP runtime.
  Builder.restoreIP(CLI->getPreheaderIP());
  Builder.SetCurrentDebugLocation(DL);

  Constant *SrcLocStr = getOrCreateSrcLocStr(DL);
  Value *SrcLoc = getOrCreateIdent(SrcLocStr);

  // Declare useful OpenMP runtime functions.
  Value *IV = CLI->getIndVar();
  Type *IVTy = IV->getType();
  FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this);
  FunctionCallee StaticFini =
      getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);

  // Allocate space for computed loop bounds as expected by the "init" function.
1494 Builder.restoreIP(AllocaIP); 1495 Type *I32Type = Type::getInt32Ty(M.getContext()); 1496 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter"); 1497 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound"); 1498 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound"); 1499 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride"); 1500 1501 // At the end of the preheader, prepare for calling the "init" function by 1502 // storing the current loop bounds into the allocated space. A canonical loop 1503 // always iterates from 0 to trip-count with step 1. Note that "init" expects 1504 // and produces an inclusive upper bound. 1505 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator()); 1506 Constant *Zero = ConstantInt::get(IVTy, 0); 1507 Constant *One = ConstantInt::get(IVTy, 1); 1508 Builder.CreateStore(Zero, PLowerBound); 1509 Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One); 1510 Builder.CreateStore(UpperBound, PUpperBound); 1511 Builder.CreateStore(One, PStride); 1512 1513 // FIXME: schedule(static) is NOT the same as schedule(static,1) 1514 if (!Chunk) 1515 Chunk = One; 1516 1517 Value *ThreadNum = getOrCreateThreadID(SrcLoc); 1518 1519 Constant *SchedulingType = 1520 ConstantInt::get(I32Type, static_cast<int>(OMPScheduleType::Static)); 1521 1522 // Call the "init" function and update the trip count of the loop with the 1523 // value it produced. 1524 Builder.CreateCall(StaticInit, 1525 {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound, 1526 PUpperBound, PStride, One, Chunk}); 1527 Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound); 1528 Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound); 1529 Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound); 1530 Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One); 1531 setCanonicalLoopTripCount(CLI, TripCount); 1532 1533 // Update all uses of the induction variable except the one in the condition 1534 // block that compares it with the actual upper bound, and the increment in 1535 // the latch block. 1536 // TODO: this can eventually move to CanonicalLoopInfo or to a new 1537 // CanonicalLoopInfoUpdater interface. 1538 Builder.SetInsertPoint(CLI->getBody(), CLI->getBody()->getFirstInsertionPt()); 1539 Value *UpdatedIV = Builder.CreateAdd(IV, LowerBound); 1540 IV->replaceUsesWithIf(UpdatedIV, [&](Use &U) { 1541 auto *Instr = dyn_cast<Instruction>(U.getUser()); 1542 return !Instr || 1543 (Instr->getParent() != CLI->getCond() && 1544 Instr->getParent() != CLI->getLatch() && Instr != UpdatedIV); 1545 }); 1546 1547 // In the "exit" block, call the "fini" function. 1548 Builder.SetInsertPoint(CLI->getExit(), 1549 CLI->getExit()->getTerminator()->getIterator()); 1550 Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum}); 1551 1552 // Add the barrier if requested. 1553 if (NeedsBarrier) 1554 createBarrier(LocationDescription(Builder.saveIP(), DL), 1555 omp::Directive::OMPD_for, /* ForceSimpleCall */ false, 1556 /* CheckCancelFlag */ false); 1557 1558 InsertPointTy AfterIP = CLI->getAfterIP(); 1559 CLI->invalidate(); 1560 1561 return AfterIP; 1562 } 1563 1564 OpenMPIRBuilder::InsertPointTy 1565 OpenMPIRBuilder::applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, 1566 InsertPointTy AllocaIP, bool NeedsBarrier) { 1567 // Currently only supports static schedules. 
1568 return applyStaticWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier); 1569 } 1570 1571 /// Returns an LLVM function to call for initializing loop bounds using OpenMP 1572 /// dynamic scheduling depending on `type`. Only i32 and i64 are supported by 1573 /// the runtime. Always interpret integers as unsigned similarly to 1574 /// CanonicalLoopInfo. 1575 static FunctionCallee 1576 getKmpcForDynamicInitForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) { 1577 unsigned Bitwidth = Ty->getIntegerBitWidth(); 1578 if (Bitwidth == 32) 1579 return OMPBuilder.getOrCreateRuntimeFunction( 1580 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_4u); 1581 if (Bitwidth == 64) 1582 return OMPBuilder.getOrCreateRuntimeFunction( 1583 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_8u); 1584 llvm_unreachable("unknown OpenMP loop iterator bitwidth"); 1585 } 1586 1587 /// Returns an LLVM function to call for updating the next loop using OpenMP 1588 /// dynamic scheduling depending on `type`. Only i32 and i64 are supported by 1589 /// the runtime. Always interpret integers as unsigned similarly to 1590 /// CanonicalLoopInfo. 1591 static FunctionCallee 1592 getKmpcForDynamicNextForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) { 1593 unsigned Bitwidth = Ty->getIntegerBitWidth(); 1594 if (Bitwidth == 32) 1595 return OMPBuilder.getOrCreateRuntimeFunction( 1596 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_4u); 1597 if (Bitwidth == 64) 1598 return OMPBuilder.getOrCreateRuntimeFunction( 1599 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_8u); 1600 llvm_unreachable("unknown OpenMP loop iterator bitwidth"); 1601 } 1602 1603 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop( 1604 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, 1605 OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk) { 1606 assert(CLI->isValid() && "Requires a valid canonical loop"); 1607 1608 // Set up the source location value for OpenMP runtime. 1609 Builder.SetCurrentDebugLocation(DL); 1610 1611 Constant *SrcLocStr = getOrCreateSrcLocStr(DL); 1612 Value *SrcLoc = getOrCreateIdent(SrcLocStr); 1613 1614 // Declare useful OpenMP runtime functions. 1615 Value *IV = CLI->getIndVar(); 1616 Type *IVTy = IV->getType(); 1617 FunctionCallee DynamicInit = getKmpcForDynamicInitForType(IVTy, M, *this); 1618 FunctionCallee DynamicNext = getKmpcForDynamicNextForType(IVTy, M, *this); 1619 1620 // Allocate space for computed loop bounds as expected by the "init" function. 1621 Builder.restoreIP(AllocaIP); 1622 Type *I32Type = Type::getInt32Ty(M.getContext()); 1623 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter"); 1624 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound"); 1625 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound"); 1626 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride"); 1627 1628 // At the end of the preheader, prepare for calling the "init" function by 1629 // storing the current loop bounds into the allocated space. A canonical loop 1630 // always iterates from 0 to trip-count with step 1. Note that "init" expects 1631 // and produces an inclusive upper bound. 
1632 BasicBlock *PreHeader = CLI->getPreheader(); 1633 Builder.SetInsertPoint(PreHeader->getTerminator()); 1634 Constant *One = ConstantInt::get(IVTy, 1); 1635 Builder.CreateStore(One, PLowerBound); 1636 Value *UpperBound = CLI->getTripCount(); 1637 Builder.CreateStore(UpperBound, PUpperBound); 1638 Builder.CreateStore(One, PStride); 1639 1640 BasicBlock *Header = CLI->getHeader(); 1641 BasicBlock *Exit = CLI->getExit(); 1642 BasicBlock *Cond = CLI->getCond(); 1643 InsertPointTy AfterIP = CLI->getAfterIP(); 1644 1645 // The CLI will be "broken" in the code below, as the loop is no longer 1646 // a valid canonical loop. 1647 1648 if (!Chunk) 1649 Chunk = One; 1650 1651 Value *ThreadNum = getOrCreateThreadID(SrcLoc); 1652 1653 Constant *SchedulingType = 1654 ConstantInt::get(I32Type, static_cast<int>(SchedType)); 1655 1656 // Call the "init" function. 1657 Builder.CreateCall(DynamicInit, 1658 {SrcLoc, ThreadNum, SchedulingType, /* LowerBound */ One, 1659 UpperBound, /* step */ One, Chunk}); 1660 1661 // An outer loop around the existing one. 1662 BasicBlock *OuterCond = BasicBlock::Create( 1663 PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond", 1664 PreHeader->getParent()); 1665 // The flag returned by dispatch_next is always 32-bit, so the zero for the comparison below cannot reuse an IVTy constant. 1666 Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt()); 1667 Value *Res = 1668 Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter, 1669 PLowerBound, PUpperBound, PStride}); 1670 Constant *Zero32 = ConstantInt::get(I32Type, 0); 1671 Value *MoreWork = Builder.CreateCmp(CmpInst::ICMP_NE, Res, Zero32); 1672 Value *LowerBound = 1673 Builder.CreateSub(Builder.CreateLoad(IVTy, PLowerBound), One, "lb"); 1674 Builder.CreateCondBr(MoreWork, Header, Exit); 1675 1676 // Change PHI-node in loop header to use outer cond rather than preheader, 1677 // and set IV to the LowerBound. 1678 Instruction *Phi = &Header->front(); 1679 auto *PI = cast<PHINode>(Phi); 1680 PI->setIncomingBlock(0, OuterCond); 1681 PI->setIncomingValue(0, LowerBound); 1682 1683 // Then set the pre-header to jump to the OuterCond. 1684 Instruction *Term = PreHeader->getTerminator(); 1685 auto *Br = cast<BranchInst>(Term); 1686 Br->setSuccessor(0, OuterCond); 1687 1688 // Modify the inner condition: 1689 // * Use the UpperBound returned from the DynamicNext call. 1690 // * Jump to the outer loop when done with one of the inner loops. 1691 Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt()); 1692 UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub"); 1693 Instruction *Comp = &*Builder.GetInsertPoint(); 1694 auto *CI = cast<CmpInst>(Comp); 1695 CI->setOperand(1, UpperBound); 1696 // Redirect the inner exit to branch to outer condition. 1697 Instruction *Branch = &Cond->back(); 1698 auto *BI = cast<BranchInst>(Branch); 1699 assert(BI->getSuccessor(1) == Exit); 1700 BI->setSuccessor(1, OuterCond); 1701 1702 // Add the barrier if requested. 1703 if (NeedsBarrier) { 1704 Builder.SetInsertPoint(&Exit->back()); 1705 createBarrier(LocationDescription(Builder.saveIP(), DL), 1706 omp::Directive::OMPD_for, /* ForceSimpleCall */ false, 1707 /* CheckCancelFlag */ false); 1708 } 1709 1710 CLI->invalidate(); 1711 return AfterIP; 1712 } 1713 1714 /// Make \p Source branch to \p Target. 1715 /// 1716 /// Handles two situations: 1717 /// * \p Source already has an unconditional branch. 1718 /// * \p Source is a degenerate block (no terminator because the BB is 1719 /// the current head of the IR construction).
1720 static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) { 1721 if (Instruction *Term = Source->getTerminator()) { 1722 auto *Br = cast<BranchInst>(Term); 1723 assert(!Br->isConditional() && 1724 "BB's terminator must be an unconditional branch (or degenerate)"); 1725 BasicBlock *Succ = Br->getSuccessor(0); 1726 Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true); 1727 Br->setSuccessor(0, Target); 1728 return; 1729 } 1730 1731 auto *NewBr = BranchInst::Create(Target, Source); 1732 NewBr->setDebugLoc(DL); 1733 } 1734 1735 /// Redirect all edges that branch to \p OldTarget to \p NewTarget. That is, 1736 /// after this \p OldTarget will be orphaned. 1737 static void redirectAllPredecessorsTo(BasicBlock *OldTarget, 1738 BasicBlock *NewTarget, DebugLoc DL) { 1739 for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget))) 1740 redirectTo(Pred, NewTarget, DL); 1741 } 1742 1743 /// Determine which blocks in \p BBs are reachable from outside and remove from 1744 /// the function the ones that are not. 1745 static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) { 1746 SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()}; 1747 auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) { 1748 for (Use &U : BB->uses()) { 1749 auto *UseInst = dyn_cast<Instruction>(U.getUser()); 1750 if (!UseInst) 1751 continue; 1752 if (BBsToErase.count(UseInst->getParent())) 1753 continue; 1754 return true; 1755 } 1756 return false; 1757 }; 1758 1759 while (true) { 1760 bool Changed = false; 1761 for (BasicBlock *BB : make_early_inc_range(BBsToErase)) { 1762 if (HasRemainingUses(BB)) { 1763 BBsToErase.erase(BB); 1764 Changed = true; 1765 } 1766 } 1767 if (!Changed) 1768 break; 1769 } 1770 1771 SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end()); 1772 DeleteDeadBlocks(BBVec); 1773 } 1774 1775 CanonicalLoopInfo * 1776 OpenMPIRBuilder::collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, 1777 InsertPointTy ComputeIP) { 1778 assert(Loops.size() >= 1 && "At least one loop required"); 1779 size_t NumLoops = Loops.size(); 1780 1781 // Nothing to do if there is already just one loop. 1782 if (NumLoops == 1) 1783 return Loops.front(); 1784 1785 CanonicalLoopInfo *Outermost = Loops.front(); 1786 CanonicalLoopInfo *Innermost = Loops.back(); 1787 BasicBlock *OrigPreheader = Outermost->getPreheader(); 1788 BasicBlock *OrigAfter = Outermost->getAfter(); 1789 Function *F = OrigPreheader->getParent(); 1790 1791 // Loop control blocks that may become orphaned later. 1792 SmallVector<BasicBlock *, 12> OldControlBBs; 1793 OldControlBBs.reserve(6 * Loops.size()); 1794 for (CanonicalLoopInfo *Loop : Loops) 1795 Loop->collectControlBlocks(OldControlBBs); 1796 1797 // Set up the IRBuilder for inserting the trip count computation. 1798 Builder.SetCurrentDebugLocation(DL); 1799 if (ComputeIP.isSet()) 1800 Builder.restoreIP(ComputeIP); 1801 else 1802 Builder.restoreIP(Outermost->getPreheaderIP()); 1803 1804 // Derive the collapsed loop's trip count. 1805 // TODO: Find common/largest indvar type. 1806 Value *CollapsedTripCount = nullptr; 1807 for (CanonicalLoopInfo *L : Loops) { 1808 assert(L->isValid() && 1809 "All loops to collapse must be valid canonical loops"); 1810 Value *OrigTripCount = L->getTripCount(); 1811 if (!CollapsedTripCount) { 1812 CollapsedTripCount = OrigTripCount; 1813 continue; 1814 } 1815 1816 // TODO: Enable UndefinedBehaviorSanitizer to diagnose an overflow here.
1817 CollapsedTripCount = Builder.CreateMul(CollapsedTripCount, OrigTripCount, 1818 {}, /*HasNUW=*/true); 1819 } 1820 1821 // Create the collapsed loop control flow. 1822 CanonicalLoopInfo *Result = 1823 createLoopSkeleton(DL, CollapsedTripCount, F, 1824 OrigPreheader->getNextNode(), OrigAfter, "collapsed"); 1825 1826 // Build the collapsed loop body code. 1827 // Start with deriving the input loop induction variables from the collapsed 1828 // one, using a divmod scheme. To preserve the original loops' order, the 1829 // innermost loop uses the least significant bits. 1830 Builder.restoreIP(Result->getBodyIP()); 1831 1832 Value *Leftover = Result->getIndVar(); 1833 SmallVector<Value *> NewIndVars; 1834 NewIndVars.resize(NumLoops); 1835 for (int i = NumLoops - 1; i >= 1; --i) { 1836 Value *OrigTripCount = Loops[i]->getTripCount(); 1837 1838 Value *NewIndVar = Builder.CreateURem(Leftover, OrigTripCount); 1839 NewIndVars[i] = NewIndVar; 1840 1841 Leftover = Builder.CreateUDiv(Leftover, OrigTripCount); 1842 } 1843 // Outermost loop gets all the remaining bits. 1844 NewIndVars[0] = Leftover; 1845 1846 // Construct the loop body control flow. 1847 // We progressively construct the branch structure following the direction of 1848 // the control flow: first the leading in-between code, then the loop nest 1849 // body, then the trailing in-between code, finally rejoining the collapsed 1849 // loop's latch. 1850 // ContinueBlock and ContinuePred keep track of the source(s) of the next 1851 // edge. If ContinueBlock is set, continue with that block. If ContinuePred is 1852 // set, use its predecessors as sources. 1853 BasicBlock *ContinueBlock = Result->getBody(); 1854 BasicBlock *ContinuePred = nullptr; 1855 auto ContinueWith = [&ContinueBlock, &ContinuePred, DL](BasicBlock *Dest, 1856 BasicBlock *NextSrc) { 1857 if (ContinueBlock) 1858 redirectTo(ContinueBlock, Dest, DL); 1859 else 1860 redirectAllPredecessorsTo(ContinuePred, Dest, DL); 1861 1862 ContinueBlock = nullptr; 1863 ContinuePred = NextSrc; 1864 }; 1865 1866 // The code before the nested loop of each level. 1867 // Because we are sinking it into the nest, it will be executed more often 1868 // than in the original loop nest. More sophisticated schemes could keep track 1869 // of what the in-between code is and instantiate it only once per thread. 1870 for (size_t i = 0; i < NumLoops - 1; ++i) 1871 ContinueWith(Loops[i]->getBody(), Loops[i + 1]->getHeader()); 1872 1873 // Connect the loop nest body. 1874 ContinueWith(Innermost->getBody(), Innermost->getLatch()); 1875 1876 // The code after the nested loop at each level. 1877 for (size_t i = NumLoops - 1; i > 0; --i) 1878 ContinueWith(Loops[i]->getAfter(), Loops[i - 1]->getLatch()); 1879 1880 // Connect the finished loop to the collapsed loop latch. 1881 ContinueWith(Result->getLatch(), nullptr); 1882 1883 // Replace the input loops with the new collapsed loop. 1884 redirectTo(Outermost->getPreheader(), Result->getPreheader(), DL); 1885 redirectTo(Result->getAfter(), Outermost->getAfter(), DL); 1886 1887 // Replace the input loop indvars with the derived ones. 1888 for (size_t i = 0; i < NumLoops; ++i) 1889 Loops[i]->getIndVar()->replaceAllUsesWith(NewIndVars[i]); 1890 1891 // Remove unused parts of the input loops.
1892 removeUnusedBlocksFromParent(OldControlBBs); 1893 1894 for (CanonicalLoopInfo *L : Loops) 1895 L->invalidate(); 1896 1897 #ifndef NDEBUG 1898 Result->assertOK(); 1899 #endif 1900 return Result; 1901 } 1902 1903 std::vector<CanonicalLoopInfo *> 1904 OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, 1905 ArrayRef<Value *> TileSizes) { 1906 assert(TileSizes.size() == Loops.size() && 1907 "Must pass as many tile sizes as there are loops"); 1908 int NumLoops = Loops.size(); 1909 assert(NumLoops >= 1 && "At least one loop to tile required"); 1910 1911 CanonicalLoopInfo *OutermostLoop = Loops.front(); 1912 CanonicalLoopInfo *InnermostLoop = Loops.back(); 1913 Function *F = OutermostLoop->getBody()->getParent(); 1914 BasicBlock *InnerEnter = InnermostLoop->getBody(); 1915 BasicBlock *InnerLatch = InnermostLoop->getLatch(); 1916 1917 // Loop control blocks that may become orphaned later. 1918 SmallVector<BasicBlock *, 12> OldControlBBs; 1919 OldControlBBs.reserve(6 * Loops.size()); 1920 for (CanonicalLoopInfo *Loop : Loops) 1921 Loop->collectControlBlocks(OldControlBBs); 1922 1923 // Collect original trip counts and induction variables to be accessible by 1924 // index. Also, the structure of the original loops is not preserved during 1925 // the construction of the tiled loops, so do it before we scavenge the BBs of 1926 // any original CanonicalLoopInfo. 1927 SmallVector<Value *, 4> OrigTripCounts, OrigIndVars; 1928 for (CanonicalLoopInfo *L : Loops) { 1929 assert(L->isValid() && "All input loops must be valid canonical loops"); 1930 OrigTripCounts.push_back(L->getTripCount()); 1931 OrigIndVars.push_back(L->getIndVar()); 1932 } 1933 1934 // Collect the code between loop headers. These may contain SSA definitions 1935 // that are used in the loop nest body. To be usable within the innermost 1936 // body, these BasicBlocks will be sunk into the loop nest body. That is, 1937 // these instructions may be executed more often than before the tiling. 1938 // TODO: It would be sufficient to only sink them into the body of the 1939 // corresponding tile loop. 1940 SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode; 1941 for (int i = 0; i < NumLoops - 1; ++i) { 1942 CanonicalLoopInfo *Surrounding = Loops[i]; 1943 CanonicalLoopInfo *Nested = Loops[i + 1]; 1944 1945 BasicBlock *EnterBB = Surrounding->getBody(); 1946 BasicBlock *ExitBB = Nested->getHeader(); 1947 InbetweenCode.emplace_back(EnterBB, ExitBB); 1948 } 1949 1950 // Compute the trip counts of the floor loops. 1951 Builder.SetCurrentDebugLocation(DL); 1952 Builder.restoreIP(OutermostLoop->getPreheaderIP()); 1953 SmallVector<Value *, 4> FloorCount, FloorRems; 1954 for (int i = 0; i < NumLoops; ++i) { 1955 Value *TileSize = TileSizes[i]; 1956 Value *OrigTripCount = OrigTripCounts[i]; 1957 Type *IVType = OrigTripCount->getType(); 1958 1959 Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize); 1960 Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize); 1961 1962 // 0 if the tilesize divides the tripcount, 1 otherwise. 1963 // 1 means we need an additional iteration for a partial tile. 1964 // 1965 // Unfortunately we cannot just use the roundup-formula 1966 // (tripcount + tilesize - 1)/tilesize 1967 // because the summation might overflow. We do not want to introduce undefined 1968 // behavior when the untiled loop nest did not.
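// Worked example with illustrative numbers: for tripcount = 10 and tilesize = 4, // FloorTripCount = 10 / 4 = 2 and FloorTripRem = 10 % 4 = 2; the overflow bit // computed below adds one floor iteration for the partial tile, giving 3 floor // iterations that cover 4 + 4 + 2 original iterations. For tripcount = 8, the // remainder is 0 and no extra iteration is added.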
1969 Value *FloorTripOverflow = 1970 Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0)); 1971 1972 FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType); 1973 FloorTripCount = 1974 Builder.CreateAdd(FloorTripCount, FloorTripOverflow, 1975 "omp_floor" + Twine(i) + ".tripcount", true); 1976 1977 // Remember some values for later use. 1978 FloorCount.push_back(FloorTripCount); 1979 FloorRems.push_back(FloorTripRem); 1980 } 1981 1982 // Generate the new loop nest, from the outermost to the innermost. 1983 std::vector<CanonicalLoopInfo *> Result; 1984 Result.reserve(NumLoops * 2); 1985 1986 // The basic block of the surrounding loop that enters the generated loop 1987 // nest. 1988 BasicBlock *Enter = OutermostLoop->getPreheader(); 1989 1990 // The basic block of the surrounding loop where the inner code should 1991 // continue. 1992 BasicBlock *Continue = OutermostLoop->getAfter(); 1993 1994 // Where the next loop basic block should be inserted. 1995 BasicBlock *OutroInsertBefore = InnermostLoop->getExit(); 1996 1997 auto EmbeddNewLoop = 1998 [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore]( 1999 Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * { 2000 CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton( 2001 DL, TripCount, F, InnerEnter, OutroInsertBefore, Name); 2002 redirectTo(Enter, EmbeddedLoop->getPreheader(), DL); 2003 redirectTo(EmbeddedLoop->getAfter(), Continue, DL); 2004 2005 // Set up the position where the next embedded loop connects to this loop. 2006 Enter = EmbeddedLoop->getBody(); 2007 Continue = EmbeddedLoop->getLatch(); 2008 OutroInsertBefore = EmbeddedLoop->getLatch(); 2009 return EmbeddedLoop; 2010 }; 2011 2012 auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts, 2013 const Twine &NameBase) { 2014 for (auto P : enumerate(TripCounts)) { 2015 CanonicalLoopInfo *EmbeddedLoop = 2016 EmbeddNewLoop(P.value(), NameBase + Twine(P.index())); 2017 Result.push_back(EmbeddedLoop); 2018 } 2019 }; 2020 2021 EmbeddNewLoops(FloorCount, "floor"); 2022 2023 // Within the innermost floor loop, emit the code that computes the tile 2024 // sizes. 2025 Builder.SetInsertPoint(Enter->getTerminator()); 2026 SmallVector<Value *, 4> TileCounts; 2027 for (int i = 0; i < NumLoops; ++i) { 2028 CanonicalLoopInfo *FloorLoop = Result[i]; 2029 Value *TileSize = TileSizes[i]; 2030 2031 Value *FloorIsEpilogue = 2032 Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]); 2033 Value *TileTripCount = 2034 Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize); 2035 2036 TileCounts.push_back(TileTripCount); 2037 } 2038 2039 // Create the tile loops. 2040 EmbeddNewLoops(TileCounts, "tile"); 2041 2042 // Insert the inbetween code into the body. 2043 BasicBlock *BodyEnter = Enter; 2044 BasicBlock *BodyEntered = nullptr; 2045 for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) { 2046 BasicBlock *EnterBB = P.first; 2047 BasicBlock *ExitBB = P.second; 2048 2049 if (BodyEnter) 2050 redirectTo(BodyEnter, EnterBB, DL); 2051 else 2052 redirectAllPredecessorsTo(BodyEntered, EnterBB, DL); 2053 2054 BodyEnter = nullptr; 2055 BodyEntered = ExitBB; 2056 } 2057 2058 // Append the original loop nest body into the generated loop nest body.
if (BodyEnter) 2060 redirectTo(BodyEnter, InnerEnter, DL); 2061 else 2062 redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL); 2063 redirectAllPredecessorsTo(InnerLatch, Continue, DL); 2064 2065 // Replace the original induction variable with an induction variable computed 2066 // from the tile and floor induction variables. 2067 Builder.restoreIP(Result.back()->getBodyIP()); 2068 for (int i = 0; i < NumLoops; ++i) { 2069 CanonicalLoopInfo *FloorLoop = Result[i]; 2070 CanonicalLoopInfo *TileLoop = Result[NumLoops + i]; 2071 Value *OrigIndVar = OrigIndVars[i]; 2072 Value *Size = TileSizes[i]; 2073 2074 Value *Scale = 2075 Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true); 2076 Value *Shift = 2077 Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true); 2078 OrigIndVar->replaceAllUsesWith(Shift); 2079 } 2080 2081 // Remove unused parts of the original loops. 2082 removeUnusedBlocksFromParent(OldControlBBs); 2083 2084 for (CanonicalLoopInfo *L : Loops) 2085 L->invalidate(); 2086 2087 #ifndef NDEBUG 2088 for (CanonicalLoopInfo *GenL : Result) 2089 GenL->assertOK(); 2090 #endif 2091 return Result; 2092 } 2093 2094 /// Attach loop metadata \p Properties to the loop described by \p Loop. If the 2095 /// loop already has metadata, the loop properties are appended. 2096 static void addLoopMetadata(CanonicalLoopInfo *Loop, 2097 ArrayRef<Metadata *> Properties) { 2098 assert(Loop->isValid() && "Expecting a valid CanonicalLoopInfo"); 2099 2100 // Nothing to do if no property to attach. 2101 if (Properties.empty()) 2102 return; 2103 2104 LLVMContext &Ctx = Loop->getFunction()->getContext(); 2105 SmallVector<Metadata *> NewLoopProperties; 2106 NewLoopProperties.push_back(nullptr); 2107 2108 // If the loop already has metadata, prepend it to the new metadata. 2109 BasicBlock *Latch = Loop->getLatch(); 2110 assert(Latch && "A valid CanonicalLoopInfo must have a unique latch"); 2111 MDNode *Existing = Latch->getTerminator()->getMetadata(LLVMContext::MD_loop); 2112 if (Existing) 2113 append_range(NewLoopProperties, drop_begin(Existing->operands(), 1)); 2114 2115 append_range(NewLoopProperties, Properties); 2116 MDNode *LoopID = MDNode::getDistinct(Ctx, NewLoopProperties); 2117 LoopID->replaceOperandWith(0, LoopID); 2118 2119 Latch->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopID); 2120 } 2121 2122 void OpenMPIRBuilder::unrollLoopFull(DebugLoc, CanonicalLoopInfo *Loop) { 2123 LLVMContext &Ctx = Builder.getContext(); 2124 addLoopMetadata( 2125 Loop, {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), 2126 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.full"))}); 2127 } 2128 2129 void OpenMPIRBuilder::unrollLoopHeuristic(DebugLoc, CanonicalLoopInfo *Loop) { 2130 LLVMContext &Ctx = Builder.getContext(); 2131 addLoopMetadata( 2132 Loop, { 2133 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), 2134 }); 2135 } 2136 2137 /// Create the TargetMachine object to query the backend for optimization 2138 /// preferences. 2139 /// 2140 /// Ideally, this would be passed from the front-end to the OpenMPBuilder, but 2141 /// e.g. Clang does not pass it to its CodeGen layer and creates it only when 2142 /// needed for the LLVM pass pipeline. We use some default options to avoid 2143 /// having to pass too many settings from the frontend that probably do not 2144 /// matter. 2145 /// 2146 /// Currently, TargetMachine is only used sometimes by the unrollLoopPartial 2147 /// method. If we are going to use TargetMachine for more purposes, especially 2148 /// those that are sensitive to TargetOptions, RelocModel and CodeModel, it 2149 /// might become worth requiring front-ends to pass on their TargetMachine, 2150 /// or at least cache it between methods. Note that while frontends such as Clang 2151 /// have just a single main TargetMachine per translation unit, "target-cpu" and 2152 /// "target-features" that determine the TargetMachine are per-function and can 2153 /// be overridden using __attribute__((target("OPTIONS"))). 2154 static std::unique_ptr<TargetMachine> 2155 createTargetMachine(Function *F, CodeGenOpt::Level OptLevel) { 2156 Module *M = F->getParent(); 2157 2158 StringRef CPU = F->getFnAttribute("target-cpu").getValueAsString(); 2159 StringRef Features = F->getFnAttribute("target-features").getValueAsString(); 2160 const std::string &Triple = M->getTargetTriple(); 2161 2162 std::string Error; 2163 const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error); 2164 if (!TheTarget) 2165 return {}; 2166 2167 llvm::TargetOptions Options; 2168 return std::unique_ptr<TargetMachine>(TheTarget->createTargetMachine( 2169 Triple, CPU, Features, Options, /*RelocModel=*/None, /*CodeModel=*/None, 2170 OptLevel)); 2171 } 2172 2173 /// Heuristically determine the best-performing unroll factor for \p CLI. This 2174 /// depends on the target processor. We are re-using the same heuristics as the 2175 /// LoopUnrollPass. 2176 static int32_t computeHeuristicUnrollFactor(CanonicalLoopInfo *CLI) { 2177 Function *F = CLI->getFunction(); 2178 2179 // Assume the user requests the most aggressive unrolling, even if the rest of 2180 // the code is optimized using a lower setting. 2181 CodeGenOpt::Level OptLevel = CodeGenOpt::Aggressive; 2182 std::unique_ptr<TargetMachine> TM = createTargetMachine(F, OptLevel); 2183 2184 FunctionAnalysisManager FAM; 2185 FAM.registerPass([]() { return TargetLibraryAnalysis(); }); 2186 FAM.registerPass([]() { return AssumptionAnalysis(); }); 2187 FAM.registerPass([]() { return DominatorTreeAnalysis(); }); 2188 FAM.registerPass([]() { return LoopAnalysis(); }); 2189 FAM.registerPass([]() { return ScalarEvolutionAnalysis(); }); 2190 FAM.registerPass([]() { return PassInstrumentationAnalysis(); }); 2191 TargetIRAnalysis TIRA; 2192 if (TM) 2193 TIRA = TargetIRAnalysis( 2194 [&](const Function &F) { return TM->getTargetTransformInfo(F); }); 2195 FAM.registerPass([&]() { return TIRA; }); 2196 2197 TargetIRAnalysis::Result &&TTI = TIRA.run(*F, FAM); 2198 ScalarEvolutionAnalysis SEA; 2199 ScalarEvolution &&SE = SEA.run(*F, FAM); 2200 DominatorTreeAnalysis DTA; 2201 DominatorTree &&DT = DTA.run(*F, FAM); 2202 LoopAnalysis LIA; 2203 LoopInfo &&LI = LIA.run(*F, FAM); 2204 AssumptionAnalysis ACT; 2205 AssumptionCache &&AC = ACT.run(*F, FAM); 2206 OptimizationRemarkEmitter ORE{F}; 2207 2208 Loop *L = LI.getLoopFor(CLI->getHeader()); 2209 assert(L && "Expecting CanonicalLoopInfo to be recognized as a loop"); 2210 2211 TargetTransformInfo::UnrollingPreferences UP = 2212 gatherUnrollingPreferences(L, SE, TTI, 2213 /*BlockFrequencyInfo=*/nullptr, 2214 /*ProfileSummaryInfo=*/nullptr, ORE, OptLevel, 2215 /*UserThreshold=*/None, 2216 /*UserCount=*/None, 2217 /*UserAllowPartial=*/true, 2218 /*UserAllowRuntime=*/true, 2219 /*UserUpperBound=*/None, 2220 /*UserFullUnrollMaxCount=*/None); 2221 2222 UP.Force = true; 2223 2224 // Account for additional optimizations taking place before the LoopUnrollPass 2225 // would unroll the loop.
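// Illustrative numbers only (the actual base values come from // gatherUnrollingPreferences): with the default factor of 1.5, a threshold of // 300 becomes 450, i.e. the cost model tolerates roughly 50% more estimated // instructions than the stock LoopUnrollPass would.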
2226 UP.Threshold *= UnrollThresholdFactor; 2227 UP.PartialThreshold *= UnrollThresholdFactor; 2228 2229 // Use normal unroll factors even if the rest of the code is optimized for 2230 // size. 2231 UP.OptSizeThreshold = UP.Threshold; 2232 UP.PartialOptSizeThreshold = UP.PartialThreshold; 2233 2234 LLVM_DEBUG(dbgs() << "Unroll heuristic thresholds:\n" 2235 << " Threshold=" << UP.Threshold << "\n" 2236 << " PartialThreshold=" << UP.PartialThreshold << "\n" 2237 << " OptSizeThreshold=" << UP.OptSizeThreshold << "\n" 2238 << " PartialOptSizeThreshold=" 2239 << UP.PartialOptSizeThreshold << "\n"); 2240 2241 // Disable peeling. 2242 TargetTransformInfo::PeelingPreferences PP = 2243 gatherPeelingPreferences(L, SE, TTI, 2244 /*UserAllowPeeling=*/false, 2245 /*UserAllowProfileBasedPeeling=*/false, 2246 /*UserUnrollingSpecficValues=*/false); 2247 2248 SmallPtrSet<const Value *, 32> EphValues; 2249 CodeMetrics::collectEphemeralValues(L, &AC, EphValues); 2250 2251 // Assume that reads and writes to stack variables can be eliminated by 2252 // Mem2Reg, SROA or LICM. That is, don't count them towards the loop body's 2253 // size. 2254 for (BasicBlock *BB : L->blocks()) { 2255 for (Instruction &I : *BB) { 2256 Value *Ptr; 2257 if (auto *Load = dyn_cast<LoadInst>(&I)) { 2258 Ptr = Load->getPointerOperand(); 2259 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 2260 Ptr = Store->getPointerOperand(); 2261 } else 2262 continue; 2263 2264 Ptr = Ptr->stripPointerCasts(); 2265 2266 if (auto *Alloca = dyn_cast<AllocaInst>(Ptr)) { 2267 if (Alloca->getParent() == &F->getEntryBlock()) 2268 EphValues.insert(&I); 2269 } 2270 } 2271 } 2272 2273 unsigned NumInlineCandidates; 2274 bool NotDuplicatable; 2275 bool Convergent; 2276 unsigned LoopSize = 2277 ApproximateLoopSize(L, NumInlineCandidates, NotDuplicatable, Convergent, 2278 TTI, EphValues, UP.BEInsns); 2279 LLVM_DEBUG(dbgs() << "Estimated loop size is " << LoopSize << "\n"); 2280 2281 // Loop is not unrollable if the loop contains certain instructions. 2282 if (NotDuplicatable || Convergent) { 2283 LLVM_DEBUG(dbgs() << "Loop not considered unrollable\n"); 2284 return 1; 2285 } 2286 2287 // TODO: Determine trip count of \p CLI if constant, computeUnrollCount might 2288 // be able to use it. 2289 int TripCount = 0; 2290 int MaxTripCount = 0; 2291 bool MaxOrZero = false; 2292 unsigned TripMultiple = 0; 2293 2294 bool UseUpperBound = false; 2295 computeUnrollCount(L, TTI, DT, &LI, SE, EphValues, &ORE, TripCount, 2296 MaxTripCount, MaxOrZero, TripMultiple, LoopSize, UP, PP, 2297 UseUpperBound); 2298 unsigned Factor = UP.Count; 2299 LLVM_DEBUG(dbgs() << "Suggesting unroll factor of " << Factor << "\n"); 2300 2301 // This function returns 1 to signal to not unroll a loop. 2302 if (Factor == 0) 2303 return 1; 2304 return Factor; 2305 } 2306 2307 void OpenMPIRBuilder::unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, 2308 int32_t Factor, 2309 CanonicalLoopInfo **UnrolledCLI) { 2310 assert(Factor >= 0 && "Unroll factor must not be negative"); 2311 2312 Function *F = Loop->getFunction(); 2313 LLVMContext &Ctx = F->getContext(); 2314 2315 // If the unrolled loop is not used for another loop-associated directive, it 2316 // is sufficient to add metadata for the LoopUnrollPass. 
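// As a sketch of the result (assuming a requested Factor of 4), the latch // terminator ends up carrying self-referential loop metadata roughly like: // br label %header, !llvm.loop !0 // !0 = distinct !{!0, !1, !2} // !1 = !{!"llvm.loop.unroll.enable"} // !2 = !{!"llvm.loop.unroll.count", i32 4} // which the LoopUnrollPass consumes later in the pipeline.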
2317 if (!UnrolledCLI) { 2318 SmallVector<Metadata *, 2> LoopMetadata; 2319 LoopMetadata.push_back( 2320 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable"))); 2321 2322 if (Factor >= 1) { 2323 ConstantAsMetadata *FactorConst = ConstantAsMetadata::get( 2324 ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor))); 2325 LoopMetadata.push_back(MDNode::get( 2326 Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})); 2327 } 2328 2329 addLoopMetadata(Loop, LoopMetadata); 2330 return; 2331 } 2332 2333 // Heuristically determine the unroll factor. 2334 if (Factor == 0) 2335 Factor = computeHeuristicUnrollFactor(Loop); 2336 2337 // No change required with unroll factor 1. 2338 if (Factor == 1) { 2339 *UnrolledCLI = Loop; 2340 return; 2341 } 2342 2343 assert(Factor >= 2 && 2344 "unrolling only makes sense with a factor of 2 or larger"); 2345 2346 Type *IndVarTy = Loop->getIndVarType(); 2347 2348 // Apply partial unrolling by tiling the loop by the unroll-factor, then fully 2349 // unroll the inner loop. 2350 Value *FactorVal = 2351 ConstantInt::get(IndVarTy, APInt(IndVarTy->getIntegerBitWidth(), Factor, 2352 /*isSigned=*/false)); 2353 std::vector<CanonicalLoopInfo *> LoopNest = 2354 tileLoops(DL, {Loop}, {FactorVal}); 2355 assert(LoopNest.size() == 2 && "Expect 2 loops after tiling"); 2356 *UnrolledCLI = LoopNest[0]; 2357 CanonicalLoopInfo *InnerLoop = LoopNest[1]; 2358 2359 // LoopUnrollPass can only fully unroll loops with constant trip count. 2360 // Unroll by the unroll factor with a fallback epilog for the remainder 2361 // iterations if necessary. 2362 ConstantAsMetadata *FactorConst = ConstantAsMetadata::get( 2363 ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor))); 2364 addLoopMetadata( 2365 InnerLoop, 2366 {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), 2367 MDNode::get( 2368 Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})}); 2369 2370 #ifndef NDEBUG 2371 (*UnrolledCLI)->assertOK(); 2372 #endif 2373 } 2374 2375 OpenMPIRBuilder::InsertPointTy 2376 OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc, 2377 llvm::Value *BufSize, llvm::Value *CpyBuf, 2378 llvm::Value *CpyFn, llvm::Value *DidIt) { 2379 if (!updateToLocation(Loc)) 2380 return Loc.IP; 2381 2382 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); 2383 Value *Ident = getOrCreateIdent(SrcLocStr); 2384 Value *ThreadId = getOrCreateThreadID(Ident); 2385 2386 llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt); 2387 2388 Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD}; 2389 2390 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate); 2391 Builder.CreateCall(Fn, Args); 2392 2393 return Builder.saveIP(); 2394 } 2395 2396 OpenMPIRBuilder::InsertPointTy 2397 OpenMPIRBuilder::createSingle(const LocationDescription &Loc, 2398 BodyGenCallbackTy BodyGenCB, 2399 FinalizeCallbackTy FiniCB, llvm::Value *DidIt) { 2400 2401 if (!updateToLocation(Loc)) 2402 return Loc.IP; 2403 2404 // If needed (i.e. 
not null), initialize `DidIt` with 0 2405 if (DidIt) { 2406 Builder.CreateStore(Builder.getInt32(0), DidIt); 2407 } 2408 2409 Directive OMPD = Directive::OMPD_single; 2410 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); 2411 Value *Ident = getOrCreateIdent(SrcLocStr); 2412 Value *ThreadId = getOrCreateThreadID(Ident); 2413 Value *Args[] = {Ident, ThreadId}; 2414 2415 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single); 2416 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args); 2417 2418 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single); 2419 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); 2420 2421 // generates the following: 2422 // if (__kmpc_single()) { 2423 // .... single region ... 2424 // __kmpc_end_single 2425 // } 2426 2427 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, 2428 /*Conditional*/ true, /*hasFinalize*/ true); 2429 } 2430 2431 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical( 2432 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, 2433 FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) { 2434 2435 if (!updateToLocation(Loc)) 2436 return Loc.IP; 2437 2438 Directive OMPD = Directive::OMPD_critical; 2439 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); 2440 Value *Ident = getOrCreateIdent(SrcLocStr); 2441 Value *ThreadId = getOrCreateThreadID(Ident); 2442 Value *LockVar = getOMPCriticalRegionLock(CriticalName); 2443 Value *Args[] = {Ident, ThreadId, LockVar}; 2444 2445 SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args)); 2446 Function *RTFn = nullptr; 2447 if (HintInst) { 2448 // Add Hint to entry Args and create call 2449 EnterArgs.push_back(HintInst); 2450 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint); 2451 } else { 2452 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical); 2453 } 2454 Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs); 2455 2456 Function *ExitRTLFn = 2457 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical); 2458 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); 2459 2460 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, 2461 /*Conditional*/ false, /*hasFinalize*/ true); 2462 } 2463 2464 OpenMPIRBuilder::InsertPointTy 2465 OpenMPIRBuilder::createOrderedDepend(const LocationDescription &Loc, 2466 InsertPointTy AllocaIP, unsigned NumLoops, 2467 ArrayRef<llvm::Value *> StoreValues, 2468 const Twine &Name, bool IsDependSource) { 2469 if (!updateToLocation(Loc)) 2470 return Loc.IP; 2471 2472 // Allocate space for vector and generate alloc instruction. 2473 auto *ArrI64Ty = ArrayType::get(Int64, NumLoops); 2474 Builder.restoreIP(AllocaIP); 2475 AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI64Ty, nullptr, Name); 2476 ArgsBase->setAlignment(Align(8)); 2477 Builder.restoreIP(Loc.IP); 2478 2479 // Store the index value with offset in depend vector. 
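// For illustration (a sketch assuming NumLoops == 2 and StoreValues {%i, %j} // of type i64), the loop below emits roughly: // %gep0 = getelementptr inbounds [2 x i64], [2 x i64]* %vec, i64 0, i64 0 // store i64 %i, i64* %gep0 // %gep1 = getelementptr inbounds [2 x i64], [2 x i64]* %vec, i64 0, i64 1 // store i64 %j, i64* %gep1 // before the address of element 0 is handed to the doacross runtime call.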
2480 for (unsigned I = 0; I < NumLoops; ++I) { 2481 Value *DependAddrGEPIter = Builder.CreateInBoundsGEP( 2482 ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(I)}); 2483 Builder.CreateStore(StoreValues[I], DependAddrGEPIter); 2484 } 2485 2486 Value *DependBaseAddrGEP = Builder.CreateInBoundsGEP( 2487 ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(0)}); 2488 2489 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); 2490 Value *Ident = getOrCreateIdent(SrcLocStr); 2491 Value *ThreadId = getOrCreateThreadID(Ident); 2492 Value *Args[] = {Ident, ThreadId, DependBaseAddrGEP}; 2493 2494 Function *RTLFn = nullptr; 2495 if (IsDependSource) 2496 RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_post); 2497 else 2498 RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_wait); 2499 Builder.CreateCall(RTLFn, Args); 2500 2501 return Builder.saveIP(); 2502 } 2503 2504 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createOrderedThreadsSimd( 2505 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, 2506 FinalizeCallbackTy FiniCB, bool IsThreads) { 2507 if (!updateToLocation(Loc)) 2508 return Loc.IP; 2509 2510 Directive OMPD = Directive::OMPD_ordered; 2511 Instruction *EntryCall = nullptr; 2512 Instruction *ExitCall = nullptr; 2513 2514 if (IsThreads) { 2515 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); 2516 Value *Ident = getOrCreateIdent(SrcLocStr); 2517 Value *ThreadId = getOrCreateThreadID(Ident); 2518 Value *Args[] = {Ident, ThreadId}; 2519 2520 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_ordered); 2521 EntryCall = Builder.CreateCall(EntryRTLFn, Args); 2522 2523 Function *ExitRTLFn = 2524 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_ordered); 2525 ExitCall = Builder.CreateCall(ExitRTLFn, Args); 2526 } 2527 2528 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, 2529 /*Conditional*/ false, /*hasFinalize*/ true); 2530 } 2531 2532 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion( 2533 Directive OMPD, Instruction *EntryCall, Instruction *ExitCall, 2534 BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional, 2535 bool HasFinalize, bool IsCancellable) { 2536 2537 if (HasFinalize) 2538 FinalizationStack.push_back({FiniCB, OMPD, IsCancellable}); 2539 2540 // Create inlined region's entry and body blocks, in preparation 2541 // for conditional creation. 2542 BasicBlock *EntryBB = Builder.GetInsertBlock(); 2543 Instruction *SplitPos = EntryBB->getTerminator(); 2544 if (!isa_and_nonnull<BranchInst>(SplitPos)) 2545 SplitPos = new UnreachableInst(Builder.getContext(), EntryBB); 2546 BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end"); 2547 BasicBlock *FiniBB = 2548 EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize"); 2549 2550 Builder.SetInsertPoint(EntryBB->getTerminator()); 2551 emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional); 2552 2553 // Generate the body. 2554 BodyGenCB(/* AllocaIP */ InsertPointTy(), 2555 /* CodeGenIP */ Builder.saveIP(), *FiniBB); 2556 2557 // If we didn't emit a branch to FiniBB during body generation, it means 2558 // FiniBB is unreachable (e.g. while(1);). Stop generating all the 2559 // unreachable blocks, and remove anything we are not going to use. 2560 auto SkipEmittingRegion = FiniBB->hasNPredecessors(0); 2561 if (SkipEmittingRegion) { 2562 FiniBB->eraseFromParent(); 2563 ExitCall->eraseFromParent(); 2564 // Discard finalization if we have it.
if (HasFinalize) { 2566 assert(!FinalizationStack.empty() && 2567 "Unexpected finalization stack state!"); 2568 FinalizationStack.pop_back(); 2569 } 2570 } else { 2571 // Emit the exit call and do any needed finalization. 2572 auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt()); 2573 assert(FiniBB->getTerminator()->getNumSuccessors() == 1 && 2574 FiniBB->getTerminator()->getSuccessor(0) == ExitBB && 2575 "Unexpected control flow graph state!!"); 2576 emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize); 2577 assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB && 2578 "Unexpected Control Flow State!"); 2579 MergeBlockIntoPredecessor(FiniBB); 2580 } 2581 2582 // If we are skipping the region of a non-conditional, remove the exit 2583 // block, and clear the builder's insertion point. 2584 assert(SplitPos->getParent() == ExitBB && 2585 "Unexpected Insertion point location!"); 2586 if (!Conditional && SkipEmittingRegion) { 2587 ExitBB->eraseFromParent(); 2588 Builder.ClearInsertionPoint(); 2589 } else { 2590 auto merged = MergeBlockIntoPredecessor(ExitBB); 2591 BasicBlock *ExitPredBB = SplitPos->getParent(); 2592 auto InsertBB = merged ? ExitPredBB : ExitBB; 2593 if (!isa_and_nonnull<BranchInst>(SplitPos)) 2594 SplitPos->eraseFromParent(); 2595 Builder.SetInsertPoint(InsertBB); 2596 } 2597 2598 return Builder.saveIP(); 2599 } 2600 2601 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry( 2602 Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) { 2603 // If there is nothing to do, return the current insertion point. 2604 if (!Conditional || !EntryCall) 2605 return Builder.saveIP(); 2606 2607 BasicBlock *EntryBB = Builder.GetInsertBlock(); 2608 Value *CallBool = Builder.CreateIsNotNull(EntryCall); 2609 auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body"); 2610 auto *UI = new UnreachableInst(Builder.getContext(), ThenBB); 2611 2612 // Emit thenBB and set the Builder's insertion point there for 2613 // body generation next. Place the block after the current block. 2614 Function *CurFn = EntryBB->getParent(); 2615 CurFn->getBasicBlockList().insertAfter(EntryBB->getIterator(), ThenBB); 2616 2617 // Move the Entry branch to the end of ThenBB, and replace it with a 2618 // conditional branch (if-stmt). 2619 Instruction *EntryBBTI = EntryBB->getTerminator(); 2620 Builder.CreateCondBr(CallBool, ThenBB, ExitBB); 2621 EntryBBTI->removeFromParent(); 2622 Builder.SetInsertPoint(UI); 2623 Builder.Insert(EntryBBTI); 2624 UI->eraseFromParent(); 2625 Builder.SetInsertPoint(ThenBB->getTerminator()); 2626 2627 // Return an insertion point to ExitBB.
2628 return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt()); 2629 } 2630 2631 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit( 2632 omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall, 2633 bool HasFinalize) { 2634 2635 Builder.restoreIP(FinIP); 2636 2637 // If there is finalization to do, emit it before the exit call. 2638 if (HasFinalize) { 2639 assert(!FinalizationStack.empty() && 2640 "Unexpected finalization stack state!"); 2641 2642 FinalizationInfo Fi = FinalizationStack.pop_back_val(); 2643 assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!"); 2644 2645 Fi.FiniCB(FinIP); 2646 2647 BasicBlock *FiniBB = FinIP.getBlock(); 2648 Instruction *FiniBBTI = FiniBB->getTerminator(); 2649 2650 // Set the Builder IP for call creation. 2651 Builder.SetInsertPoint(FiniBBTI); 2652 } 2653 2654 if (!ExitCall) 2655 return Builder.saveIP(); 2656 2657 // Place the exit call as the last instruction before the finalization block 2657 // terminator. 2658 ExitCall->removeFromParent(); 2659 Builder.Insert(ExitCall); 2660 2661 return IRBuilder<>::InsertPoint(ExitCall->getParent(), 2662 ExitCall->getIterator()); 2663 } 2664 2665 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCopyinClauseBlocks( 2666 InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr, 2667 llvm::IntegerType *IntPtrTy, bool BranchtoEnd) { 2668 if (!IP.isSet()) 2669 return IP; 2670 2671 IRBuilder<>::InsertPointGuard IPG(Builder); 2672 2673 // Creates the following CFG structure: 2674 // OMP_Entry : (MasterAddr != PrivateAddr)? 2675 // F T 2676 // | \ 2677 // | copyin.not.master 2678 // | / 2679 // v / 2680 // copyin.not.master.end 2681 // | 2682 // v 2683 // OMP.Entry.Next 2684 2685 BasicBlock *OMP_Entry = IP.getBlock(); 2686 Function *CurFn = OMP_Entry->getParent(); 2687 BasicBlock *CopyBegin = 2688 BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn); 2689 BasicBlock *CopyEnd = nullptr; 2690 2691 // If entry block is terminated, split to preserve the branch to following 2692 // basic block (i.e. OMP.Entry.Next), otherwise, leave everything as is.
2693 if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) { 2694 CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(), 2695 "copyin.not.master.end"); 2696 OMP_Entry->getTerminator()->eraseFromParent(); 2697 } else { 2698 CopyEnd = 2699 BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn); 2700 } 2701 2702 Builder.SetInsertPoint(OMP_Entry); 2703 Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy); 2704 Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy); 2705 Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr); 2706 Builder.CreateCondBr(cmp, CopyBegin, CopyEnd); 2707 2708 Builder.SetInsertPoint(CopyBegin); 2709 if (BranchtoEnd) 2710 Builder.SetInsertPoint(Builder.CreateBr(CopyEnd)); 2711 2712 return Builder.saveIP(); 2713 } 2714 2715 CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc, 2716 Value *Size, Value *Allocator, 2717 std::string Name) { 2718 IRBuilder<>::InsertPointGuard IPG(Builder); 2719 Builder.restoreIP(Loc.IP); 2720 2721 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); 2722 Value *Ident = getOrCreateIdent(SrcLocStr); 2723 Value *ThreadId = getOrCreateThreadID(Ident); 2724 Value *Args[] = {ThreadId, Size, Allocator}; 2725 2726 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc); 2727 2728 return Builder.CreateCall(Fn, Args, Name); 2729 } 2730 2731 CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc, 2732 Value *Addr, Value *Allocator, 2733 std::string Name) { 2734 IRBuilder<>::InsertPointGuard IPG(Builder); 2735 Builder.restoreIP(Loc.IP); 2736 2737 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); 2738 Value *Ident = getOrCreateIdent(SrcLocStr); 2739 Value *ThreadId = getOrCreateThreadID(Ident); 2740 Value *Args[] = {ThreadId, Addr, Allocator}; 2741 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free); 2742 return Builder.CreateCall(Fn, Args, Name); 2743 } 2744 2745 CallInst *OpenMPIRBuilder::createCachedThreadPrivate( 2746 const LocationDescription &Loc, llvm::Value *Pointer, 2747 llvm::ConstantInt *Size, const llvm::Twine &Name) { 2748 IRBuilder<>::InsertPointGuard IPG(Builder); 2749 Builder.restoreIP(Loc.IP); 2750 2751 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); 2752 Value *Ident = getOrCreateIdent(SrcLocStr); 2753 Value *ThreadId = getOrCreateThreadID(Ident); 2754 Constant *ThreadPrivateCache = 2755 getOrCreateOMPInternalVariable(Int8PtrPtr, Name); 2756 llvm::Value *Args[] = {Ident, ThreadId, Pointer, Size, ThreadPrivateCache}; 2757 2758 Function *Fn = 2759 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_threadprivate_cached); 2760 2761 return Builder.CreateCall(Fn, Args); 2762 } 2763 2764 OpenMPIRBuilder::InsertPointTy 2765 OpenMPIRBuilder::createTargetInit(const LocationDescription &Loc, bool IsSPMD, 2766 bool RequiresFullRuntime) { 2767 if (!updateToLocation(Loc)) 2768 return Loc.IP; 2769 2770 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); 2771 Value *Ident = getOrCreateIdent(SrcLocStr); 2772 ConstantInt *IsSPMDVal = ConstantInt::getSigned( 2773 IntegerType::getInt8Ty(Int8->getContext()), 2774 IsSPMD ? 
OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC); 2775 ConstantInt *UseGenericStateMachine = 2776 ConstantInt::getBool(Int32->getContext(), !IsSPMD); 2777 ConstantInt *RequiresFullRuntimeVal = 2778 ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime); 2779 2780 Function *Fn = getOrCreateRuntimeFunctionPtr( 2781 omp::RuntimeFunction::OMPRTL___kmpc_target_init); 2782 2783 CallInst *ThreadKind = Builder.CreateCall( 2784 Fn, {Ident, IsSPMDVal, UseGenericStateMachine, RequiresFullRuntimeVal}); 2785 2786 Value *ExecUserCode = Builder.CreateICmpEQ( 2787 ThreadKind, ConstantInt::get(ThreadKind->getType(), -1), 2788 "exec_user_code"); 2789 2790 // ThreadKind = __kmpc_target_init(...) 2791 // if (ThreadKind == -1) 2792 // user_code 2793 // else 2794 // return; 2795 2796 auto *UI = Builder.CreateUnreachable(); 2797 BasicBlock *CheckBB = UI->getParent(); 2798 BasicBlock *UserCodeEntryBB = CheckBB->splitBasicBlock(UI, "user_code.entry"); 2799 2800 BasicBlock *WorkerExitBB = BasicBlock::Create( 2801 CheckBB->getContext(), "worker.exit", CheckBB->getParent()); 2802 Builder.SetInsertPoint(WorkerExitBB); 2803 Builder.CreateRetVoid(); 2804 2805 auto *CheckBBTI = CheckBB->getTerminator(); 2806 Builder.SetInsertPoint(CheckBBTI); 2807 Builder.CreateCondBr(ExecUserCode, UI->getParent(), WorkerExitBB); 2808 2809 CheckBBTI->eraseFromParent(); 2810 UI->eraseFromParent(); 2811 2812 // Continue in the "user_code" block, see diagram above and in 2813 // openmp/libomptarget/deviceRTLs/common/include/target.h . 2814 return InsertPointTy(UserCodeEntryBB, UserCodeEntryBB->getFirstInsertionPt()); 2815 } 2816 2817 void OpenMPIRBuilder::createTargetDeinit(const LocationDescription &Loc, 2818 bool IsSPMD, 2819 bool RequiresFullRuntime) { 2820 if (!updateToLocation(Loc)) 2821 return; 2822 2823 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc); 2824 Value *Ident = getOrCreateIdent(SrcLocStr); 2825 ConstantInt *IsSPMDVal = ConstantInt::getSigned( 2826 IntegerType::getInt8Ty(Int8->getContext()), 2827 IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC); 2828 ConstantInt *RequiresFullRuntimeVal = 2829 ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime); 2830 2831 Function *Fn = getOrCreateRuntimeFunctionPtr( 2832 omp::RuntimeFunction::OMPRTL___kmpc_target_deinit); 2833 2834 Builder.CreateCall(Fn, {Ident, IsSPMDVal, RequiresFullRuntimeVal}); 2835 } 2836 2837 std::string OpenMPIRBuilder::getNameWithSeparators(ArrayRef<StringRef> Parts, 2838 StringRef FirstSeparator, 2839 StringRef Separator) { 2840 SmallString<128> Buffer; 2841 llvm::raw_svector_ostream OS(Buffer); 2842 StringRef Sep = FirstSeparator; 2843 for (StringRef Part : Parts) { 2844 OS << Sep << Part; 2845 Sep = Separator; 2846 } 2847 return OS.str().str(); 2848 } 2849 2850 Constant *OpenMPIRBuilder::getOrCreateOMPInternalVariable( 2851 llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) { 2852 // TODO: Replace the twine arg with stringref to get rid of the conversion 2853 // logic. However, this is taken from the current implementation in clang as 2854 // is. Since this method is used in many places exclusively for OMP internal 2855 // use, we will keep it as is temporarily until we move all users to the 2856 // builder and then, if possible, fix it everywhere in one go.
2857 SmallString<256> Buffer; 2858 llvm::raw_svector_ostream Out(Buffer); 2859 Out << Name; 2860 StringRef RuntimeName = Out.str(); 2861 auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first; 2862 if (Elem.second) { 2863 assert(Elem.second->getType()->getPointerElementType() == Ty && 2864 "OMP internal variable has different type than requested"); 2865 } else { 2866 // TODO: investigate the appropriate linkage type used for the global 2867 // variable for possibly changing that to internal or private, or maybe 2868 // create different versions of the function for different OMP internal 2869 // variables. 2870 Elem.second = new llvm::GlobalVariable( 2871 M, Ty, /*IsConstant*/ false, llvm::GlobalValue::CommonLinkage, 2872 llvm::Constant::getNullValue(Ty), Elem.first(), 2873 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal, 2874 AddressSpace); 2875 } 2876 2877 return Elem.second; 2878 } 2879 2880 Value *OpenMPIRBuilder::getOMPCriticalRegionLock(StringRef CriticalName) { 2881 std::string Prefix = Twine("gomp_critical_user_", CriticalName).str(); 2882 std::string Name = getNameWithSeparators({Prefix, "var"}, ".", "."); 2883 return getOrCreateOMPInternalVariable(KmpCriticalNameTy, Name); 2884 } 2885 2886 GlobalVariable * 2887 OpenMPIRBuilder::createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings, 2888 std::string VarName) { 2889 llvm::Constant *MaptypesArrayInit = 2890 llvm::ConstantDataArray::get(M.getContext(), Mappings); 2891 auto *MaptypesArrayGlobal = new llvm::GlobalVariable( 2892 M, MaptypesArrayInit->getType(), 2893 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MaptypesArrayInit, 2894 VarName); 2895 MaptypesArrayGlobal->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 2896 return MaptypesArrayGlobal; 2897 } 2898 2899 void OpenMPIRBuilder::createMapperAllocas(const LocationDescription &Loc, 2900 InsertPointTy AllocaIP, 2901 unsigned NumOperands, 2902 struct MapperAllocas &MapperAllocas) { 2903 if (!updateToLocation(Loc)) 2904 return; 2905 2906 auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands); 2907 auto *ArrI64Ty = ArrayType::get(Int64, NumOperands); 2908 Builder.restoreIP(AllocaIP); 2909 AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI8PtrTy); 2910 AllocaInst *Args = Builder.CreateAlloca(ArrI8PtrTy); 2911 AllocaInst *ArgSizes = Builder.CreateAlloca(ArrI64Ty); 2912 Builder.restoreIP(Loc.IP); 2913 MapperAllocas.ArgsBase = ArgsBase; 2914 MapperAllocas.Args = Args; 2915 MapperAllocas.ArgSizes = ArgSizes; 2916 } 2917 2918 void OpenMPIRBuilder::emitMapperCall(const LocationDescription &Loc, 2919 Function *MapperFunc, Value *SrcLocInfo, 2920 Value *MaptypesArg, Value *MapnamesArg, 2921 struct MapperAllocas &MapperAllocas, 2922 int64_t DeviceID, unsigned NumOperands) { 2923 if (!updateToLocation(Loc)) 2924 return; 2925 2926 auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands); 2927 auto *ArrI64Ty = ArrayType::get(Int64, NumOperands); 2928 Value *ArgsBaseGEP = 2929 Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.ArgsBase, 2930 {Builder.getInt32(0), Builder.getInt32(0)}); 2931 Value *ArgsGEP = 2932 Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.Args, 2933 {Builder.getInt32(0), Builder.getInt32(0)}); 2934 Value *ArgSizesGEP = 2935 Builder.CreateInBoundsGEP(ArrI64Ty, MapperAllocas.ArgSizes, 2936 {Builder.getInt32(0), Builder.getInt32(0)}); 2937 Value *NullPtr = Constant::getNullValue(Int8Ptr->getPointerTo()); 2938 Builder.CreateCall(MapperFunc, 2939 {SrcLocInfo, Builder.getInt64(DeviceID), 2940 Builder.getInt32(NumOperands), ArgsBaseGEP, 
bool OpenMPIRBuilder::checkAndEmitFlushAfterAtomic(
    const LocationDescription &Loc, llvm::AtomicOrdering AO, AtomicKind AK) {
  assert(!(AO == AtomicOrdering::NotAtomic ||
           AO == llvm::AtomicOrdering::Unordered) &&
         "Unexpected Atomic Ordering.");

  bool Flush = false;
  llvm::AtomicOrdering FlushAO = AtomicOrdering::Monotonic;

  switch (AK) {
  case Read:
    if (AO == AtomicOrdering::Acquire || AO == AtomicOrdering::AcquireRelease ||
        AO == AtomicOrdering::SequentiallyConsistent) {
      FlushAO = AtomicOrdering::Acquire;
      Flush = true;
    }
    break;
  case Write:
  case Update:
    if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease ||
        AO == AtomicOrdering::SequentiallyConsistent) {
      FlushAO = AtomicOrdering::Release;
      Flush = true;
    }
    break;
  case Capture:
    switch (AO) {
    case AtomicOrdering::Acquire:
      FlushAO = AtomicOrdering::Acquire;
      Flush = true;
      break;
    case AtomicOrdering::Release:
      FlushAO = AtomicOrdering::Release;
      Flush = true;
      break;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::SequentiallyConsistent:
      FlushAO = AtomicOrdering::AcquireRelease;
      Flush = true;
      break;
    default:
      // Do nothing; no flush is required for the remaining orderings.
      break;
    }
  }

  if (Flush) {
    // The flush runtime call does not yet take a memory ordering argument.
    // We already resolve which ordering the flush would need, but for now we
    // can only issue the plain flush call.
    // TODO: pass `FlushAO` once memory ordering support is added.
    (void)FlushAO;
    emitFlush(Loc);
  }

  // For AO == AtomicOrdering::Monotonic and all other case combinations, do
  // nothing.
  return Flush;
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc,
                                  AtomicOpValue &X, AtomicOpValue &V,
                                  AtomicOrdering AO) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  Type *XTy = X.Var->getType();
  assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
  Type *XElemTy = XTy->getPointerElementType();
  assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
          XElemTy->isPointerTy()) &&
         "OMP atomic read expected a scalar type");

  Value *XRead = nullptr;

  if (XElemTy->isIntegerTy()) {
    LoadInst *XLD =
        Builder.CreateLoad(XElemTy, X.Var, X.IsVolatile, "omp.atomic.read");
    XLD->setAtomic(AO);
    XRead = cast<Value>(XLD);
  } else {
    // We need to perform the atomic operation as an integer, so bitcast the
    // pointer and convert the loaded value back afterwards.
    unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
    IntegerType *IntCastTy =
        IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
    Value *XBCast = Builder.CreateBitCast(
        X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.src.int.cast");
    LoadInst *XLoad =
        Builder.CreateLoad(IntCastTy, XBCast, X.IsVolatile, "omp.atomic.load");
    XLoad->setAtomic(AO);
    if (XElemTy->isFloatingPointTy()) {
      XRead = Builder.CreateBitCast(XLoad, XElemTy, "atomic.flt.cast");
    } else {
      XRead = Builder.CreateIntToPtr(XLoad, XElemTy, "atomic.ptr.cast");
    }
  }
  checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Read);
  Builder.CreateStore(XRead, V.Var, V.IsVolatile);
  return Builder.saveIP();
}
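
// For illustration only: an atomic read of an i32 location `x` into `v` with
// acquire ordering emitted by createAtomicRead above looks roughly like
//
//   %omp.atomic.read = load atomic i32, i32* %x acquire, align 4
//   store i32 %omp.atomic.read, i32* %v
//
// followed by any flush emitted by checkAndEmitFlushAfterAtomic. Float and
// pointer element types take the bitcast path and are loaded through an
// integer of equal width.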
OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc,
                                   AtomicOpValue &X, Value *Expr,
                                   AtomicOrdering AO) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  Type *XTy = X.Var->getType();
  assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
  Type *XElemTy = XTy->getPointerElementType();
  assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
          XElemTy->isPointerTy()) &&
         "OMP atomic write expected a scalar type");

  if (XElemTy->isIntegerTy()) {
    StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile);
    XSt->setAtomic(AO);
  } else {
    // We need to perform the atomic operation as an integer, so bitcast both
    // the destination pointer and the value to be stored.
    unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
    IntegerType *IntCastTy =
        IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
    Value *XBCast = Builder.CreateBitCast(
        X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.dst.int.cast");
    Value *ExprCast =
        Builder.CreateBitCast(Expr, IntCastTy, "atomic.src.int.cast");
    StoreInst *XSt = Builder.CreateStore(ExprCast, XBCast, X.IsVolatile);
    XSt->setAtomic(AO);
  }

  checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Write);
  return Builder.saveIP();
}

OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicUpdate(
    const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
    Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
    AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  LLVM_DEBUG({
    Type *XTy = X.Var->getType();
    assert(XTy->isPointerTy() &&
           "OMP Atomic expects a pointer to target memory");
    Type *XElemTy = XTy->getPointerElementType();
    assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
            XElemTy->isPointerTy()) &&
           "OMP atomic update expected a scalar type");
    assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&
           (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) &&
           "OpenMP atomic does not support LT or GT operations");
  });

  emitAtomicUpdate(AllocIP, X.Var, Expr, AO, RMWOp, UpdateOp, X.IsVolatile,
                   IsXBinopExpr);
  checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update);
  return Builder.saveIP();
}

Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2,
                                               AtomicRMWInst::BinOp RMWOp) {
  switch (RMWOp) {
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Src1, Src2);
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Src1, Src2);
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Src1, Src2);
  case AtomicRMWInst::Nand:
    // Nand is the bitwise negation of the conjunction: ~(Src1 & Src2).
    return Builder.CreateNot(Builder.CreateAnd(Src1, Src2));
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Src1, Src2);
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Src1, Src2);
  case AtomicRMWInst::Xchg:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
  case AtomicRMWInst::BAD_BINOP:
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
    llvm_unreachable("Unsupported atomic update operation");
  }
  llvm_unreachable("Unsupported atomic update operation");
}
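
// emitAtomicUpdate below uses a single atomicrmw when the operation and
// element type allow it, and otherwise falls back to a compare-exchange loop.
// Schematically, for an update through an integer representation iN (a
// sketch; block names follow the ".atomic.cont"/".atomic.exit" convention
// used in the code):
//
//   %old = load atomic iN, iN* %x                   ; in the current block
// cont:
//   %phi = phi iN [ %old, %entry ], [ %prev, %cont ]
//   %upd = <UpdateOp applied to %phi>
//   %pair = cmpxchg iN* %x, iN %phi, iN %upd
//   %prev = extractvalue { iN, i1 } %pair, 0
//   %ok = extractvalue { iN, i1 } %pair, 1
//   br i1 %ok, label %exit, label %cont
// exit: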
std::pair<Value *, Value *>
OpenMPIRBuilder::emitAtomicUpdate(Instruction *AllocIP, Value *X, Value *Expr,
                                  AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
                                  AtomicUpdateCallbackTy &UpdateOp,
                                  bool VolatileX, bool IsXBinopExpr) {
  Type *XElemTy = X->getType()->getPointerElementType();

  bool DoCmpExch =
      ((RMWOp == AtomicRMWInst::BAD_BINOP) || (RMWOp == AtomicRMWInst::FAdd)) ||
      (RMWOp == AtomicRMWInst::FSub) ||
      (RMWOp == AtomicRMWInst::Sub && !IsXBinopExpr);

  std::pair<Value *, Value *> Res;
  if (XElemTy->isIntegerTy() && !DoCmpExch) {
    Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO);
    // Res.second is only needed for postfix captures. Generate it anyway for
    // consistency with the else branch; any DCE pass will remove it if unused.
    Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp);
  } else {
    unsigned Addrspace = cast<PointerType>(X->getType())->getAddressSpace();
    IntegerType *IntCastTy =
        IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
    Value *XBCast =
        Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
    LoadInst *OldVal =
        Builder.CreateLoad(IntCastTy, XBCast, X->getName() + ".atomic.load");
    OldVal->setAtomic(AO);
    // CurBB
    // |     /---\
    // ContBB    |
    // |     \---/
    // ExitBB
    BasicBlock *CurBB = Builder.GetInsertBlock();
    Instruction *CurBBTI = CurBB->getTerminator();
    CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable();
    BasicBlock *ExitBB =
        CurBB->splitBasicBlock(CurBBTI, X->getName() + ".atomic.exit");
    BasicBlock *ContBB = CurBB->splitBasicBlock(CurBB->getTerminator(),
                                                X->getName() + ".atomic.cont");
    ContBB->getTerminator()->eraseFromParent();
    Builder.SetInsertPoint(ContBB);
    llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(), 2);
    PHI->addIncoming(OldVal, CurBB);
    AllocaInst *NewAtomicAddr = Builder.CreateAlloca(XElemTy);
    NewAtomicAddr->setName(X->getName() + ".new.val");
    NewAtomicAddr->moveBefore(AllocIP);
    IntegerType *NewAtomicCastTy =
        IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
    bool IsIntTy = XElemTy->isIntegerTy();
    Value *NewAtomicIntAddr =
        (IsIntTy)
            ? NewAtomicAddr
            : Builder.CreateBitCast(NewAtomicAddr,
                                    NewAtomicCastTy->getPointerTo(Addrspace));
    Value *OldExprVal = PHI;
    if (!IsIntTy) {
      if (XElemTy->isFloatingPointTy()) {
        OldExprVal = Builder.CreateBitCast(PHI, XElemTy,
                                           X->getName() + ".atomic.fltCast");
      } else {
        OldExprVal = Builder.CreateIntToPtr(PHI, XElemTy,
                                            X->getName() + ".atomic.ptrCast");
      }
    }

    Value *Upd = UpdateOp(OldExprVal, Builder);
    Builder.CreateStore(Upd, NewAtomicAddr);
    // Reload the updated value in its integer representation for the
    // compare-exchange below.
    LoadInst *DesiredVal = Builder.CreateLoad(IntCastTy, NewAtomicIntAddr);
    Value *XAddr =
        (IsIntTy)
            ? X
            : Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
    AtomicOrdering Failure =
        llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
    // The compare-exchange operates on the integer representation; PHI holds
    // the last integer value observed at X.
    AtomicCmpXchgInst *Result = Builder.CreateAtomicCmpXchg(
        XAddr, PHI, DesiredVal, llvm::MaybeAlign(), AO, Failure);
    Result->setVolatile(VolatileX);
    Value *PreviousVal = Builder.CreateExtractValue(Result, /*Idxs=*/0);
    Value *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1);
    PHI->addIncoming(PreviousVal, Builder.GetInsertBlock());
    Builder.CreateCondBr(SuccessFailureVal, ExitBB, ContBB);

    Res.first = OldExprVal;
    Res.second = Upd;

    // Set the insertion point in the exit block.
    Instruction *ExitTI = ExitBB->getTerminator();
    if (isa<UnreachableInst>(ExitTI)) {
      // The unreachable is only the placeholder terminator created above;
      // erase it and insert at the end of the now-open exit block.
      CurBBTI->eraseFromParent();
      Builder.SetInsertPoint(ExitBB);
    } else {
      Builder.SetInsertPoint(ExitTI);
    }
  }

  return Res;
}
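
// To illustrate the two capture forms handled by createAtomicCapture below
// (source-level sketch):
//
//   #pragma omp atomic capture        // postfix: v receives the value of x
//   { v = x; x = x + expr; }          // before the update, i.e. Result.first
//
//   #pragma omp atomic capture        // prefix: v receives the updated
//   { x = x + expr; v = x; }          // value, i.e. Result.second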
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCapture(
    const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
    AtomicOpValue &V, Value *Expr, AtomicOrdering AO,
    AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp,
    bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  LLVM_DEBUG({
    Type *XTy = X.Var->getType();
    assert(XTy->isPointerTy() &&
           "OMP Atomic expects a pointer to target memory");
    Type *XElemTy = XTy->getPointerElementType();
    assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
            XElemTy->isPointerTy()) &&
           "OMP atomic capture expected a scalar type");
    assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&
           "OpenMP atomic does not support LT or GT operations");
  });

  // If UpdateExpr is false, i.e. 'x' is assigned some 'expr' that is not
  // based on 'x', then 'x' is simply rewritten atomically with 'expr' via an
  // exchange.
  AtomicRMWInst::BinOp AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg);
  std::pair<Value *, Value *> Result = emitAtomicUpdate(
      AllocIP, X.Var, Expr, AO, AtomicOp, UpdateOp, X.IsVolatile, IsXBinopExpr);

  Value *CapturedVal = (IsPostfixUpdate ? Result.first : Result.second);
  Builder.CreateStore(CapturedVal, V.Var, V.IsVolatile);

  checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Capture);
  return Builder.saveIP();
}

GlobalVariable *
OpenMPIRBuilder::createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
                                       std::string VarName) {
  llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get(
      llvm::ArrayType::get(
          llvm::Type::getInt8Ty(M.getContext())->getPointerTo(), Names.size()),
      Names);
  auto *MapNamesArrayGlobal = new llvm::GlobalVariable(
      M, MapNamesArrayInit->getType(),
      /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MapNamesArrayInit,
      VarName);
  return MapNamesArrayGlobal;
}
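
// For illustration, with two name constants the global emitted above looks
// roughly like the following (the symbol name is supplied by the caller via
// VarName; the @.str globals are hypothetical stand-ins for whatever
// constants the caller passes):
//
//   @VarName = private constant [2 x i8*] [i8* @.str.0, i8* @.str.1]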
// Create all simple and struct types exposed by the runtime and remember
// the llvm::PointerTypes of them for easy access later.
void OpenMPIRBuilder::initializeTypes(Module &M) {
  LLVMContext &Ctx = M.getContext();
  StructType *T;
#define OMP_TYPE(VarName, InitValue) VarName = InitValue;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize)                            \
  VarName##Ty = ArrayType::get(ElemTy, ArraySize);                            \
  VarName##PtrTy = PointerType::getUnqual(VarName##Ty);
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...)                 \
  VarName = FunctionType::get(ReturnType, {__VA_ARGS__}, IsVarArg);           \
  VarName##Ptr = PointerType::getUnqual(VarName);
#define OMP_STRUCT_TYPE(VarName, StructName, ...)                             \
  T = StructType::getTypeByName(Ctx, StructName);                             \
  if (!T)                                                                     \
    T = StructType::create(Ctx, {__VA_ARGS__}, StructName);                   \
  VarName = T;                                                                \
  VarName##Ptr = PointerType::getUnqual(T);
#include "llvm/Frontend/OpenMP/OMPKinds.def"
}

void OpenMPIRBuilder::OutlineInfo::collectBlocks(
    SmallPtrSetImpl<BasicBlock *> &BlockSet,
    SmallVectorImpl<BasicBlock *> &BlockVector) {
  SmallVector<BasicBlock *, 32> Worklist;
  BlockSet.insert(EntryBB);
  BlockSet.insert(ExitBB);

  Worklist.push_back(EntryBB);
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();
    BlockVector.push_back(BB);
    for (BasicBlock *SuccBB : successors(BB))
      if (BlockSet.insert(SuccBB).second)
        Worklist.push_back(SuccBB);
  }
}

void CanonicalLoopInfo::collectControlBlocks(
    SmallVectorImpl<BasicBlock *> &BBs) {
  // We only count those BBs as control blocks that we do not need to traverse
  // the CFG to find, i.e. not the loop body, which can contain arbitrary
  // control flow. For consistency, this also means we do not add the Body
  // block, which is just the entry to the body code.
  BBs.reserve(BBs.size() + 6);
  BBs.append({getPreheader(), Header, Cond, Latch, Exit, getAfter()});
}

BasicBlock *CanonicalLoopInfo::getPreheader() const {
  assert(isValid() && "Requires a valid canonical loop");
  for (BasicBlock *Pred : predecessors(Header)) {
    if (Pred != Latch)
      return Pred;
  }
  llvm_unreachable("Missing preheader");
}
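
// Shape of the canonical loop that assertOK() below verifies; the Body block
// may contain arbitrary control flow, everything else is fixed:
//
//        Preheader
//            |
//    +--> Header
//    |       |
//    |     Cond -----> Exit
//    |       |           |
//    |     Body        After
//    |       |
//    +---- Latch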
void CanonicalLoopInfo::assertOK() const {
#ifndef NDEBUG
  // No constraints if this object currently does not describe a loop.
  if (!isValid())
    return;

  BasicBlock *Preheader = getPreheader();
  BasicBlock *Body = getBody();
  BasicBlock *After = getAfter();

  // Verify standard control-flow we use for OpenMP loops.
  assert(Preheader);
  assert(isa<BranchInst>(Preheader->getTerminator()) &&
         "Preheader must terminate with unconditional branch");
  assert(Preheader->getSingleSuccessor() == Header &&
         "Preheader must jump to header");

  assert(Header);
  assert(isa<BranchInst>(Header->getTerminator()) &&
         "Header must terminate with unconditional branch");
  assert(Header->getSingleSuccessor() == Cond &&
         "Header must jump to exiting block");

  assert(Cond);
  assert(Cond->getSinglePredecessor() == Header &&
         "Exiting block only reachable from header");

  assert(isa<BranchInst>(Cond->getTerminator()) &&
         "Exiting block must terminate with conditional branch");
  assert(size(successors(Cond)) == 2 &&
         "Exiting block must have two successors");
  assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body &&
         "Exiting block's first successor must jump to the body");
  assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit &&
         "Exiting block's second successor must exit the loop");

  assert(Body);
  assert(Body->getSinglePredecessor() == Cond &&
         "Body only reachable from exiting block");
  assert(!isa<PHINode>(Body->front()));

  assert(Latch);
  assert(isa<BranchInst>(Latch->getTerminator()) &&
         "Latch must terminate with unconditional branch");
  assert(Latch->getSingleSuccessor() == Header && "Latch must jump to header");
  // TODO: To support simple redirecting of the end of body code that has
  // multiple predecessors, introduce another auxiliary basic block, analogous
  // to the preheader and after blocks.
  assert(Latch->getSinglePredecessor() != nullptr);
  assert(!isa<PHINode>(Latch->front()));

  assert(Exit);
  assert(isa<BranchInst>(Exit->getTerminator()) &&
         "Exit block must terminate with unconditional branch");
  assert(Exit->getSingleSuccessor() == After &&
         "Exit block must jump to after block");

  assert(After);
  assert(After->getSinglePredecessor() == Exit &&
         "After block only reachable from exit block");
  assert(After->empty() || !isa<PHINode>(After->front()));

  Instruction *IndVar = getIndVar();
  assert(IndVar && "Canonical induction variable not found?");
  assert(isa<IntegerType>(IndVar->getType()) &&
         "Induction variable must be an integer");
  assert(cast<PHINode>(IndVar)->getParent() == Header &&
         "Induction variable must be a PHI in the loop header");
  assert(cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader);
  assert(
      cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero());
  assert(cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch);

  auto *NextIndVar = cast<PHINode>(IndVar)->getIncomingValue(1);
  assert(cast<Instruction>(NextIndVar)->getParent() == Latch);
  assert(cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add);
  assert(cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar);
  assert(cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1))
             ->isOne());

  Value *TripCount = getTripCount();
  assert(TripCount && "Loop trip count not found?");
  assert(IndVar->getType() == TripCount->getType() &&
         "Trip count and induction variable must have the same type");

  auto *CmpI = cast<CmpInst>(&Cond->front());
  assert(CmpI->getPredicate() == CmpInst::ICMP_ULT &&
         "Exit condition must be an unsigned less-than comparison");
  assert(CmpI->getOperand(0) == IndVar &&
         "Exit condition must compare the induction variable");
  assert(CmpI->getOperand(1) == TripCount &&
         "Exit condition must compare with the trip count");
#endif
}
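
// Schematic IR for the induction variable pattern that assertOK() checks
// (illustrative value and block names):
//
// header:
//   %iv = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
//   br label %cond
// cond:
//   %cmp = icmp ult i32 %iv, %tripcount
//   br i1 %cmp, label %body, label %exit
// latch:
//   %iv.next = add i32 %iv, 1
//   br label %header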
void CanonicalLoopInfo::invalidate() {
  Header = nullptr;
  Cond = nullptr;
  Latch = nullptr;
  Exit = nullptr;
}