//===- Function.cpp - Implement the Global object classes ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Function class for the IR library.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Function.h"
#include "SymbolTableListTraitsImpl.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/AbstractCallSite.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/IntrinsicsDirectX.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/IntrinsicsMips.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/IR/IntrinsicsVE.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/IntrinsicsXCore.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ModRef.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>

using namespace llvm;
using ProfileCount = Function::ProfileCount;

// Explicit instantiations of SymbolTableListTraits since some of the methods
// are not in the public header file...
template class llvm::SymbolTableListTraits<BasicBlock>;

static cl::opt<unsigned> NonGlobalValueMaxNameSize(
    "non-global-value-max-name-size", cl::Hidden, cl::init(1024),
    cl::desc("Maximum size for the name of non-global values."));

//===----------------------------------------------------------------------===//
// Argument Implementation
//===----------------------------------------------------------------------===//

Argument::Argument(Type *Ty, const Twine &Name, Function *Par, unsigned ArgNo)
    : Value(Ty, Value::ArgumentVal), Parent(Par), ArgNo(ArgNo) {
  setName(Name);
}

void Argument::setParent(Function *parent) {
  Parent = parent;
}

bool Argument::hasNonNullAttr(bool AllowUndefOrPoison) const {
  if (!getType()->isPointerTy()) return false;
  if (getParent()->hasParamAttribute(getArgNo(), Attribute::NonNull) &&
      (AllowUndefOrPoison ||
       getParent()->hasParamAttribute(getArgNo(), Attribute::NoUndef)))
    return true;
  else if (getDereferenceableBytes() > 0 &&
           !NullPointerIsDefined(getParent(),
                                 getType()->getPointerAddressSpace()))
    return true;
  return false;
}

bool Argument::hasByValAttr() const {
  if (!getType()->isPointerTy()) return false;
  return hasAttribute(Attribute::ByVal);
}

bool Argument::hasByRefAttr() const {
  if (!getType()->isPointerTy())
    return false;
  return hasAttribute(Attribute::ByRef);
}

bool Argument::hasSwiftSelfAttr() const {
  return getParent()->hasParamAttribute(getArgNo(), Attribute::SwiftSelf);
}

bool Argument::hasSwiftErrorAttr() const {
  return getParent()->hasParamAttribute(getArgNo(), Attribute::SwiftError);
}

bool Argument::hasInAllocaAttr() const {
  if (!getType()->isPointerTy()) return false;
  return hasAttribute(Attribute::InAlloca);
}

bool Argument::hasPreallocatedAttr() const {
  if (!getType()->isPointerTy())
    return false;
  return hasAttribute(Attribute::Preallocated);
}

bool Argument::hasPassPointeeByValueCopyAttr() const {
  if (!getType()->isPointerTy()) return false;
  AttributeList Attrs = getParent()->getAttributes();
  return Attrs.hasParamAttr(getArgNo(), Attribute::ByVal) ||
         Attrs.hasParamAttr(getArgNo(), Attribute::InAlloca) ||
         Attrs.hasParamAttr(getArgNo(), Attribute::Preallocated);
}

bool Argument::hasPointeeInMemoryValueAttr() const {
  if (!getType()->isPointerTy())
    return false;
  AttributeList Attrs = getParent()->getAttributes();
  return Attrs.hasParamAttr(getArgNo(), Attribute::ByVal) ||
         Attrs.hasParamAttr(getArgNo(), Attribute::StructRet) ||
         Attrs.hasParamAttr(getArgNo(), Attribute::InAlloca) ||
         Attrs.hasParamAttr(getArgNo(), Attribute::Preallocated) ||
         Attrs.hasParamAttr(getArgNo(), Attribute::ByRef);
}

/// For a byval, sret, inalloca, or preallocated parameter, get the in-memory
/// parameter type.
static Type *getMemoryParamAllocType(AttributeSet ParamAttrs) {
  // FIXME: All the type carrying attributes are mutually exclusive, so there
  // should be a single query to get the stored type that handles any of them.
  if (Type *ByValTy = ParamAttrs.getByValType())
    return ByValTy;
  if (Type *ByRefTy = ParamAttrs.getByRefType())
    return ByRefTy;
  if (Type *PreAllocTy = ParamAttrs.getPreallocatedType())
    return PreAllocTy;
  if (Type *InAllocaTy = ParamAttrs.getInAllocaType())
    return InAllocaTy;
  if (Type *SRetTy = ParamAttrs.getStructRetType())
    return SRetTy;

  return nullptr;
}

uint64_t Argument::getPassPointeeByValueCopySize(const DataLayout &DL) const {
  AttributeSet ParamAttrs =
      getParent()->getAttributes().getParamAttrs(getArgNo());
  if (Type *MemTy = getMemoryParamAllocType(ParamAttrs))
    return DL.getTypeAllocSize(MemTy);
  return 0;
}

Type *Argument::getPointeeInMemoryValueType() const {
  AttributeSet ParamAttrs =
      getParent()->getAttributes().getParamAttrs(getArgNo());
  return getMemoryParamAllocType(ParamAttrs);
}

uint64_t Argument::getParamAlignment() const {
  assert(getType()->isPointerTy() && "Only pointers have alignments");
  return getParent()->getParamAlignment(getArgNo());
}

MaybeAlign Argument::getParamAlign() const {
  assert(getType()->isPointerTy() && "Only pointers have alignments");
  return getParent()->getParamAlign(getArgNo());
}

MaybeAlign Argument::getParamStackAlign() const {
  return getParent()->getParamStackAlign(getArgNo());
}

Type *Argument::getParamByValType() const {
  assert(getType()->isPointerTy() && "Only pointers have byval types");
  return getParent()->getParamByValType(getArgNo());
}

Type *Argument::getParamStructRetType() const {
  assert(getType()->isPointerTy() && "Only pointers have sret types");
  return getParent()->getParamStructRetType(getArgNo());
}

Type *Argument::getParamByRefType() const {
  assert(getType()->isPointerTy() && "Only pointers have byref types");
  return getParent()->getParamByRefType(getArgNo());
}

Type *Argument::getParamInAllocaType() const {
  assert(getType()->isPointerTy() && "Only pointers have inalloca types");
  return getParent()->getParamInAllocaType(getArgNo());
}

uint64_t Argument::getDereferenceableBytes() const {
  assert(getType()->isPointerTy() &&
         "Only pointers have dereferenceable bytes");
  return getParent()->getParamDereferenceableBytes(getArgNo());
}

uint64_t Argument::getDereferenceableOrNullBytes() const {
  assert(getType()->isPointerTy() &&
         "Only pointers have dereferenceable bytes");
  return getParent()->getParamDereferenceableOrNullBytes(getArgNo());
}

bool Argument::hasNestAttr() const {
  if (!getType()->isPointerTy()) return false;
  return hasAttribute(Attribute::Nest);
}

bool Argument::hasNoAliasAttr() const {
  if (!getType()->isPointerTy()) return false;
  return hasAttribute(Attribute::NoAlias);
}

bool Argument::hasNoCaptureAttr() const {
  if (!getType()->isPointerTy()) return false;
  return hasAttribute(Attribute::NoCapture);
}

bool Argument::hasNoFreeAttr() const {
  if (!getType()->isPointerTy()) return false;
  return hasAttribute(Attribute::NoFree);
}

bool Argument::hasStructRetAttr() const {
  if (!getType()->isPointerTy()) return false;
  return hasAttribute(Attribute::StructRet);
}

bool Argument::hasInRegAttr() const {
  return hasAttribute(Attribute::InReg);
}

bool Argument::hasReturnedAttr() const {
  return hasAttribute(Attribute::Returned);
}

bool Argument::hasZExtAttr() const {
  return hasAttribute(Attribute::ZExt);
}

bool Argument::hasSExtAttr() const {
  return hasAttribute(Attribute::SExt);
}

bool Argument::onlyReadsMemory() const {
  AttributeList Attrs = getParent()->getAttributes();
  return Attrs.hasParamAttr(getArgNo(), Attribute::ReadOnly) ||
         Attrs.hasParamAttr(getArgNo(), Attribute::ReadNone);
}

void Argument::addAttrs(AttrBuilder &B) {
  AttributeList AL = getParent()->getAttributes();
  AL = AL.addParamAttributes(Parent->getContext(), getArgNo(), B);
  getParent()->setAttributes(AL);
}

void Argument::addAttr(Attribute::AttrKind Kind) {
  getParent()->addParamAttr(getArgNo(), Kind);
}

void Argument::addAttr(Attribute Attr) {
  getParent()->addParamAttr(getArgNo(), Attr);
}

void Argument::removeAttr(Attribute::AttrKind Kind) {
  getParent()->removeParamAttr(getArgNo(), Kind);
}

void Argument::removeAttrs(const AttributeMask &AM) {
  AttributeList AL = getParent()->getAttributes();
  AL = AL.removeParamAttributes(Parent->getContext(), getArgNo(), AM);
  getParent()->setAttributes(AL);
}

bool Argument::hasAttribute(Attribute::AttrKind Kind) const {
  return getParent()->hasParamAttribute(getArgNo(), Kind);
}

Attribute Argument::getAttribute(Attribute::AttrKind Kind) const {
  return getParent()->getParamAttribute(getArgNo(), Kind);
}
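
// Illustrative sketch (not exercised in this file): parameter attributes live
// on the parent Function's AttributeList, so the Argument helpers above are
// thin wrappers. Assuming a Function *F with at least one parameter:
//
//   Argument *A = F->getArg(0);
//   A->addAttr(Attribute::NoUndef);        // forwards to F->addParamAttr(0, ...)
//   bool Known = A->hasAttribute(Attribute::NoUndef); // true after the call above
//   A->removeAttr(Attribute::NoUndef);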

//===----------------------------------------------------------------------===//
// Helper Methods in Function
//===----------------------------------------------------------------------===//

LLVMContext &Function::getContext() const {
  return getType()->getContext();
}

unsigned Function::getInstructionCount() const {
  unsigned NumInstrs = 0;
  for (const BasicBlock &BB : BasicBlocks)
    NumInstrs += std::distance(BB.instructionsWithoutDebug().begin(),
                               BB.instructionsWithoutDebug().end());
  return NumInstrs;
}

Function *Function::Create(FunctionType *Ty, LinkageTypes Linkage,
                           const Twine &N, Module &M) {
  return Create(Ty, Linkage, M.getDataLayout().getProgramAddressSpace(), N, &M);
}

Function *Function::createWithDefaultAttr(FunctionType *Ty,
                                          LinkageTypes Linkage,
                                          unsigned AddrSpace, const Twine &N,
                                          Module *M) {
  auto *F = new Function(Ty, Linkage, AddrSpace, N, M);
  AttrBuilder B(F->getContext());
  UWTableKind UWTable = M->getUwtable();
  if (UWTable != UWTableKind::None)
    B.addUWTableAttr(UWTable);
  switch (M->getFramePointer()) {
  case FramePointerKind::None:
    // 0 ("none") is the default.
    break;
  case FramePointerKind::NonLeaf:
    B.addAttribute("frame-pointer", "non-leaf");
    break;
  case FramePointerKind::All:
    B.addAttribute("frame-pointer", "all");
    break;
  }
  if (M->getModuleFlag("function_return_thunk_extern"))
    B.addAttribute(Attribute::FnRetThunkExtern);
  F->addFnAttrs(B);
  return F;
}
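
// Illustrative sketch: createWithDefaultAttr() is Create() plus the
// module-level defaults applied above (uwtable, frame-pointer, return thunk).
// Ctx, M and "callee" below are placeholders, not names from this file:
//
//   FunctionType *FTy =
//       FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/false);
//   Function *F = Function::createWithDefaultAttr(
//       FTy, GlobalValue::ExternalLinkage, /*AddrSpace=*/0, "callee", &M);
//   // F now carries e.g. "frame-pointer"="all" if the module asked for it.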

void Function::removeFromParent() {
  getParent()->getFunctionList().remove(getIterator());
}

void Function::eraseFromParent() {
  getParent()->getFunctionList().erase(getIterator());
}

//===----------------------------------------------------------------------===//
// Function Implementation
//===----------------------------------------------------------------------===//

static unsigned computeAddrSpace(unsigned AddrSpace, Module *M) {
  // If AS == -1 and we are passed a valid module pointer we place the function
  // in the program address space. Otherwise we default to AS0.
  if (AddrSpace == static_cast<unsigned>(-1))
    return M ? M->getDataLayout().getProgramAddressSpace() : 0;
  return AddrSpace;
}

Function::Function(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace,
                   const Twine &name, Module *ParentModule)
    : GlobalObject(Ty, Value::FunctionVal,
                   OperandTraits<Function>::op_begin(this), 0, Linkage, name,
                   computeAddrSpace(AddrSpace, ParentModule)),
      NumArgs(Ty->getNumParams()) {
  assert(FunctionType::isValidReturnType(getReturnType()) &&
         "invalid return type");
  setGlobalObjectSubClassData(0);

  // We only need a symbol table for a function if the context keeps value names
  if (!getContext().shouldDiscardValueNames())
    SymTab = std::make_unique<ValueSymbolTable>(NonGlobalValueMaxNameSize);

  // If the function has arguments, mark them as lazily built.
  if (Ty->getNumParams())
    setValueSubclassData(1); // Set the "has lazy arguments" bit.

  if (ParentModule)
    ParentModule->getFunctionList().push_back(this);

  HasLLVMReservedName = getName().startswith("llvm.");
  // Ensure intrinsics have the right parameter attributes.
  // Note, the IntID field will have been set in Value::setName if this function
  // name is a valid intrinsic ID.
  if (IntID)
    setAttributes(Intrinsic::getAttributes(getContext(), IntID));
}

Function::~Function() {
  dropAllReferences(); // After this it is safe to delete instructions.

  // Delete all of the method arguments and unlink from symbol table...
  if (Arguments)
    clearArguments();

  // Remove the function from the on-the-side GC table.
  clearGC();
}

void Function::BuildLazyArguments() const {
  // Create the arguments vector, all arguments start out unnamed.
  auto *FT = getFunctionType();
  if (NumArgs > 0) {
    Arguments = std::allocator<Argument>().allocate(NumArgs);
    for (unsigned i = 0, e = NumArgs; i != e; ++i) {
      Type *ArgTy = FT->getParamType(i);
      assert(!ArgTy->isVoidTy() && "Cannot have void typed arguments!");
      new (Arguments + i) Argument(ArgTy, "", const_cast<Function *>(this), i);
    }
  }

  // Clear the lazy arguments bit.
  unsigned SDC = getSubclassDataFromValue();
  SDC &= ~(1 << 0);
  const_cast<Function*>(this)->setValueSubclassData(SDC);
  assert(!hasLazyArguments());
}

static MutableArrayRef<Argument> makeArgArray(Argument *Args, size_t Count) {
  return MutableArrayRef<Argument>(Args, Count);
}

bool Function::isConstrainedFPIntrinsic() const {
  switch (getIntrinsicID()) {
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return true;
#undef INSTRUCTION
  default:
    return false;
  }
}

void Function::clearArguments() {
  for (Argument &A : makeArgArray(Arguments, NumArgs)) {
    A.setName("");
    A.~Argument();
  }
  std::allocator<Argument>().deallocate(Arguments, NumArgs);
  Arguments = nullptr;
}

void Function::stealArgumentListFrom(Function &Src) {
  assert(isDeclaration() && "Expected no references to current arguments");

  // Drop the current arguments, if any, and set the lazy argument bit.
  if (!hasLazyArguments()) {
    assert(llvm::all_of(makeArgArray(Arguments, NumArgs),
                        [](const Argument &A) { return A.use_empty(); }) &&
           "Expected arguments to be unused in declaration");
    clearArguments();
    setValueSubclassData(getSubclassDataFromValue() | (1 << 0));
  }

  // Nothing to steal if Src has lazy arguments.
  if (Src.hasLazyArguments())
    return;

  // Steal arguments from Src, and fix the lazy argument bits.
  assert(arg_size() == Src.arg_size());
  Arguments = Src.Arguments;
  Src.Arguments = nullptr;
  for (Argument &A : makeArgArray(Arguments, NumArgs)) {
    // FIXME: This does the work of transferNodesFromList inefficiently.
    SmallString<128> Name;
    if (A.hasName())
      Name = A.getName();
    if (!Name.empty())
      A.setName("");
    A.setParent(this);
    if (!Name.empty())
      A.setName(Name);
  }

  setValueSubclassData(getSubclassDataFromValue() & ~(1 << 0));
  assert(!hasLazyArguments());
  Src.setValueSubclassData(Src.getSubclassDataFromValue() | (1 << 0));
}

// dropAllReferences() - This function causes all the subinstructions to "let
// go" of all references that they are maintaining. This allows one to
// 'delete' a whole class at a time, even though there may be circular
// references... first all references are dropped, and all use counts go to
// zero. Then everything is deleted for real. Note that no operations are
// valid on an object that has "dropped all references", except operator
// delete.
//
void Function::dropAllReferences() {
  setIsMaterializable(false);

  for (BasicBlock &BB : *this)
    BB.dropAllReferences();

  // Delete all basic blocks. They are now unused, except possibly by
  // blockaddresses, but BasicBlock's destructor takes care of those.
  while (!BasicBlocks.empty())
    BasicBlocks.begin()->eraseFromParent();

  // Drop uses of any optional data (real or placeholder).
  if (getNumOperands()) {
    User::dropAllReferences();
    setNumHungOffUseOperands(0);
    setValueSubclassData(getSubclassDataFromValue() & ~0xe);
  }

  // Metadata is stored in a side-table.
  clearMetadata();
}

void Function::addAttributeAtIndex(unsigned i, Attribute Attr) {
  AttributeSets = AttributeSets.addAttributeAtIndex(getContext(), i, Attr);
}

void Function::addFnAttr(Attribute::AttrKind Kind) {
  AttributeSets = AttributeSets.addFnAttribute(getContext(), Kind);
}

void Function::addFnAttr(StringRef Kind, StringRef Val) {
  AttributeSets = AttributeSets.addFnAttribute(getContext(), Kind, Val);
}

void Function::addFnAttr(Attribute Attr) {
  AttributeSets = AttributeSets.addFnAttribute(getContext(), Attr);
}

void Function::addFnAttrs(const AttrBuilder &Attrs) {
  AttributeSets = AttributeSets.addFnAttributes(getContext(), Attrs);
}

void Function::addRetAttr(Attribute::AttrKind Kind) {
  AttributeSets = AttributeSets.addRetAttribute(getContext(), Kind);
}

void Function::addRetAttr(Attribute Attr) {
  AttributeSets = AttributeSets.addRetAttribute(getContext(), Attr);
}

void Function::addRetAttrs(const AttrBuilder &Attrs) {
  AttributeSets = AttributeSets.addRetAttributes(getContext(), Attrs);
}

void Function::addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
  AttributeSets = AttributeSets.addParamAttribute(getContext(), ArgNo, Kind);
}

void Function::addParamAttr(unsigned ArgNo, Attribute Attr) {
  AttributeSets = AttributeSets.addParamAttribute(getContext(), ArgNo, Attr);
}

void Function::addParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs) {
  AttributeSets = AttributeSets.addParamAttributes(getContext(), ArgNo, Attrs);
}

void Function::removeAttributeAtIndex(unsigned i, Attribute::AttrKind Kind) {
  AttributeSets = AttributeSets.removeAttributeAtIndex(getContext(), i, Kind);
}

void Function::removeAttributeAtIndex(unsigned i, StringRef Kind) {
  AttributeSets = AttributeSets.removeAttributeAtIndex(getContext(), i, Kind);
}

void Function::removeFnAttr(Attribute::AttrKind Kind) {
  AttributeSets = AttributeSets.removeFnAttribute(getContext(), Kind);
}

void Function::removeFnAttr(StringRef Kind) {
  AttributeSets = AttributeSets.removeFnAttribute(getContext(), Kind);
}

void Function::removeFnAttrs(const AttributeMask &AM) {
  AttributeSets = AttributeSets.removeFnAttributes(getContext(), AM);
}

void Function::removeRetAttr(Attribute::AttrKind Kind) {
  AttributeSets = AttributeSets.removeRetAttribute(getContext(), Kind);
}

void Function::removeRetAttr(StringRef Kind) {
  AttributeSets = AttributeSets.removeRetAttribute(getContext(), Kind);
}

void Function::removeRetAttrs(const AttributeMask &Attrs) {
  AttributeSets = AttributeSets.removeRetAttributes(getContext(), Attrs);
}

void Function::removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
  AttributeSets = AttributeSets.removeParamAttribute(getContext(), ArgNo, Kind);
}

void Function::removeParamAttr(unsigned ArgNo, StringRef Kind) {
  AttributeSets = AttributeSets.removeParamAttribute(getContext(), ArgNo, Kind);
}

void Function::removeParamAttrs(unsigned ArgNo, const AttributeMask &Attrs) {
  AttributeSets =
      AttributeSets.removeParamAttributes(getContext(), ArgNo, Attrs);
}

void Function::addDereferenceableParamAttr(unsigned ArgNo, uint64_t Bytes) {
  AttributeSets =
      AttributeSets.addDereferenceableParamAttr(getContext(), ArgNo, Bytes);
}

bool Function::hasFnAttribute(Attribute::AttrKind Kind) const {
  return AttributeSets.hasFnAttr(Kind);
}

bool Function::hasFnAttribute(StringRef Kind) const {
  return AttributeSets.hasFnAttr(Kind);
}

bool Function::hasRetAttribute(Attribute::AttrKind Kind) const {
  return AttributeSets.hasRetAttr(Kind);
}

bool Function::hasParamAttribute(unsigned ArgNo,
                                 Attribute::AttrKind Kind) const {
  return AttributeSets.hasParamAttr(ArgNo, Kind);
}

Attribute Function::getAttributeAtIndex(unsigned i,
                                        Attribute::AttrKind Kind) const {
  return AttributeSets.getAttributeAtIndex(i, Kind);
}

Attribute Function::getAttributeAtIndex(unsigned i, StringRef Kind) const {
  return AttributeSets.getAttributeAtIndex(i, Kind);
}

Attribute Function::getFnAttribute(Attribute::AttrKind Kind) const {
  return AttributeSets.getFnAttr(Kind);
}

Attribute Function::getFnAttribute(StringRef Kind) const {
  return AttributeSets.getFnAttr(Kind);
}

/// gets the specified attribute from the list of attributes.
Attribute Function::getParamAttribute(unsigned ArgNo,
                                      Attribute::AttrKind Kind) const {
  return AttributeSets.getParamAttr(ArgNo, Kind);
}

void Function::addDereferenceableOrNullParamAttr(unsigned ArgNo,
                                                 uint64_t Bytes) {
  AttributeSets = AttributeSets.addDereferenceableOrNullParamAttr(getContext(),
                                                                  ArgNo, Bytes);
}

DenormalMode Function::getDenormalMode(const fltSemantics &FPType) const {
  if (&FPType == &APFloat::IEEEsingle()) {
    Attribute Attr = getFnAttribute("denormal-fp-math-f32");
    StringRef Val = Attr.getValueAsString();
    if (!Val.empty())
      return parseDenormalFPAttribute(Val);

    // If the f32 variant of the attribute isn't specified, try to use the
    // generic one.
  }

  Attribute Attr = getFnAttribute("denormal-fp-math");
  return parseDenormalFPAttribute(Attr.getValueAsString());
}

const std::string &Function::getGC() const {
  assert(hasGC() && "Function has no collector");
  return getContext().getGC(*this);
}

void Function::setGC(std::string Str) {
  setValueSubclassDataBit(14, !Str.empty());
  getContext().setGC(*this, std::move(Str));
}

void Function::clearGC() {
  if (!hasGC())
    return;
  getContext().deleteGC(*this);
  setValueSubclassDataBit(14, false);
}

bool Function::hasStackProtectorFnAttr() const {
  return hasFnAttribute(Attribute::StackProtect) ||
         hasFnAttribute(Attribute::StackProtectStrong) ||
         hasFnAttribute(Attribute::StackProtectReq);
}
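
// Illustrative sketch: the add*/remove*/has*/get* wrappers above all rebuild
// the immutable AttributeList attached to the function. For a Function *F:
//
//   F->addFnAttr(Attribute::NoUnwind);        // enum attribute
//   F->addFnAttr("frame-pointer", "all");     // string attribute
//   bool HasIt = F->hasFnAttribute(Attribute::NoUnwind);            // true
//   StringRef FP = F->getFnAttribute("frame-pointer").getValueAsString();
//   F->removeFnAttr(Attribute::NoUnwind);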

/// Copy all additional attributes (those not needed to create a Function) from
/// the Function Src to this one.
void Function::copyAttributesFrom(const Function *Src) {
  GlobalObject::copyAttributesFrom(Src);
  setCallingConv(Src->getCallingConv());
  setAttributes(Src->getAttributes());
  if (Src->hasGC())
    setGC(Src->getGC());
  else
    clearGC();
  if (Src->hasPersonalityFn())
    setPersonalityFn(Src->getPersonalityFn());
  if (Src->hasPrefixData())
    setPrefixData(Src->getPrefixData());
  if (Src->hasPrologueData())
    setPrologueData(Src->getPrologueData());
}

MemoryEffects Function::getMemoryEffects() const {
  return getAttributes().getMemoryEffects();
}
void Function::setMemoryEffects(MemoryEffects ME) {
  addFnAttr(Attribute::getWithMemoryEffects(getContext(), ME));
}

/// Determine if the function does not access memory.
bool Function::doesNotAccessMemory() const {
  return getMemoryEffects().doesNotAccessMemory();
}
void Function::setDoesNotAccessMemory() {
  setMemoryEffects(MemoryEffects::none());
}

/// Determine if the function does not access or only reads memory.
bool Function::onlyReadsMemory() const {
  return getMemoryEffects().onlyReadsMemory();
}
void Function::setOnlyReadsMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
}

/// Determine if the function does not access or only writes memory.
bool Function::onlyWritesMemory() const {
  return getMemoryEffects().onlyWritesMemory();
}
void Function::setOnlyWritesMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
}

/// Determine if the function can access memory only using pointers based
/// on its arguments.
bool Function::onlyAccessesArgMemory() const {
  return getMemoryEffects().onlyAccessesArgPointees();
}
void Function::setOnlyAccessesArgMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
}

/// Determine if the function may only access memory that is
/// inaccessible from the IR.
bool Function::onlyAccessesInaccessibleMemory() const {
  return getMemoryEffects().onlyAccessesInaccessibleMem();
}
void Function::setOnlyAccessesInaccessibleMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
}

/// Determine if the function may only access memory that is
/// either inaccessible from the IR or pointed to by its arguments.
bool Function::onlyAccessesInaccessibleMemOrArgMem() const {
  return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
}
void Function::setOnlyAccessesInaccessibleMemOrArgMem() {
  setMemoryEffects(getMemoryEffects() &
                   MemoryEffects::inaccessibleOrArgMemOnly());
}
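
// Illustrative sketch: the setters above intersect the current MemoryEffects
// with the requested limit, so they only ever make the summary more
// restrictive. For a Function *F:
//
//   F->setOnlyReadsMemory();        // intersects with MemoryEffects::readOnly()
//   F->setOnlyAccessesArgMemory();  // further restricts to argument memory
//   bool RO = F->onlyReadsMemory(); // still true; restrictions compose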

/// Table of string intrinsic names indexed by enum value.
static const char * const IntrinsicNameTable[] = {
  "not_intrinsic",
#define GET_INTRINSIC_NAME_TABLE
#include "llvm/IR/IntrinsicImpl.inc"
#undef GET_INTRINSIC_NAME_TABLE
};

/// Table of per-target intrinsic name tables.
#define GET_INTRINSIC_TARGET_DATA
#include "llvm/IR/IntrinsicImpl.inc"
#undef GET_INTRINSIC_TARGET_DATA

bool Function::isTargetIntrinsic(Intrinsic::ID IID) {
  return IID > TargetInfos[0].Count;
}

bool Function::isTargetIntrinsic() const {
  return isTargetIntrinsic(IntID);
}

/// Find the segment of \c IntrinsicNameTable for intrinsics with the same
/// target as \c Name, or the generic table if \c Name is not target specific.
///
/// Returns the relevant slice of \c IntrinsicNameTable
static ArrayRef<const char *> findTargetSubtable(StringRef Name) {
  assert(Name.startswith("llvm."));

  ArrayRef<IntrinsicTargetInfo> Targets(TargetInfos);
  // Drop "llvm." and take the first dotted component. That will be the target
  // if this is target specific.
  StringRef Target = Name.drop_front(5).split('.').first;
  auto It = partition_point(
      Targets, [=](const IntrinsicTargetInfo &TI) { return TI.Name < Target; });
  // We've either found the target or just fall back to the generic set, which
  // is always first.
  const auto &TI = It != Targets.end() && It->Name == Target ? *It : Targets[0];
  return makeArrayRef(&IntrinsicNameTable[1] + TI.Offset, TI.Count);
}

/// This does the actual lookup of an intrinsic ID which
/// matches the given function name.
Intrinsic::ID Function::lookupIntrinsicID(StringRef Name) {
  ArrayRef<const char *> NameTable = findTargetSubtable(Name);
  int Idx = Intrinsic::lookupLLVMIntrinsicByName(NameTable, Name);
  if (Idx == -1)
    return Intrinsic::not_intrinsic;

  // Intrinsic IDs correspond to the location in IntrinsicNameTable, but we have
  // an index into a sub-table.
  int Adjust = NameTable.data() - IntrinsicNameTable;
  Intrinsic::ID ID = static_cast<Intrinsic::ID>(Idx + Adjust);

  // If the intrinsic is not overloaded, require an exact match. If it is
  // overloaded, require either exact or prefix match.
  const auto MatchSize = strlen(NameTable[Idx]);
  assert(Name.size() >= MatchSize && "Expected either exact or prefix match");
  bool IsExactMatch = Name.size() == MatchSize;
  return IsExactMatch || Intrinsic::isOverloaded(ID) ? ID
                                                     : Intrinsic::not_intrinsic;
}

void Function::recalculateIntrinsicID() {
  StringRef Name = getName();
  if (!Name.startswith("llvm.")) {
    HasLLVMReservedName = false;
    IntID = Intrinsic::not_intrinsic;
    return;
  }
  HasLLVMReservedName = true;
  IntID = lookupIntrinsicID(Name);
}
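
// Illustrative sketch: name-to-ID lookup first narrows the search to the
// generic or per-target name table, then accepts a prefix match only for
// overloaded intrinsics. The sample names are ordinary intrinsics, nothing
// specific to this file:
//
//   Function::lookupIntrinsicID("llvm.trap");              // Intrinsic::trap
//   // Overloaded intrinsics match on the base-name prefix:
//   Function::lookupIntrinsicID("llvm.umax.i32");          // Intrinsic::umax
//   Function::lookupIntrinsicID("llvm.no.such.intrinsic"); // not_intrinsic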

/// Returns a stable mangling for the type specified for use in the name
/// mangling scheme used by 'any' types in intrinsic signatures. The mangling
/// of named types is simply their name. Manglings for unnamed types consist
/// of a prefix ('p' for pointers, 'a' for arrays, 'f_' for functions)
/// combined with the mangling of their component types. A vararg function
/// type will have a suffix of 'vararg'. Since function types can contain
/// other function types, we close a function type mangling with suffix 'f'
/// which can't be confused with its prefix. This ensures we don't have
/// collisions between two unrelated function types. Otherwise, you might
/// parse ffXX as f(fXX) or f(fX)X. (X is a placeholder for any other type.)
/// The HasUnnamedType boolean is set if an unnamed type was encountered,
/// indicating that extra care must be taken to ensure a unique name.
static std::string getMangledTypeStr(Type *Ty, bool &HasUnnamedType) {
  std::string Result;
  if (PointerType *PTyp = dyn_cast<PointerType>(Ty)) {
    Result += "p" + utostr(PTyp->getAddressSpace());
    // Opaque pointer doesn't have pointee type information, so we just mangle
    // address space for opaque pointer.
    if (!PTyp->isOpaque())
      Result += getMangledTypeStr(PTyp->getNonOpaquePointerElementType(),
                                  HasUnnamedType);
  } else if (ArrayType *ATyp = dyn_cast<ArrayType>(Ty)) {
    Result += "a" + utostr(ATyp->getNumElements()) +
              getMangledTypeStr(ATyp->getElementType(), HasUnnamedType);
  } else if (StructType *STyp = dyn_cast<StructType>(Ty)) {
    if (!STyp->isLiteral()) {
      Result += "s_";
      if (STyp->hasName())
        Result += STyp->getName();
      else
        HasUnnamedType = true;
    } else {
      Result += "sl_";
      for (auto *Elem : STyp->elements())
        Result += getMangledTypeStr(Elem, HasUnnamedType);
    }
    // Ensure nested structs are distinguishable.
    Result += "s";
  } else if (FunctionType *FT = dyn_cast<FunctionType>(Ty)) {
    Result += "f_" + getMangledTypeStr(FT->getReturnType(), HasUnnamedType);
    for (size_t i = 0; i < FT->getNumParams(); i++)
      Result += getMangledTypeStr(FT->getParamType(i), HasUnnamedType);
    if (FT->isVarArg())
      Result += "vararg";
    // Ensure nested function types are distinguishable.
    Result += "f";
  } else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    ElementCount EC = VTy->getElementCount();
    if (EC.isScalable())
      Result += "nx";
    Result += "v" + utostr(EC.getKnownMinValue()) +
              getMangledTypeStr(VTy->getElementType(), HasUnnamedType);
  } else if (Ty) {
    switch (Ty->getTypeID()) {
    default: llvm_unreachable("Unhandled type");
    case Type::VoidTyID:      Result += "isVoid";   break;
    case Type::MetadataTyID:  Result += "Metadata"; break;
    case Type::HalfTyID:      Result += "f16";      break;
    case Type::BFloatTyID:    Result += "bf16";     break;
    case Type::FloatTyID:     Result += "f32";      break;
    case Type::DoubleTyID:    Result += "f64";      break;
    case Type::X86_FP80TyID:  Result += "f80";      break;
    case Type::FP128TyID:     Result += "f128";     break;
    case Type::PPC_FP128TyID: Result += "ppcf128";  break;
    case Type::X86_MMXTyID:   Result += "x86mmx";   break;
    case Type::X86_AMXTyID:   Result += "x86amx";   break;
    case Type::IntegerTyID:
      Result += "i" + utostr(cast<IntegerType>(Ty)->getBitWidth());
      break;
    }
  }
  return Result;
}
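
// Illustrative manglings produced by getMangledTypeStr (derived from the
// rules above, not an exhaustive list):
//
//   i32                -> "i32"
//   <4 x float>        -> "v4f32"
//   <vscale x 4 x i32> -> "nxv4i32"
//   ptr (opaque, AS 0) -> "p0"
//   [8 x i8]           -> "a8i8"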

StringRef Intrinsic::getBaseName(ID id) {
  assert(id < num_intrinsics && "Invalid intrinsic ID!");
  return IntrinsicNameTable[id];
}

StringRef Intrinsic::getName(ID id) {
  assert(id < num_intrinsics && "Invalid intrinsic ID!");
  assert(!Intrinsic::isOverloaded(id) &&
         "This version of getName does not support overloading");
  return getBaseName(id);
}

static std::string getIntrinsicNameImpl(Intrinsic::ID Id, ArrayRef<Type *> Tys,
                                        Module *M, FunctionType *FT,
                                        bool EarlyModuleCheck) {

  assert(Id < Intrinsic::num_intrinsics && "Invalid intrinsic ID!");
  assert((Tys.empty() || Intrinsic::isOverloaded(Id)) &&
         "This version of getName is for overloaded intrinsics only");
  (void)EarlyModuleCheck;
  assert((!EarlyModuleCheck || M ||
          !any_of(Tys, [](Type *T) { return isa<PointerType>(T); })) &&
         "Intrinsic overloading on pointer types need to provide a Module");
  bool HasUnnamedType = false;
  std::string Result(Intrinsic::getBaseName(Id));
  for (Type *Ty : Tys)
    Result += "." + getMangledTypeStr(Ty, HasUnnamedType);
  if (HasUnnamedType) {
    assert(M && "unnamed types need a module");
    if (!FT)
      FT = Intrinsic::getType(M->getContext(), Id, Tys);
    else
      assert((FT == Intrinsic::getType(M->getContext(), Id, Tys)) &&
             "Provided FunctionType must match arguments");
    return M->getUniqueIntrinsicName(Result, Id, FT);
  }
  return Result;
}

std::string Intrinsic::getName(ID Id, ArrayRef<Type *> Tys, Module *M,
                               FunctionType *FT) {
  assert(M && "We need to have a Module");
  return getIntrinsicNameImpl(Id, Tys, M, FT, true);
}

std::string Intrinsic::getNameNoUnnamedTypes(ID Id, ArrayRef<Type *> Tys) {
  return getIntrinsicNameImpl(Id, Tys, nullptr, nullptr, false);
}
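
// Illustrative sketch: the overloaded-name helpers append one mangled suffix
// per overload type to the base name. Ctx and M below are placeholders:
//
//   Type *I32 = Type::getInt32Ty(Ctx);
//   Intrinsic::getName(Intrinsic::umax, {I32}, &M);           // "llvm.umax.i32"
//   Intrinsic::getNameNoUnnamedTypes(Intrinsic::umax, {I32});  // same string,
//   // but without the module-unique renaming used for unnamed struct types.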

/// IIT_Info - These are enumerators that describe the entries returned by the
/// getIntrinsicInfoTableEntries function.
///
/// NOTE: This must be kept in synch with the copy in TblGen/IntrinsicEmitter!
enum IIT_Info {
  // Common values should be encoded with 0-15.
  IIT_Done = 0,
  IIT_I1 = 1,
  IIT_I8 = 2,
  IIT_I16 = 3,
  IIT_I32 = 4,
  IIT_I64 = 5,
  IIT_F16 = 6,
  IIT_F32 = 7,
  IIT_F64 = 8,
  IIT_V2 = 9,
  IIT_V4 = 10,
  IIT_V8 = 11,
  IIT_V16 = 12,
  IIT_V32 = 13,
  IIT_PTR = 14,
  IIT_ARG = 15,

  // Values from 16+ are only encodable with the inefficient encoding.
  IIT_V64 = 16,
  IIT_MMX = 17,
  IIT_TOKEN = 18,
  IIT_METADATA = 19,
  IIT_EMPTYSTRUCT = 20,
  IIT_STRUCT2 = 21,
  IIT_STRUCT3 = 22,
  IIT_STRUCT4 = 23,
  IIT_STRUCT5 = 24,
  IIT_EXTEND_ARG = 25,
  IIT_TRUNC_ARG = 26,
  IIT_ANYPTR = 27,
  IIT_V1 = 28,
  IIT_VARARG = 29,
  IIT_HALF_VEC_ARG = 30,
  IIT_SAME_VEC_WIDTH_ARG = 31,
  IIT_PTR_TO_ARG = 32,
  IIT_PTR_TO_ELT = 33,
  IIT_VEC_OF_ANYPTRS_TO_ELT = 34,
  IIT_I128 = 35,
  IIT_V512 = 36,
  IIT_V1024 = 37,
  IIT_STRUCT6 = 38,
  IIT_STRUCT7 = 39,
  IIT_STRUCT8 = 40,
  IIT_F128 = 41,
  IIT_VEC_ELEMENT = 42,
  IIT_SCALABLE_VEC = 43,
  IIT_SUBDIVIDE2_ARG = 44,
  IIT_SUBDIVIDE4_ARG = 45,
  IIT_VEC_OF_BITCASTS_TO_INT = 46,
  IIT_V128 = 47,
  IIT_BF16 = 48,
  IIT_STRUCT9 = 49,
  IIT_V256 = 50,
  IIT_AMX = 51,
  IIT_PPCF128 = 52,
  IIT_V3 = 53,
  IIT_EXTERNREF = 54,
  IIT_FUNCREF = 55,
  IIT_ANYPTR_TO_ELT = 56,
  IIT_I2 = 57,
  IIT_I4 = 58,
};

static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
                          IIT_Info LastInfo,
                          SmallVectorImpl<Intrinsic::IITDescriptor> &OutputTable) {
  using namespace Intrinsic;

  bool IsScalableVector = (LastInfo == IIT_SCALABLE_VEC);

  IIT_Info Info = IIT_Info(Infos[NextElt++]);
  unsigned StructElts = 2;

  switch (Info) {
  case IIT_Done:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Void, 0));
    return;
  case IIT_VARARG:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::VarArg, 0));
    return;
  case IIT_MMX:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::MMX, 0));
    return;
  case IIT_AMX:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::AMX, 0));
    return;
  case IIT_TOKEN:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Token, 0));
    return;
  case IIT_METADATA:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Metadata, 0));
    return;
  case IIT_F16:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Half, 0));
    return;
  case IIT_BF16:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::BFloat, 0));
    return;
  case IIT_F32:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Float, 0));
    return;
  case IIT_F64:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Double, 0));
    return;
  case IIT_F128:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Quad, 0));
    return;
  case IIT_PPCF128:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::PPCQuad, 0));
    return;
  case IIT_I1:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 1));
    return;
  case IIT_I2:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 2));
    return;
  case IIT_I4:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 4));
    return;
  case IIT_I8:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 8));
    return;
  case IIT_I16:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 16));
    return;
  case IIT_I32:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 32));
    return;
  case IIT_I64:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 64));
    return;
  case IIT_I128:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 128));
    return;
  case IIT_V1:
    OutputTable.push_back(IITDescriptor::getVector(1, IsScalableVector));
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  case IIT_V2:
    OutputTable.push_back(IITDescriptor::getVector(2, IsScalableVector));
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  case IIT_V3:
    OutputTable.push_back(IITDescriptor::getVector(3, IsScalableVector));
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  case IIT_V4:
    OutputTable.push_back(IITDescriptor::getVector(4, IsScalableVector));
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  case IIT_V8:
    OutputTable.push_back(IITDescriptor::getVector(8, IsScalableVector));
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  case IIT_V16:
    OutputTable.push_back(IITDescriptor::getVector(16, IsScalableVector));
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  case IIT_V32:
    OutputTable.push_back(IITDescriptor::getVector(32, IsScalableVector));
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  case IIT_V64:
    OutputTable.push_back(IITDescriptor::getVector(64, IsScalableVector));
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  case IIT_V128:
    OutputTable.push_back(IITDescriptor::getVector(128, IsScalableVector));
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  case IIT_V256:
    OutputTable.push_back(IITDescriptor::getVector(256, IsScalableVector));
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  case IIT_V512:
    OutputTable.push_back(IITDescriptor::getVector(512, IsScalableVector));
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  case IIT_V1024:
    OutputTable.push_back(IITDescriptor::getVector(1024, IsScalableVector));
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  case IIT_EXTERNREF:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 10));
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0));
    return;
  case IIT_FUNCREF:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 20));
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 8));
    return;
  case IIT_PTR:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 0));
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  case IIT_ANYPTR: { // [ANYPTR addrspace, subtype]
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer,
                                             Infos[NextElt++]));
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  }
  case IIT_ARG: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Argument, ArgInfo));
    return;
  }
  case IIT_EXTEND_ARG: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::ExtendArgument,
                                             ArgInfo));
    return;
  }
  case IIT_TRUNC_ARG: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::TruncArgument,
                                             ArgInfo));
    return;
  }
  case IIT_HALF_VEC_ARG: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::HalfVecArgument,
                                             ArgInfo));
    return;
  }
  case IIT_SAME_VEC_WIDTH_ARG: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::SameVecWidthArgument,
                                             ArgInfo));
    return;
  }
  case IIT_PTR_TO_ARG: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::PtrToArgument,
                                             ArgInfo));
    return;
  }
  case IIT_PTR_TO_ELT: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::PtrToElt, ArgInfo));
    return;
  }
  case IIT_ANYPTR_TO_ELT: {
    unsigned short ArgNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    unsigned short RefNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        IITDescriptor::get(IITDescriptor::AnyPtrToElt, ArgNo, RefNo));
    return;
  }
  case IIT_VEC_OF_ANYPTRS_TO_ELT: {
    unsigned short ArgNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    unsigned short RefNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(
        IITDescriptor::get(IITDescriptor::VecOfAnyPtrsToElt, ArgNo, RefNo));
    return;
  }
  case IIT_EMPTYSTRUCT:
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0));
    return;
  case IIT_STRUCT9: ++StructElts; [[fallthrough]];
  case IIT_STRUCT8: ++StructElts; [[fallthrough]];
  case IIT_STRUCT7: ++StructElts; [[fallthrough]];
  case IIT_STRUCT6: ++StructElts; [[fallthrough]];
  case IIT_STRUCT5: ++StructElts; [[fallthrough]];
  case IIT_STRUCT4: ++StructElts; [[fallthrough]];
  case IIT_STRUCT3: ++StructElts; [[fallthrough]];
  case IIT_STRUCT2: {
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, StructElts));

    for (unsigned i = 0; i != StructElts; ++i)
      DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  }
  case IIT_SUBDIVIDE2_ARG: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Subdivide2Argument,
                                             ArgInfo));
    return;
  }
  case IIT_SUBDIVIDE4_ARG: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Subdivide4Argument,
                                             ArgInfo));
    return;
  }
  case IIT_VEC_ELEMENT: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::VecElementArgument,
                                             ArgInfo));
    return;
  }
  case IIT_SCALABLE_VEC: {
    DecodeIITType(NextElt, Infos, Info, OutputTable);
    return;
  }
  case IIT_VEC_OF_BITCASTS_TO_INT: {
    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
    OutputTable.push_back(IITDescriptor::get(IITDescriptor::VecOfBitcastsToInt,
                                             ArgInfo));
    return;
  }
  }
  llvm_unreachable("unhandled");
}

#define GET_INTRINSIC_GENERATOR_GLOBAL
#include "llvm/IR/IntrinsicImpl.inc"
#undef GET_INTRINSIC_GENERATOR_GLOBAL

void Intrinsic::getIntrinsicInfoTableEntries(ID id,
                                             SmallVectorImpl<IITDescriptor> &T) {
  // Check to see if the intrinsic's type was expressible by the table.
  unsigned TableVal = IIT_Table[id-1];

  // Decode the TableVal into an array of IITValues.
  SmallVector<unsigned char, 8> IITValues;
  ArrayRef<unsigned char> IITEntries;
  unsigned NextElt = 0;
  if ((TableVal >> 31) != 0) {
    // This is an offset into the IIT_LongEncodingTable.
    IITEntries = IIT_LongEncodingTable;

    // Strip sentinel bit.
    NextElt = (TableVal << 1) >> 1;
  } else {
    // Decode the TableVal into an array of IITValues. If the entry was encoded
    // into a single word in the table itself, decode it now.
    do {
      IITValues.push_back(TableVal & 0xF);
      TableVal >>= 4;
    } while (TableVal);

    IITEntries = IITValues;
    NextElt = 0;
  }

  // Okay, decode the table into the output vector of IITDescriptors.
  DecodeIITType(NextElt, IITEntries, IIT_Done, T);
  while (NextElt != IITEntries.size() && IITEntries[NextElt] != 0)
    DecodeIITType(NextElt, IITEntries, IIT_Done, T);
}
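
// Illustrative sketch: the decoded descriptors are a symbolic, type-system
// independent description of the intrinsic signature (return type first,
// then parameters). For example:
//
//   SmallVector<Intrinsic::IITDescriptor, 8> Table;
//   Intrinsic::getIntrinsicInfoTableEntries(Intrinsic::umax, Table);
//   // Table now describes, roughly, "any-integer result, two operands
//   // matching it"; Intrinsic::getType() below turns such a table into a
//   // concrete FunctionType.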

static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
                             ArrayRef<Type*> Tys, LLVMContext &Context) {
  using namespace Intrinsic;

  IITDescriptor D = Infos.front();
  Infos = Infos.slice(1);

  switch (D.Kind) {
  case IITDescriptor::Void: return Type::getVoidTy(Context);
  case IITDescriptor::VarArg: return Type::getVoidTy(Context);
  case IITDescriptor::MMX: return Type::getX86_MMXTy(Context);
  case IITDescriptor::AMX: return Type::getX86_AMXTy(Context);
  case IITDescriptor::Token: return Type::getTokenTy(Context);
  case IITDescriptor::Metadata: return Type::getMetadataTy(Context);
  case IITDescriptor::Half: return Type::getHalfTy(Context);
  case IITDescriptor::BFloat: return Type::getBFloatTy(Context);
  case IITDescriptor::Float: return Type::getFloatTy(Context);
  case IITDescriptor::Double: return Type::getDoubleTy(Context);
  case IITDescriptor::Quad: return Type::getFP128Ty(Context);
  case IITDescriptor::PPCQuad: return Type::getPPC_FP128Ty(Context);

  case IITDescriptor::Integer:
    return IntegerType::get(Context, D.Integer_Width);
  case IITDescriptor::Vector:
    return VectorType::get(DecodeFixedType(Infos, Tys, Context),
                           D.Vector_Width);
  case IITDescriptor::Pointer:
    return PointerType::get(DecodeFixedType(Infos, Tys, Context),
                            D.Pointer_AddressSpace);
  case IITDescriptor::Struct: {
    SmallVector<Type *, 8> Elts;
    for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
      Elts.push_back(DecodeFixedType(Infos, Tys, Context));
    return StructType::get(Context, Elts);
  }
  case IITDescriptor::Argument:
    return Tys[D.getArgumentNumber()];
  case IITDescriptor::ExtendArgument: {
    Type *Ty = Tys[D.getArgumentNumber()];
    if (VectorType *VTy = dyn_cast<VectorType>(Ty))
      return VectorType::getExtendedElementVectorType(VTy);

    return IntegerType::get(Context, 2 * cast<IntegerType>(Ty)->getBitWidth());
  }
  case IITDescriptor::TruncArgument: {
    Type *Ty = Tys[D.getArgumentNumber()];
    if (VectorType *VTy = dyn_cast<VectorType>(Ty))
      return VectorType::getTruncatedElementVectorType(VTy);

    IntegerType *ITy = cast<IntegerType>(Ty);
    assert(ITy->getBitWidth() % 2 == 0);
    return IntegerType::get(Context, ITy->getBitWidth() / 2);
  }
  case IITDescriptor::Subdivide2Argument:
  case IITDescriptor::Subdivide4Argument: {
    Type *Ty = Tys[D.getArgumentNumber()];
    VectorType *VTy = dyn_cast<VectorType>(Ty);
    assert(VTy && "Expected an argument of Vector Type");
    int SubDivs = D.Kind == IITDescriptor::Subdivide2Argument ? 1 : 2;
    return VectorType::getSubdividedVectorType(VTy, SubDivs);
  }
  case IITDescriptor::HalfVecArgument:
    return VectorType::getHalfElementsVectorType(
        cast<VectorType>(Tys[D.getArgumentNumber()]));
  case IITDescriptor::SameVecWidthArgument: {
    Type *EltTy = DecodeFixedType(Infos, Tys, Context);
    Type *Ty = Tys[D.getArgumentNumber()];
    if (auto *VTy = dyn_cast<VectorType>(Ty))
      return VectorType::get(EltTy, VTy->getElementCount());
    return EltTy;
  }
  case IITDescriptor::PtrToArgument: {
    Type *Ty = Tys[D.getArgumentNumber()];
    return PointerType::getUnqual(Ty);
  }
  case IITDescriptor::PtrToElt: {
    Type *Ty = Tys[D.getArgumentNumber()];
    VectorType *VTy = dyn_cast<VectorType>(Ty);
    if (!VTy)
      llvm_unreachable("Expected an argument of Vector Type");
    Type *EltTy = VTy->getElementType();
    return PointerType::getUnqual(EltTy);
  }
  case IITDescriptor::VecElementArgument: {
    Type *Ty = Tys[D.getArgumentNumber()];
    if (VectorType *VTy = dyn_cast<VectorType>(Ty))
      return VTy->getElementType();
    llvm_unreachable("Expected an argument of Vector Type");
  }
  case IITDescriptor::VecOfBitcastsToInt: {
    Type *Ty = Tys[D.getArgumentNumber()];
    VectorType *VTy = dyn_cast<VectorType>(Ty);
    assert(VTy && "Expected an argument of Vector Type");
    return VectorType::getInteger(VTy);
  }
  case IITDescriptor::VecOfAnyPtrsToElt:
    // Return the overloaded type (which determines the pointers address space)
    return Tys[D.getOverloadArgNumber()];
  case IITDescriptor::AnyPtrToElt:
    // Return the overloaded type (which determines the pointers address space)
    return Tys[D.getOverloadArgNumber()];
  }
  llvm_unreachable("unhandled");
}

FunctionType *Intrinsic::getType(LLVMContext &Context,
                                 ID id, ArrayRef<Type*> Tys) {
  SmallVector<IITDescriptor, 8> Table;
  getIntrinsicInfoTableEntries(id, Table);

  ArrayRef<IITDescriptor> TableRef = Table;
  Type *ResultTy = DecodeFixedType(TableRef, Tys, Context);

  SmallVector<Type*, 8> ArgTys;
  while (!TableRef.empty())
    ArgTys.push_back(DecodeFixedType(TableRef, Tys, Context));

  // DecodeFixedType returns Void for IITDescriptor::Void and IITDescriptor::VarArg
  // If we see void type as the type of the last argument, it is vararg intrinsic
  if (!ArgTys.empty() && ArgTys.back()->isVoidTy()) {
    ArgTys.pop_back();
    return FunctionType::get(ResultTy, ArgTys, true);
  }
  return FunctionType::get(ResultTy, ArgTys, false);
}
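
// Illustrative sketch: getType() materializes the descriptor table into a
// concrete FunctionType once the overload types are known. Ctx is a
// placeholder LLVMContext:
//
//   Type *I32 = Type::getInt32Ty(Ctx);
//   FunctionType *FTy = Intrinsic::getType(Ctx, Intrinsic::umax, {I32});
//   // FTy is "i32 (i32, i32)".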

bool Intrinsic::isOverloaded(ID id) {
#define GET_INTRINSIC_OVERLOAD_TABLE
#include "llvm/IR/IntrinsicImpl.inc"
#undef GET_INTRINSIC_OVERLOAD_TABLE
}

bool Intrinsic::isLeaf(ID id) {
  switch (id) {
  default:
    return true;

  case Intrinsic::experimental_gc_statepoint:
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return false;
  }
}

/// This defines the "Intrinsic::getAttributes(ID id)" method.
#define GET_INTRINSIC_ATTRIBUTES
#include "llvm/IR/IntrinsicImpl.inc"
#undef GET_INTRINSIC_ATTRIBUTES

Function *Intrinsic::getDeclaration(Module *M, ID id, ArrayRef<Type*> Tys) {
  // There can never be multiple globals with the same name of different types,
  // because intrinsics must be a specific type.
  auto *FT = getType(M->getContext(), id, Tys);
  return cast<Function>(
      M->getOrInsertFunction(
           Tys.empty() ? getName(id) : getName(id, Tys, M, FT), FT)
          .getCallee());
}
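
// Illustrative sketch: getDeclaration() is the usual way to obtain (or create)
// an intrinsic declaration in a module. M is a placeholder Module:
//
//   // Non-overloaded intrinsic: no overload types are needed.
//   Function *Trap = Intrinsic::getDeclaration(&M, Intrinsic::trap);
//   // Overloaded intrinsic: the overload types pick the concrete signature.
//   Type *I32 = Type::getInt32Ty(M.getContext());
//   Function *Umax = Intrinsic::getDeclaration(&M, Intrinsic::umax, {I32});
//   // Umax->getName() == "llvm.umax.i32"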
    while (Infos.front().Kind == IITDescriptor::Pointer ||
           Infos.front().Kind == IITDescriptor::Vector)
      Infos = Infos.slice(1);
    assert((Infos.front().Kind != IITDescriptor::Argument ||
            Infos.front().getArgumentKind() == IITDescriptor::AK_MatchType) &&
           "Unsupported polymorphic pointer type with opaque pointer");
    Infos = Infos.slice(1);
    return false;
  }

  case IITDescriptor::Struct: {
    StructType *ST = dyn_cast<StructType>(Ty);
    if (!ST || !ST->isLiteral() || ST->isPacked() ||
        ST->getNumElements() != D.Struct_NumElements)
      return true;

    for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
      if (matchIntrinsicType(ST->getElementType(i), Infos, ArgTys,
                             DeferredChecks, IsDeferredCheck))
        return true;
    return false;
  }

  case IITDescriptor::Argument:
    // If this is the second occurrence of an argument,
    // verify that the later instance matches the previous instance.
    if (D.getArgumentNumber() < ArgTys.size())
      return Ty != ArgTys[D.getArgumentNumber()];

    if (D.getArgumentNumber() > ArgTys.size() ||
        D.getArgumentKind() == IITDescriptor::AK_MatchType)
      return IsDeferredCheck || DeferCheck(Ty);

    assert(D.getArgumentNumber() == ArgTys.size() && !IsDeferredCheck &&
           "Table consistency error");
    ArgTys.push_back(Ty);

    switch (D.getArgumentKind()) {
    case IITDescriptor::AK_Any: return false; // Success
    case IITDescriptor::AK_AnyInteger: return !Ty->isIntOrIntVectorTy();
    case IITDescriptor::AK_AnyFloat: return !Ty->isFPOrFPVectorTy();
    case IITDescriptor::AK_AnyVector: return !isa<VectorType>(Ty);
    case IITDescriptor::AK_AnyPointer: return !isa<PointerType>(Ty);
    default: break;
    }
    llvm_unreachable("all argument kinds not covered");

  case IITDescriptor::ExtendArgument: {
    // If this is a forward reference, defer the check for later.
    if (D.getArgumentNumber() >= ArgTys.size())
      return IsDeferredCheck || DeferCheck(Ty);

    Type *NewTy = ArgTys[D.getArgumentNumber()];
    if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
      NewTy = VectorType::getExtendedElementVectorType(VTy);
    else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
      NewTy = IntegerType::get(ITy->getContext(), 2 * ITy->getBitWidth());
    else
      return true;

    return Ty != NewTy;
  }
  case IITDescriptor::TruncArgument: {
    // If this is a forward reference, defer the check for later.
    if (D.getArgumentNumber() >= ArgTys.size())
      return IsDeferredCheck || DeferCheck(Ty);

    Type *NewTy = ArgTys[D.getArgumentNumber()];
    if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
      NewTy = VectorType::getTruncatedElementVectorType(VTy);
    else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
      NewTy = IntegerType::get(ITy->getContext(), ITy->getBitWidth() / 2);
    else
      return true;

    return Ty != NewTy;
  }
  case IITDescriptor::HalfVecArgument:
    // If this is a forward reference, defer the check for later.
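    // Illustrative expectation: if the referenced overload was recorded as
    // <8 x i16>, this operand must be the half-count vector <4 x i16>.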
    if (D.getArgumentNumber() >= ArgTys.size())
      return IsDeferredCheck || DeferCheck(Ty);
    return !isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
           VectorType::getHalfElementsVectorType(
               cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty;
  case IITDescriptor::SameVecWidthArgument: {
    if (D.getArgumentNumber() >= ArgTys.size()) {
      // Defer check and subsequent check for the vector element type.
      Infos = Infos.slice(1);
      return IsDeferredCheck || DeferCheck(Ty);
    }
    auto *ReferenceType = dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
    auto *ThisArgType = dyn_cast<VectorType>(Ty);
    // Both must be vectors of the same number of elements or neither.
    if ((ReferenceType != nullptr) != (ThisArgType != nullptr))
      return true;
    Type *EltTy = Ty;
    if (ThisArgType) {
      if (ReferenceType->getElementCount() != ThisArgType->getElementCount())
        return true;
      EltTy = ThisArgType->getElementType();
    }
    return matchIntrinsicType(EltTy, Infos, ArgTys, DeferredChecks,
                              IsDeferredCheck);
  }
  case IITDescriptor::PtrToArgument: {
    if (D.getArgumentNumber() >= ArgTys.size())
      return IsDeferredCheck || DeferCheck(Ty);
    Type *ReferenceType = ArgTys[D.getArgumentNumber()];
    PointerType *ThisArgType = dyn_cast<PointerType>(Ty);
    return (!ThisArgType ||
            !ThisArgType->isOpaqueOrPointeeTypeMatches(ReferenceType));
  }
  case IITDescriptor::PtrToElt: {
    if (D.getArgumentNumber() >= ArgTys.size())
      return IsDeferredCheck || DeferCheck(Ty);
    VectorType *ReferenceType =
        dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
    PointerType *ThisArgType = dyn_cast<PointerType>(Ty);

    if (!ThisArgType || !ReferenceType)
      return true;
    return !ThisArgType->isOpaqueOrPointeeTypeMatches(
        ReferenceType->getElementType());
  }
  case IITDescriptor::AnyPtrToElt: {
    unsigned RefArgNumber = D.getRefArgNumber();
    if (RefArgNumber >= ArgTys.size()) {
      if (IsDeferredCheck)
        return true;
      // If this is a forward reference, add the pointer type now and
      // defer the checks for later.
      ArgTys.push_back(Ty);
      return DeferCheck(Ty);
    }

    if (!IsDeferredCheck) {
      assert(D.getOverloadArgNumber() == ArgTys.size() &&
             "Table consistency error");
      ArgTys.push_back(Ty);
    }

    auto *ReferenceType = dyn_cast<VectorType>(ArgTys[RefArgNumber]);
    auto *ThisArgType = dyn_cast<PointerType>(Ty);
    if (!ThisArgType || !ReferenceType)
      return true;
    return !ThisArgType->isOpaqueOrPointeeTypeMatches(
        ReferenceType->getElementType());
  }
  case IITDescriptor::VecOfAnyPtrsToElt: {
    unsigned RefArgNumber = D.getRefArgNumber();
    if (RefArgNumber >= ArgTys.size()) {
      if (IsDeferredCheck)
        return true;
      // If this is a forward reference, add the pointer-vector type now and
      // defer the checks for later.
      ArgTys.push_back(Ty);
      return DeferCheck(Ty);
    }

    if (!IsDeferredCheck) {
      assert(D.getOverloadArgNumber() == ArgTys.size() &&
             "Table consistency error");
      ArgTys.push_back(Ty);
    }

    // Verify the overloaded type "matches" the Ref type, i.e. Ty is a vector
    // with the same element count as Ref, composed of pointers to the same
    // element type as Ref.
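    // Illustrative expectation: with Ref recorded as <4 x i32>, Ty must be a
    // 4-element vector of pointers (any address space); with typed pointers
    // the pointee must also be i32, while an opaque pointer element matches
    // any pointee.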
    auto *ReferenceType = dyn_cast<VectorType>(ArgTys[RefArgNumber]);
    auto *ThisArgVecTy = dyn_cast<VectorType>(Ty);
    if (!ThisArgVecTy || !ReferenceType ||
        (ReferenceType->getElementCount() != ThisArgVecTy->getElementCount()))
      return true;
    PointerType *ThisArgEltTy =
        dyn_cast<PointerType>(ThisArgVecTy->getElementType());
    if (!ThisArgEltTy)
      return true;
    return !ThisArgEltTy->isOpaqueOrPointeeTypeMatches(
        ReferenceType->getElementType());
  }
  case IITDescriptor::VecElementArgument: {
    if (D.getArgumentNumber() >= ArgTys.size())
      return IsDeferredCheck ? true : DeferCheck(Ty);
    auto *ReferenceType = dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
    return !ReferenceType || Ty != ReferenceType->getElementType();
  }
  case IITDescriptor::Subdivide2Argument:
  case IITDescriptor::Subdivide4Argument: {
    // If this is a forward reference, defer the check for later.
    if (D.getArgumentNumber() >= ArgTys.size())
      return IsDeferredCheck || DeferCheck(Ty);

    Type *NewTy = ArgTys[D.getArgumentNumber()];
    if (auto *VTy = dyn_cast<VectorType>(NewTy)) {
      int SubDivs = D.Kind == IITDescriptor::Subdivide2Argument ? 1 : 2;
      NewTy = VectorType::getSubdividedVectorType(VTy, SubDivs);
      return Ty != NewTy;
    }
    return true;
  }
  case IITDescriptor::VecOfBitcastsToInt: {
    if (D.getArgumentNumber() >= ArgTys.size())
      return IsDeferredCheck || DeferCheck(Ty);
    auto *ReferenceType = dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
    auto *ThisArgVecTy = dyn_cast<VectorType>(Ty);
    if (!ThisArgVecTy || !ReferenceType)
      return true;
    return ThisArgVecTy != VectorType::getInteger(ReferenceType);
  }
  }
  llvm_unreachable("unhandled");
}

Intrinsic::MatchIntrinsicTypesResult
Intrinsic::matchIntrinsicSignature(FunctionType *FTy,
                                   ArrayRef<Intrinsic::IITDescriptor> &Infos,
                                   SmallVectorImpl<Type *> &ArgTys) {
  SmallVector<DeferredIntrinsicMatchPair, 2> DeferredChecks;
  if (matchIntrinsicType(FTy->getReturnType(), Infos, ArgTys, DeferredChecks,
                         false))
    return MatchIntrinsicTypes_NoMatchRet;

  unsigned NumDeferredReturnChecks = DeferredChecks.size();

  for (auto *Ty : FTy->params())
    if (matchIntrinsicType(Ty, Infos, ArgTys, DeferredChecks, false))
      return MatchIntrinsicTypes_NoMatchArg;

  for (unsigned I = 0, E = DeferredChecks.size(); I != E; ++I) {
    DeferredIntrinsicMatchPair &Check = DeferredChecks[I];
    if (matchIntrinsicType(Check.first, Check.second, ArgTys, DeferredChecks,
                           true))
      return I < NumDeferredReturnChecks ? MatchIntrinsicTypes_NoMatchRet
                                         : MatchIntrinsicTypes_NoMatchArg;
  }

  return MatchIntrinsicTypes_Match;
}

bool
Intrinsic::matchIntrinsicVarArg(bool isVarArg,
                                ArrayRef<Intrinsic::IITDescriptor> &Infos) {
  // If there are no descriptors left, then it can't be a vararg.
  if (Infos.empty())
    return isVarArg;

  // There should be only one descriptor remaining at this point.
  if (Infos.size() != 1)
    return true;

  // Check and verify the descriptor.
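  // The generated table encodes trailing varargs as a single VarArg
  // descriptor (e.g. for llvm.experimental.stackmap), so a vararg
  // FunctionType is only acceptable if exactly that descriptor remains.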
  IITDescriptor D = Infos.front();
  Infos = Infos.slice(1);
  if (D.Kind == IITDescriptor::VarArg)
    return !isVarArg;

  return true;
}

bool Intrinsic::getIntrinsicSignature(Function *F,
                                      SmallVectorImpl<Type *> &ArgTys) {
  Intrinsic::ID ID = F->getIntrinsicID();
  if (!ID)
    return false;

  SmallVector<Intrinsic::IITDescriptor, 8> Table;
  getIntrinsicInfoTableEntries(ID, Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;

  if (Intrinsic::matchIntrinsicSignature(F->getFunctionType(), TableRef,
                                         ArgTys) !=
      Intrinsic::MatchIntrinsicTypesResult::MatchIntrinsicTypes_Match) {
    return false;
  }
  if (Intrinsic::matchIntrinsicVarArg(F->getFunctionType()->isVarArg(),
                                      TableRef))
    return false;
  return true;
}

std::optional<Function *> Intrinsic::remangleIntrinsicFunction(Function *F) {
  SmallVector<Type *, 4> ArgTys;
  if (!getIntrinsicSignature(F, ArgTys))
    return std::nullopt;

  Intrinsic::ID ID = F->getIntrinsicID();
  StringRef Name = F->getName();
  std::string WantedName =
      Intrinsic::getName(ID, ArgTys, F->getParent(), F->getFunctionType());
  if (Name == WantedName)
    return std::nullopt;

  Function *NewDecl = [&] {
    if (auto *ExistingGV = F->getParent()->getNamedValue(WantedName)) {
      if (auto *ExistingF = dyn_cast<Function>(ExistingGV))
        if (ExistingF->getFunctionType() == F->getFunctionType())
          return ExistingF;

      // The name already exists, but is not a function or has the wrong
      // prototype. Make room for the new one by renaming the old version.
      // Either this old version will be removed later on or the module is
      // invalid and we'll get an error.
      ExistingGV->setName(WantedName + ".renamed");
    }
    return Intrinsic::getDeclaration(F->getParent(), ID, ArgTys);
  }();

  NewDecl->setCallingConv(F->getCallingConv());
  assert(NewDecl->getFunctionType() == F->getFunctionType() &&
         "Shouldn't change the signature");
  return NewDecl;
}

/// hasAddressTaken - returns true if there are any uses of this function
/// other than direct calls or invokes to it. Optionally ignores callback
/// uses, assume-like pointer annotation calls, and references in llvm.used
/// and llvm.compiler.used variables.
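///
/// Illustrative use (a caller-side sketch, not code in this file):
/// \code
///   const User *Offender = nullptr;
///   if (F->hasAddressTaken(&Offender, /*IgnoreCallbackUses=*/true))
///     errs() << "address taken by: " << *Offender << "\n";
/// \endcode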
bool Function::hasAddressTaken(const User **PutOffender,
                               bool IgnoreCallbackUses,
                               bool IgnoreAssumeLikeCalls, bool IgnoreLLVMUsed,
                               bool IgnoreARCAttachedCall) const {
  for (const Use &U : uses()) {
    const User *FU = U.getUser();
    if (isa<BlockAddress>(FU))
      continue;

    if (IgnoreCallbackUses) {
      AbstractCallSite ACS(&U);
      if (ACS && ACS.isCallbackCall())
        continue;
    }

    const auto *Call = dyn_cast<CallBase>(FU);
    if (!Call) {
      if (IgnoreAssumeLikeCalls) {
        if (const auto *FI = dyn_cast<Instruction>(FU)) {
          if (FI->isCast() && !FI->user_empty() &&
              llvm::all_of(FU->users(), [](const User *U) {
                if (const auto *I = dyn_cast<IntrinsicInst>(U))
                  return I->isAssumeLikeIntrinsic();
                return false;
              }))
            continue;
        }
      }
      if (IgnoreLLVMUsed && !FU->user_empty()) {
        const User *FUU = FU;
        if (isa<BitCastOperator>(FU) && FU->hasOneUse() &&
            !FU->user_begin()->user_empty())
          FUU = *FU->user_begin();
        if (llvm::all_of(FUU->users(), [](const User *U) {
              if (const auto *GV = dyn_cast<GlobalVariable>(U))
                return GV->hasName() &&
                       (GV->getName().equals("llvm.compiler.used") ||
                        GV->getName().equals("llvm.used"));
              return false;
            }))
          continue;
      }
      if (PutOffender)
        *PutOffender = FU;
      return true;
    }
    if (!Call->isCallee(&U) || Call->getFunctionType() != getFunctionType()) {
      if (IgnoreARCAttachedCall &&
          Call->isOperandBundleOfType(LLVMContext::OB_clang_arc_attachedcall,
                                      U.getOperandNo()))
        continue;

      if (PutOffender)
        *PutOffender = FU;
      return true;
    }
  }
  return false;
}

bool Function::isDefTriviallyDead() const {
  // Check the linkage.
  if (!hasLinkOnceLinkage() && !hasLocalLinkage() &&
      !hasAvailableExternallyLinkage())
    return false;

  // Check if the function is used by anything other than a blockaddress.
  for (const User *U : users())
    if (!isa<BlockAddress>(U))
      return false;

  return true;
}

/// callsFunctionThatReturnsTwice - Return true if the function has a call to
/// setjmp or another function that gcc recognizes as "returning twice".
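///
/// For example, given a declaration such as
///   declare i32 @setjmp(ptr) returns_twice
/// any call to @setjmp in this function makes this predicate return true,
/// because the returns_twice attribute is visible through the call site.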
bool Function::callsFunctionThatReturnsTwice() const {
  for (const Instruction &I : instructions(this))
    if (const auto *Call = dyn_cast<CallBase>(&I))
      if (Call->hasFnAttr(Attribute::ReturnsTwice))
        return true;

  return false;
}

Constant *Function::getPersonalityFn() const {
  assert(hasPersonalityFn() && getNumOperands());
  return cast<Constant>(Op<0>());
}

void Function::setPersonalityFn(Constant *Fn) {
  setHungoffOperand<0>(Fn);
  setValueSubclassDataBit(3, Fn != nullptr);
}

Constant *Function::getPrefixData() const {
  assert(hasPrefixData() && getNumOperands());
  return cast<Constant>(Op<1>());
}

void Function::setPrefixData(Constant *PrefixData) {
  setHungoffOperand<1>(PrefixData);
  setValueSubclassDataBit(1, PrefixData != nullptr);
}

Constant *Function::getPrologueData() const {
  assert(hasPrologueData() && getNumOperands());
  return cast<Constant>(Op<2>());
}

void Function::setPrologueData(Constant *PrologueData) {
  setHungoffOperand<2>(PrologueData);
  setValueSubclassDataBit(2, PrologueData != nullptr);
}

void Function::allocHungoffUselist() {
  // If we've already allocated a uselist, stop here.
  if (getNumOperands())
    return;

  allocHungoffUses(3, /*IsPhi=*/ false);
  setNumHungOffUseOperands(3);

  // Initialize the uselist with placeholder operands to allow traversal.
  auto *CPN = ConstantPointerNull::get(Type::getInt1PtrTy(getContext(), 0));
  Op<0>().set(CPN);
  Op<1>().set(CPN);
  Op<2>().set(CPN);
}

template <int Idx>
void Function::setHungoffOperand(Constant *C) {
  if (C) {
    allocHungoffUselist();
    Op<Idx>().set(C);
  } else if (getNumOperands()) {
    Op<Idx>().set(
        ConstantPointerNull::get(Type::getInt1PtrTy(getContext(), 0)));
  }
}

void Function::setValueSubclassDataBit(unsigned Bit, bool On) {
  assert(Bit < 16 && "SubclassData contains only 16 bits");
  if (On)
    setValueSubclassData(getSubclassDataFromValue() | (1 << Bit));
  else
    setValueSubclassData(getSubclassDataFromValue() & ~(1 << Bit));
}

void Function::setEntryCount(ProfileCount Count,
                             const DenseSet<GlobalValue::GUID> *S) {
#if !defined(NDEBUG)
  auto PrevCount = getEntryCount();
  assert(!PrevCount || PrevCount->getType() == Count.getType());
#endif

  auto ImportGUIDs = getImportGUIDs();
  if (S == nullptr && ImportGUIDs.size())
    S = &ImportGUIDs;

  MDBuilder MDB(getContext());
  setMetadata(
      LLVMContext::MD_prof,
      MDB.createFunctionEntryCount(Count.getCount(), Count.isSynthetic(), S));
}

void Function::setEntryCount(uint64_t Count, Function::ProfileCountType Type,
                             const DenseSet<GlobalValue::GUID> *Imports) {
  setEntryCount(ProfileCount(Count, Type), Imports);
}

std::optional<ProfileCount> Function::getEntryCount(bool AllowSynthetic) const {
  MDNode *MD = getMetadata(LLVMContext::MD_prof);
  if (MD && MD->getOperand(0))
    if (MDString *MDS = dyn_cast<MDString>(MD->getOperand(0))) {
      if (MDS->getString().equals("function_entry_count")) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(1));
        uint64_t Count = CI->getValue().getZExtValue();
        // A value of -1 is used for SamplePGO when there were no samples.
        // Treat this the same as unknown.
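        // Sketch of the !prof node decoded here (for orientation only):
        //   !{!"function_entry_count", i64 <count>, i64 <import GUID>, ...}
        // Operand 1 is the count; operands 2+ are the import GUIDs consumed
        // by getImportGUIDs() below.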
        if (Count == (uint64_t)-1)
          return std::nullopt;
        return ProfileCount(Count, PCT_Real);
      } else if (AllowSynthetic &&
                 MDS->getString().equals("synthetic_function_entry_count")) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(1));
        uint64_t Count = CI->getValue().getZExtValue();
        return ProfileCount(Count, PCT_Synthetic);
      }
    }
  return std::nullopt;
}

DenseSet<GlobalValue::GUID> Function::getImportGUIDs() const {
  DenseSet<GlobalValue::GUID> R;
  if (MDNode *MD = getMetadata(LLVMContext::MD_prof))
    if (MDString *MDS = dyn_cast<MDString>(MD->getOperand(0)))
      if (MDS->getString().equals("function_entry_count"))
        for (unsigned i = 2; i < MD->getNumOperands(); i++)
          R.insert(mdconst::extract<ConstantInt>(MD->getOperand(i))
                       ->getValue()
                       .getZExtValue());
  return R;
}

void Function::setSectionPrefix(StringRef Prefix) {
  MDBuilder MDB(getContext());
  setMetadata(LLVMContext::MD_section_prefix,
              MDB.createFunctionSectionPrefix(Prefix));
}

std::optional<StringRef> Function::getSectionPrefix() const {
  if (MDNode *MD = getMetadata(LLVMContext::MD_section_prefix)) {
    assert(cast<MDString>(MD->getOperand(0))
               ->getString()
               .equals("function_section_prefix") &&
           "Metadata does not match");
    return cast<MDString>(MD->getOperand(1))->getString();
  }
  return std::nullopt;
}

bool Function::nullPointerIsDefined() const {
  return hasFnAttribute(Attribute::NullPointerIsValid);
}

bool llvm::NullPointerIsDefined(const Function *F, unsigned AS) {
  if (F && F->nullPointerIsDefined())
    return true;

  if (AS != 0)
    return true;

  return false;
}
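// Illustrative round trip of the profiling helpers above (a sketch assuming
// some Function *F obtained elsewhere; not executed as part of this file):
//   F->setEntryCount(1000, Function::PCT_Real);
//   if (std::optional<Function::ProfileCount> PC = F->getEntryCount())
//     assert(PC->getCount() == 1000);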