//===- ReducerWorkItem.cpp - Wrapper for Module and MachineFunction -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ReducerWorkItem.h"
#include "TestRunner.h"
#include "llvm/Analysis/ModuleSummaryAnalysis.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/CodeGen/MIRPrinter.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Verifier.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <optional>

using namespace llvm;

ReducerWorkItem::ReducerWorkItem() = default;
ReducerWorkItem::~ReducerWorkItem() = default;

extern cl::OptionCategory LLVMReduceOptions;
static cl::opt<std::string> TargetTriple("mtriple",
                                         cl::desc("Set the target triple"),
                                         cl::cat(LLVMReduceOptions));

static cl::opt<bool> TmpFilesAsBitcode(
    "write-tmp-files-as-bitcode",
    cl::desc("Always write temporary files as bitcode instead of textual IR"),
    cl::init(false), cl::cat(LLVMReduceOptions));

static void cloneFrameInfo(
    MachineFrameInfo &DstMFI, const MachineFrameInfo &SrcMFI,
    const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB) {
  DstMFI.setFrameAddressIsTaken(SrcMFI.isFrameAddressTaken());
  DstMFI.setReturnAddressIsTaken(SrcMFI.isReturnAddressTaken());
  DstMFI.setHasStackMap(SrcMFI.hasStackMap());
  DstMFI.setHasPatchPoint(SrcMFI.hasPatchPoint());
  DstMFI.setUseLocalStackAllocationBlock(
      SrcMFI.getUseLocalStackAllocationBlock());
  DstMFI.setOffsetAdjustment(SrcMFI.getOffsetAdjustment());

  DstMFI.ensureMaxAlignment(SrcMFI.getMaxAlign());
  assert(DstMFI.getMaxAlign() == SrcMFI.getMaxAlign() &&
         "we need to set exact alignment");

  DstMFI.setAdjustsStack(SrcMFI.adjustsStack());
  DstMFI.setHasCalls(SrcMFI.hasCalls());
  DstMFI.setHasOpaqueSPAdjustment(SrcMFI.hasOpaqueSPAdjustment());
  DstMFI.setHasCopyImplyingStackAdjustment(
      SrcMFI.hasCopyImplyingStackAdjustment());
  DstMFI.setHasVAStart(SrcMFI.hasVAStart());
  DstMFI.setHasMustTailInVarArgFunc(SrcMFI.hasMustTailInVarArgFunc());
  DstMFI.setHasTailCall(SrcMFI.hasTailCall());

  if (SrcMFI.isMaxCallFrameSizeComputed())
    DstMFI.setMaxCallFrameSize(SrcMFI.getMaxCallFrameSize());

  DstMFI.setCVBytesOfCalleeSavedRegisters(
      SrcMFI.getCVBytesOfCalleeSavedRegisters());

  if (MachineBasicBlock *SavePt = SrcMFI.getSavePoint())
    DstMFI.setSavePoint(Src2DstMBB.find(SavePt)->second);
  if (MachineBasicBlock *RestorePt = SrcMFI.getRestorePoint())
    DstMFI.setRestorePoint(Src2DstMBB.find(RestorePt)->second);


  auto CopyObjectProperties = [](MachineFrameInfo &DstMFI,
                                 const MachineFrameInfo &SrcMFI, int FI) {
    if (SrcMFI.isStatepointSpillSlotObjectIndex(FI))
      DstMFI.markAsStatepointSpillSlotObjectIndex(FI);
    DstMFI.setObjectSSPLayout(FI, SrcMFI.getObjectSSPLayout(FI));
    DstMFI.setObjectZExt(FI, SrcMFI.isObjectZExt(FI));
    DstMFI.setObjectSExt(FI, SrcMFI.isObjectSExt(FI));
  };

  for (int i = 0, e = SrcMFI.getNumObjects() - SrcMFI.getNumFixedObjects();
       i != e; ++i) {
    int NewFI;

    assert(!SrcMFI.isFixedObjectIndex(i));
    if (SrcMFI.isVariableSizedObjectIndex(i)) {
      NewFI = DstMFI.CreateVariableSizedObject(SrcMFI.getObjectAlign(i),
                                               SrcMFI.getObjectAllocation(i));
    } else {
      NewFI = DstMFI.CreateStackObject(
          SrcMFI.getObjectSize(i), SrcMFI.getObjectAlign(i),
          SrcMFI.isSpillSlotObjectIndex(i), SrcMFI.getObjectAllocation(i),
          SrcMFI.getStackID(i));
      DstMFI.setObjectOffset(NewFI, SrcMFI.getObjectOffset(i));
    }

    CopyObjectProperties(DstMFI, SrcMFI, i);

    (void)NewFI;
    assert(i == NewFI && "expected to keep stable frame index numbering");
  }

  // Copy the fixed frame objects backwards to preserve frame index numbers,
  // since CreateFixedObject uses front insertion.
  for (int i = -1; i >= (int)-SrcMFI.getNumFixedObjects(); --i) {
    assert(SrcMFI.isFixedObjectIndex(i));
    int NewFI = DstMFI.CreateFixedObject(
        SrcMFI.getObjectSize(i), SrcMFI.getObjectOffset(i),
        SrcMFI.isImmutableObjectIndex(i), SrcMFI.isAliasedObjectIndex(i));
    CopyObjectProperties(DstMFI, SrcMFI, i);

    (void)NewFI;
    assert(i == NewFI && "expected to keep stable frame index numbering");
  }

  for (unsigned I = 0, E = SrcMFI.getLocalFrameObjectCount(); I < E; ++I) {
    auto LocalObject = SrcMFI.getLocalFrameObjectMap(I);
    DstMFI.mapLocalFrameObject(LocalObject.first, LocalObject.second);
  }

  DstMFI.setCalleeSavedInfo(SrcMFI.getCalleeSavedInfo());

  if (SrcMFI.hasStackProtectorIndex()) {
    DstMFI.setStackProtectorIndex(SrcMFI.getStackProtectorIndex());
  }

  // FIXME: Needs test, missing MIR serialization.
  if (SrcMFI.hasFunctionContextIndex()) {
    DstMFI.setFunctionContextIndex(SrcMFI.getFunctionContextIndex());
  }
}

static void cloneMemOperands(MachineInstr &DstMI, MachineInstr &SrcMI,
                             MachineFunction &SrcMF, MachineFunction &DstMF) {
  // The new MachineMemOperands should be owned by the new function's
  // Allocator.
  PseudoSourceValueManager &PSVMgr = DstMF.getPSVManager();

  // We also need to remap the PseudoSourceValues from the new function's
  // PseudoSourceValueManager.
  SmallVector<MachineMemOperand *, 2> NewMMOs;
  for (MachineMemOperand *OldMMO : SrcMI.memoperands()) {
    MachinePointerInfo NewPtrInfo(OldMMO->getPointerInfo());
    if (const PseudoSourceValue *PSV =
            dyn_cast_if_present<const PseudoSourceValue *>(NewPtrInfo.V)) {
      switch (PSV->kind()) {
      case PseudoSourceValue::Stack:
        NewPtrInfo.V = PSVMgr.getStack();
        break;
      case PseudoSourceValue::GOT:
        NewPtrInfo.V = PSVMgr.getGOT();
        break;
      case PseudoSourceValue::JumpTable:
        NewPtrInfo.V = PSVMgr.getJumpTable();
        break;
      case PseudoSourceValue::ConstantPool:
        NewPtrInfo.V = PSVMgr.getConstantPool();
        break;
      case PseudoSourceValue::FixedStack:
        NewPtrInfo.V = PSVMgr.getFixedStack(
            cast<FixedStackPseudoSourceValue>(PSV)->getFrameIndex());
        break;
      case PseudoSourceValue::GlobalValueCallEntry:
        NewPtrInfo.V = PSVMgr.getGlobalValueCallEntry(
            cast<GlobalValuePseudoSourceValue>(PSV)->getValue());
        break;
      case PseudoSourceValue::ExternalSymbolCallEntry:
        NewPtrInfo.V = PSVMgr.getExternalSymbolCallEntry(
            cast<ExternalSymbolPseudoSourceValue>(PSV)->getSymbol());
        break;
      case PseudoSourceValue::TargetCustom:
      default:
        // FIXME: We have no generic interface for allocating custom PSVs.
        report_fatal_error("Cloning TargetCustom PSV not handled");
      }
    }

    MachineMemOperand *NewMMO = DstMF.getMachineMemOperand(
        NewPtrInfo, OldMMO->getFlags(), OldMMO->getMemoryType(),
        OldMMO->getBaseAlign(), OldMMO->getAAInfo(), OldMMO->getRanges(),
        OldMMO->getSyncScopeID(), OldMMO->getSuccessOrdering(),
        OldMMO->getFailureOrdering());
    NewMMOs.push_back(NewMMO);
  }

  DstMI.setMemRefs(DstMF, NewMMOs);
}

static std::unique_ptr<MachineFunction> cloneMF(MachineFunction *SrcMF,
                                                MachineModuleInfo &DestMMI) {
  auto DstMF = std::make_unique<MachineFunction>(
      SrcMF->getFunction(), SrcMF->getTarget(), SrcMF->getSubtarget(),
      SrcMF->getFunctionNumber(), DestMMI);
  DenseMap<MachineBasicBlock *, MachineBasicBlock *> Src2DstMBB;

  auto *SrcMRI = &SrcMF->getRegInfo();
  auto *DstMRI = &DstMF->getRegInfo();

  // Clone blocks.
  for (MachineBasicBlock &SrcMBB : *SrcMF) {
    MachineBasicBlock *DstMBB =
        DstMF->CreateMachineBasicBlock(SrcMBB.getBasicBlock());
    Src2DstMBB[&SrcMBB] = DstMBB;

    DstMBB->setCallFrameSize(SrcMBB.getCallFrameSize());

    if (SrcMBB.isIRBlockAddressTaken())
      DstMBB->setAddressTakenIRBlock(SrcMBB.getAddressTakenIRBlock());
    if (SrcMBB.isMachineBlockAddressTaken())
      DstMBB->setMachineBlockAddressTaken();

    // FIXME: This is not serialized
    if (SrcMBB.hasLabelMustBeEmitted())
      DstMBB->setLabelMustBeEmitted();

    DstMBB->setAlignment(SrcMBB.getAlignment());

    // FIXME: This is not serialized
    DstMBB->setMaxBytesForAlignment(SrcMBB.getMaxBytesForAlignment());

    DstMBB->setIsEHPad(SrcMBB.isEHPad());
    DstMBB->setIsEHScopeEntry(SrcMBB.isEHScopeEntry());
    DstMBB->setIsEHCatchretTarget(SrcMBB.isEHCatchretTarget());
    DstMBB->setIsEHFuncletEntry(SrcMBB.isEHFuncletEntry());

    // FIXME: These are not serialized
    DstMBB->setIsCleanupFuncletEntry(SrcMBB.isCleanupFuncletEntry());
    DstMBB->setIsBeginSection(SrcMBB.isBeginSection());
    DstMBB->setIsEndSection(SrcMBB.isEndSection());

    DstMBB->setSectionID(SrcMBB.getSectionID());
    DstMBB->setIsInlineAsmBrIndirectTarget(
        SrcMBB.isInlineAsmBrIndirectTarget());

    // FIXME: This is not serialized
    if (std::optional<uint64_t> Weight = SrcMBB.getIrrLoopHeaderWeight())
      DstMBB->setIrrLoopHeaderWeight(*Weight);
  }

  const MachineFrameInfo &SrcMFI = SrcMF->getFrameInfo();
  MachineFrameInfo &DstMFI = DstMF->getFrameInfo();

  // Copy stack objects and other info
  cloneFrameInfo(DstMFI, SrcMFI, Src2DstMBB);

  // Remap the debug info frame index references.
  DstMF->VariableDbgInfos = SrcMF->VariableDbgInfos;

  // Clone virtual registers
  for (unsigned I = 0, E = SrcMRI->getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    Register NewReg = DstMRI->createIncompleteVirtualRegister(
        SrcMRI->getVRegName(Reg));
    assert(NewReg == Reg && "expected to preserve virtreg number");

    DstMRI->setRegClassOrRegBank(NewReg, SrcMRI->getRegClassOrRegBank(Reg));

    LLT RegTy = SrcMRI->getType(Reg);
    if (RegTy.isValid())
      DstMRI->setType(NewReg, RegTy);

    // Copy register allocation hints.
    const auto &Hints = SrcMRI->getRegAllocationHints(Reg);
    for (Register PrefReg : Hints.second)
      DstMRI->addRegAllocationHint(NewReg, PrefReg);
  }

  const TargetSubtargetInfo &STI = DstMF->getSubtarget();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  // Link blocks.
  for (auto &SrcMBB : *SrcMF) {
    auto *DstMBB = Src2DstMBB[&SrcMBB];
    DstMF->push_back(DstMBB);

    for (auto It = SrcMBB.succ_begin(), IterEnd = SrcMBB.succ_end();
         It != IterEnd; ++It) {
      auto *SrcSuccMBB = *It;
      auto *DstSuccMBB = Src2DstMBB[SrcSuccMBB];
      DstMBB->addSuccessor(DstSuccMBB, SrcMBB.getSuccProbability(It));
    }

    for (auto &LI : SrcMBB.liveins_dbg())
      DstMBB->addLiveIn(LI);

    // Make sure MRI knows about registers clobbered by unwinder.
    if (DstMBB->isEHPad()) {
      if (auto *RegMask = TRI->getCustomEHPadPreservedMask(*DstMF))
        DstMRI->addPhysRegsUsedFromRegMask(RegMask);
    }
  }

  DenseSet<const uint32_t *> ConstRegisterMasks;

  // Track predefined/named regmasks which we ignore.
  for (const uint32_t *Mask : TRI->getRegMasks())
    ConstRegisterMasks.insert(Mask);

  // Clone instructions.
  for (auto &SrcMBB : *SrcMF) {
    auto *DstMBB = Src2DstMBB[&SrcMBB];
    for (auto &SrcMI : SrcMBB) {
      const auto &MCID = TII->get(SrcMI.getOpcode());
      auto *DstMI = DstMF->CreateMachineInstr(MCID, SrcMI.getDebugLoc(),
                                              /*NoImplicit=*/true);
      DstMI->setFlags(SrcMI.getFlags());
      DstMI->setAsmPrinterFlag(SrcMI.getAsmPrinterFlags());

      DstMBB->push_back(DstMI);
      for (auto &SrcMO : SrcMI.operands()) {
        MachineOperand DstMO(SrcMO);
        DstMO.clearParent();

        // Update MBB.
        if (DstMO.isMBB())
          DstMO.setMBB(Src2DstMBB[DstMO.getMBB()]);
        else if (DstMO.isRegMask()) {
          DstMRI->addPhysRegsUsedFromRegMask(DstMO.getRegMask());

          if (!ConstRegisterMasks.count(DstMO.getRegMask())) {
            uint32_t *DstMask = DstMF->allocateRegMask();
            std::memcpy(DstMask, SrcMO.getRegMask(),
                        sizeof(*DstMask) *
                            MachineOperand::getRegMaskSize(TRI->getNumRegs()));
            DstMO.setRegMask(DstMask);
          }
        }

        DstMI->addOperand(DstMO);
      }

      cloneMemOperands(*DstMI, SrcMI, *SrcMF, *DstMF);
    }
  }

  DstMF->setAlignment(SrcMF->getAlignment());
  DstMF->setExposesReturnsTwice(SrcMF->exposesReturnsTwice());
  DstMF->setHasInlineAsm(SrcMF->hasInlineAsm());
  DstMF->setHasWinCFI(SrcMF->hasWinCFI());

  DstMF->getProperties().reset().set(SrcMF->getProperties());

  if (!SrcMF->getFrameInstructions().empty() ||
      !SrcMF->getLongjmpTargets().empty() ||
      !SrcMF->getCatchretTargets().empty())
    report_fatal_error("cloning not implemented for machine function property");

  DstMF->setCallsEHReturn(SrcMF->callsEHReturn());
  DstMF->setCallsUnwindInit(SrcMF->callsUnwindInit());
  DstMF->setHasEHCatchret(SrcMF->hasEHCatchret());
  DstMF->setHasEHScopes(SrcMF->hasEHScopes());
  DstMF->setHasEHFunclets(SrcMF->hasEHFunclets());
  DstMF->setIsOutlined(SrcMF->isOutlined());

  if (!SrcMF->getLandingPads().empty() ||
      !SrcMF->getCodeViewAnnotations().empty() ||
      !SrcMF->getTypeInfos().empty() ||
      !SrcMF->getFilterIds().empty() ||
      SrcMF->hasAnyWasmLandingPadIndex() ||
      SrcMF->hasAnyCallSiteLandingPad() ||
      SrcMF->hasAnyCallSiteLabel() ||
      !SrcMF->getCallSitesInfo().empty())
    report_fatal_error("cloning not implemented for machine function property");

  DstMF->setDebugInstrNumberingCount(SrcMF->DebugInstrNumberingCount);

  if (!DstMF->cloneInfoFrom(*SrcMF, Src2DstMBB))
    report_fatal_error("target does not implement MachineFunctionInfo cloning");

  DstMRI->freezeReservedRegs(*DstMF);

  DstMF->verify(nullptr, "", /*AbortOnError=*/true);
  return DstMF;
}

static void initializeTargetInfo() {
  InitializeAllTargets();
  InitializeAllTargetMCs();
  InitializeAllAsmPrinters();
  InitializeAllAsmParsers();
}

void ReducerWorkItem::print(raw_ostream &ROS, void *p) const {
  if (MMI) {
    printMIR(ROS, *M);
    for (Function &F : *M) {
      if (auto *MF = MMI->getMachineFunction(F))
        printMIR(ROS, *MF);
    }
  } else {
    M->print(ROS, /*AssemblyAnnotationWriter=*/nullptr,
             /*ShouldPreserveUseListOrder=*/true);
  }
}

bool ReducerWorkItem::verify(raw_fd_ostream *OS) const {
  if (verifyModule(*M, OS))
    return true;

  if (!MMI)
    return false;

  for (const Function &F : getModule()) {
    if (const MachineFunction *MF = MMI->getMachineFunction(F)) {
      if (!MF->verify(nullptr, "",
                      /*AbortOnError=*/false))
        return true;
    }
  }

  return false;
}

bool ReducerWorkItem::isReduced(const TestRunner &Test) const {
  const bool UseBitcode = Test.inputIsBitcode() || TmpFilesAsBitcode;

  SmallString<128> CurrentFilepath;

  // Write ReducerWorkItem to tmp file
  int FD;
  std::error_code EC = sys::fs::createTemporaryFile(
      "llvm-reduce", isMIR() ? "mir" : (UseBitcode ? "bc" : "ll"), FD,
      CurrentFilepath,
      UseBitcode && !isMIR() ? sys::fs::OF_None : sys::fs::OF_Text);
  if (EC) {
    WithColor::error(errs(), Test.getToolName())
        << "error making unique filename: " << EC.message() << '\n';
    exit(1);
  }

  ToolOutputFile Out(CurrentFilepath, FD);

  writeOutput(Out.os(), UseBitcode);

  Out.os().close();
  if (Out.os().has_error()) {
    WithColor::error(errs(), Test.getToolName())
        << "error emitting bitcode to file '" << CurrentFilepath
        << "': " << Out.os().error().message() << '\n';
    exit(1);
  }

  // Current Chunks aren't interesting
  return Test.run(CurrentFilepath);
}

std::unique_ptr<ReducerWorkItem>
ReducerWorkItem::clone(const TargetMachine *TM) const {
  auto CloneMMM = std::make_unique<ReducerWorkItem>();
  if (TM) {
    // We're assuming the Module IR contents are always unchanged by MIR
    // reductions, and can share it as a constant.
    CloneMMM->M = M;

    // MachineModuleInfo contains a lot of other state used during codegen which
    // we won't be using here, but we should be able to ignore it (although this
    // is pretty ugly).
    const LLVMTargetMachine *LLVMTM =
        static_cast<const LLVMTargetMachine *>(TM);
    CloneMMM->MMI = std::make_unique<MachineModuleInfo>(LLVMTM);

    for (const Function &F : getModule()) {
      if (auto *MF = MMI->getMachineFunction(F))
        CloneMMM->MMI->insertFunction(F, cloneMF(MF, *CloneMMM->MMI));
    }
  } else {
    CloneMMM->M = CloneModule(*M);
  }
  return CloneMMM;
}

/// Try to produce some number that indicates a function is getting smaller /
/// simpler.
static uint64_t computeMIRComplexityScoreImpl(const MachineFunction &MF) {
  uint64_t Score = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Add for stack objects
  Score += MFI.getNumObjects();

  // Add in the block count.
  Score += 2 * MF.size();

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    Score += MRI.getRegAllocationHints(Reg).second.size();
  }

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      const unsigned Opc = MI.getOpcode();

      // Reductions may want or need to introduce implicit_defs, so don't count
      // them.
      // TODO: These probably should count in some way.
      if (Opc == TargetOpcode::IMPLICIT_DEF ||
          Opc == TargetOpcode::G_IMPLICIT_DEF)
        continue;

      // Each instruction adds to the score
      Score += 4;

      if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI ||
          Opc == TargetOpcode::INLINEASM || Opc == TargetOpcode::INLINEASM_BR)
        ++Score;

      if (MI.getFlags() != 0)
        ++Score;

      // Increase weight for more operands.
      for (const MachineOperand &MO : MI.operands()) {
        ++Score;

        // Treat registers as more complex.
        if (MO.isReg()) {
          ++Score;

          // And subregisters as even more complex.
          if (MO.getSubReg()) {
            ++Score;
            if (MO.isDef())
              ++Score;
          }
        } else if (MO.isRegMask())
          ++Score;
      }
    }
  }

  return Score;
}

uint64_t ReducerWorkItem::computeMIRComplexityScore() const {
  uint64_t Score = 0;

  for (const Function &F : getModule()) {
    if (auto *MF = MMI->getMachineFunction(F))
      Score += computeMIRComplexityScoreImpl(*MF);
  }

  return Score;
}

// FIXME: ReduceOperandsSkip has a similar function, except it uses larger
// numbers to mean more reduced.
static unsigned classifyReductivePower(const Value *V) {
  if (auto *C = dyn_cast<ConstantData>(V)) {
    if (C->isNullValue())
      return 0;
    if (C->isOneValue())
      return 1;
    if (isa<UndefValue>(V))
      return 2;
    return 3;
  }

  if (isa<GlobalValue>(V))
    return 4;

  // TODO: Account for expression size
  if (isa<ConstantExpr>(V))
    return 5;

  if (isa<Constant>(V))
    return 1;

  if (isa<Argument>(V))
    return 6;

  if (isa<Instruction>(V))
    return 7;

  return 0;
}

// TODO: Additional flags and attributes may be complexity reducing. If we start
// adding flags and attributes, they could have negative cost.
static uint64_t computeIRComplexityScoreImpl(const Function &F) {
  uint64_t Score = 1; // Count the function itself
  SmallVector<std::pair<unsigned, MDNode *>> MDs;

  AttributeList Attrs = F.getAttributes();
  for (AttributeSet AttrSet : Attrs)
    Score += AttrSet.getNumAttributes();

  for (const BasicBlock &BB : F) {
    ++Score;

    for (const Instruction &I : BB) {
      ++Score;

      if (const auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
        if (OverflowOp->hasNoUnsignedWrap())
          ++Score;
        if (OverflowOp->hasNoSignedWrap())
          ++Score;
      } else if (const auto *GEP = dyn_cast<GEPOperator>(&I)) {
        if (GEP->isInBounds())
          ++Score;
      } else if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
        if (ExactOp->isExact())
          ++Score;
      } else if (const auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
        FastMathFlags FMF = FPOp->getFastMathFlags();
        if (FMF.allowReassoc())
          ++Score;
        if (FMF.noNaNs())
          ++Score;
        if (FMF.noInfs())
          ++Score;
        if (FMF.noSignedZeros())
          ++Score;
        if (FMF.allowReciprocal())
          ++Score;
        if (FMF.allowContract())
          ++Score;
        if (FMF.approxFunc())
          ++Score;
      }

      for (const Value *Operand : I.operands()) {
        ++Score;
        Score += classifyReductivePower(Operand);
      }

      I.getAllMetadata(MDs);
      Score += MDs.size();
      MDs.clear();
    }
  }

  return Score;
}

uint64_t ReducerWorkItem::computeIRComplexityScore() const {
  uint64_t Score = 0;

  const Module &M = getModule();
  Score += M.named_metadata_size();

  SmallVector<std::pair<unsigned, MDNode *>, 32> GlobalMetadata;
  for (const GlobalVariable &GV : M.globals()) {
    ++Score;

    if (GV.hasInitializer())
      Score += classifyReductivePower(GV.getInitializer());

    // TODO: Account for linkage?

    GV.getAllMetadata(GlobalMetadata);
    Score += GlobalMetadata.size();
    GlobalMetadata.clear();
  }

  for (const GlobalAlias &GA : M.aliases())
    Score += classifyReductivePower(GA.getAliasee());

  for (const GlobalIFunc &GI : M.ifuncs())
    Score += classifyReductivePower(GI.getResolver());

  for (const Function &F : M)
    Score += computeIRComplexityScoreImpl(F);

  return Score;
}

void ReducerWorkItem::writeOutput(raw_ostream &OS, bool EmitBitcode) const {
  // Requesting bitcode emission with mir is nonsense, so just ignore it.
  if (EmitBitcode && !isMIR())
    writeBitcode(OS);
  else
    print(OS, /*AnnotationWriter=*/nullptr);
}

void ReducerWorkItem::readBitcode(MemoryBufferRef Data, LLVMContext &Ctx,
                                  StringRef ToolName) {
  Expected<BitcodeFileContents> IF = llvm::getBitcodeFileContents(Data);
  if (!IF) {
    WithColor::error(errs(), ToolName) << IF.takeError();
    exit(1);
  }
  BitcodeModule BM = IF->Mods[0];
  Expected<BitcodeLTOInfo> LI = BM.getLTOInfo();
  Expected<std::unique_ptr<Module>> MOrErr = BM.parseModule(Ctx);
  if (!LI || !MOrErr) {
    WithColor::error(errs(), ToolName) << IF.takeError();
    exit(1);
  }
  LTOInfo = std::make_unique<BitcodeLTOInfo>(*LI);
  M = std::move(MOrErr.get());
}

void ReducerWorkItem::writeBitcode(raw_ostream &OutStream) const {
  if (LTOInfo && LTOInfo->IsThinLTO && LTOInfo->EnableSplitLTOUnit) {
    PassBuilder PB;
    LoopAnalysisManager LAM;
    FunctionAnalysisManager FAM;
    CGSCCAnalysisManager CGAM;
    ModuleAnalysisManager MAM;
    PB.registerModuleAnalyses(MAM);
    PB.registerCGSCCAnalyses(CGAM);
    PB.registerFunctionAnalyses(FAM);
    PB.registerLoopAnalyses(LAM);
    PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
    ModulePassManager MPM;
    MPM.addPass(ThinLTOBitcodeWriterPass(OutStream, nullptr));
    MPM.run(*M, MAM);
  } else {
    std::unique_ptr<ModuleSummaryIndex> Index;
    if (LTOInfo && LTOInfo->HasSummary) {
      ProfileSummaryInfo PSI(*M);
      Index = std::make_unique<ModuleSummaryIndex>(
          buildModuleSummaryIndex(*M, nullptr, &PSI));
    }
    WriteBitcodeToFile(getModule(), OutStream,
                       /*ShouldPreserveUseListOrder=*/true, Index.get());
  }
}

std::pair<std::unique_ptr<ReducerWorkItem>, bool>
llvm::parseReducerWorkItem(StringRef ToolName, StringRef Filename,
                           LLVMContext &Ctxt,
                           std::unique_ptr<TargetMachine> &TM, bool IsMIR) {
  bool IsBitcode = false;
  Triple TheTriple;

  auto MMM = std::make_unique<ReducerWorkItem>();

  if (IsMIR) {
    initializeTargetInfo();

    auto FileOrErr = MemoryBuffer::getFileOrSTDIN(Filename, /*IsText=*/true);
    if (std::error_code EC = FileOrErr.getError()) {
      WithColor::error(errs(), ToolName) << EC.message() << '\n';
      return {nullptr, false};
    }

    std::unique_ptr<MIRParser> MParser =
        createMIRParser(std::move(FileOrErr.get()), Ctxt);

    auto SetDataLayout = [&](StringRef DataLayoutTargetTriple,
                             StringRef OldDLStr) -> std::optional<std::string> {
      // NB: We always call createTargetMachineForTriple() even if an explicit
      // DataLayout is already set in the module since we want to use this
      // callback to setup the TargetMachine rather than doing it later.
      std::string IRTargetTriple = DataLayoutTargetTriple.str();
      if (!TargetTriple.empty())
        IRTargetTriple = Triple::normalize(TargetTriple);
      TheTriple = Triple(IRTargetTriple);
      if (TheTriple.getTriple().empty())
        TheTriple.setTriple(sys::getDefaultTargetTriple());
      ExitOnError ExitOnErr(std::string(ToolName) + ": error: ");
      TM = ExitOnErr(codegen::createTargetMachineForTriple(TheTriple.str()));

      return TM->createDataLayout().getStringRepresentation();
    };

    std::unique_ptr<Module> M = MParser->parseIRModule(SetDataLayout);
    LLVMTargetMachine *LLVMTM = static_cast<LLVMTargetMachine *>(TM.get());

    MMM->MMI = std::make_unique<MachineModuleInfo>(LLVMTM);
    MParser->parseMachineFunctions(*M, *MMM->MMI);
    MMM->M = std::move(M);
  } else {
    SMDiagnostic Err;
    ErrorOr<std::unique_ptr<MemoryBuffer>> MB =
        MemoryBuffer::getFileOrSTDIN(Filename);
    if (std::error_code EC = MB.getError()) {
      WithColor::error(errs(), ToolName)
          << Filename << ": " << EC.message() << "\n";
      return {nullptr, false};
    }

    if (!isBitcode((const unsigned char *)(*MB)->getBufferStart(),
                   (const unsigned char *)(*MB)->getBufferEnd())) {
      std::unique_ptr<Module> Result = parseIR(**MB, Err, Ctxt);
      if (!Result) {
        Err.print(ToolName.data(), errs());
        return {nullptr, false};
      }
      MMM->M = std::move(Result);
    } else {
      IsBitcode = true;
      MMM->readBitcode(MemoryBufferRef(**MB), Ctxt, ToolName);

      if (MMM->LTOInfo->IsThinLTO && MMM->LTOInfo->EnableSplitLTOUnit)
        initializeTargetInfo();
    }
  }
  if (MMM->verify(&errs())) {
    WithColor::error(errs(), ToolName)
        << Filename << " - input module is broken!\n";
    return {nullptr, false};
  }
  return {std::move(MMM), IsBitcode};
}