//===- ReducerWorkItem.cpp - Wrapper for Module and MachineFunction ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ReducerWorkItem.h"
#include "TestRunner.h"
#include "llvm/Analysis/ModuleSummaryAnalysis.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/CodeGen/MIRPrinter.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValueManager.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Verifier.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <optional>

using namespace llvm;

ReducerWorkItem::ReducerWorkItem() = default;
ReducerWorkItem::~ReducerWorkItem() = default;

extern cl::OptionCategory LLVMReduceOptions;
static cl::opt<std::string> TargetTriple("mtriple",
                                         cl::desc("Set the target triple"),
                                         cl::cat(LLVMReduceOptions));

static cl::opt<bool> TmpFilesAsBitcode(
    "write-tmp-files-as-bitcode",
    cl::desc("Always write temporary files as bitcode instead of textual IR"),
    cl::init(false), cl::cat(LLVMReduceOptions));

static void cloneFrameInfo(
    MachineFrameInfo &DstMFI, const MachineFrameInfo &SrcMFI,
    const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB) {
  DstMFI.setFrameAddressIsTaken(SrcMFI.isFrameAddressTaken());
  DstMFI.setReturnAddressIsTaken(SrcMFI.isReturnAddressTaken());
  DstMFI.setHasStackMap(SrcMFI.hasStackMap());
  DstMFI.setHasPatchPoint(SrcMFI.hasPatchPoint());
  DstMFI.setUseLocalStackAllocationBlock(
      SrcMFI.getUseLocalStackAllocationBlock());
  DstMFI.setOffsetAdjustment(SrcMFI.getOffsetAdjustment());

  DstMFI.ensureMaxAlignment(SrcMFI.getMaxAlign());
  assert(DstMFI.getMaxAlign() == SrcMFI.getMaxAlign() &&
         "we need to set exact alignment");

  DstMFI.setAdjustsStack(SrcMFI.adjustsStack());
  DstMFI.setHasCalls(SrcMFI.hasCalls());
  DstMFI.setHasOpaqueSPAdjustment(SrcMFI.hasOpaqueSPAdjustment());
  DstMFI.setHasCopyImplyingStackAdjustment(
      SrcMFI.hasCopyImplyingStackAdjustment());
  DstMFI.setHasVAStart(SrcMFI.hasVAStart());
  DstMFI.setHasMustTailInVarArgFunc(SrcMFI.hasMustTailInVarArgFunc());
  DstMFI.setHasTailCall(SrcMFI.hasTailCall());

  if (SrcMFI.isMaxCallFrameSizeComputed())
    DstMFI.setMaxCallFrameSize(SrcMFI.getMaxCallFrameSize());

  DstMFI.setCVBytesOfCalleeSavedRegisters(
      SrcMFI.getCVBytesOfCalleeSavedRegisters());

  if (MachineBasicBlock *SavePt = SrcMFI.getSavePoint())
    DstMFI.setSavePoint(Src2DstMBB.find(SavePt)->second);
  if (MachineBasicBlock *RestorePt = SrcMFI.getRestorePoint())
    DstMFI.setRestorePoint(Src2DstMBB.find(RestorePt)->second);

  auto CopyObjectProperties = [](MachineFrameInfo &DstMFI,
                                 const MachineFrameInfo &SrcMFI, int FI) {
    if (SrcMFI.isStatepointSpillSlotObjectIndex(FI))
      DstMFI.markAsStatepointSpillSlotObjectIndex(FI);
    DstMFI.setObjectSSPLayout(FI, SrcMFI.getObjectSSPLayout(FI));
    DstMFI.setObjectZExt(FI, SrcMFI.isObjectZExt(FI));
    DstMFI.setObjectSExt(FI, SrcMFI.isObjectSExt(FI));
  };

  for (int i = 0, e = SrcMFI.getNumObjects() - SrcMFI.getNumFixedObjects();
       i != e; ++i) {
    int NewFI;

    assert(!SrcMFI.isFixedObjectIndex(i));
    if (SrcMFI.isVariableSizedObjectIndex(i)) {
      NewFI = DstMFI.CreateVariableSizedObject(SrcMFI.getObjectAlign(i),
                                               SrcMFI.getObjectAllocation(i));
    } else {
      NewFI = DstMFI.CreateStackObject(
          SrcMFI.getObjectSize(i), SrcMFI.getObjectAlign(i),
          SrcMFI.isSpillSlotObjectIndex(i), SrcMFI.getObjectAllocation(i),
          SrcMFI.getStackID(i));
      DstMFI.setObjectOffset(NewFI, SrcMFI.getObjectOffset(i));
    }

    CopyObjectProperties(DstMFI, SrcMFI, i);

    (void)NewFI;
    assert(i == NewFI && "expected to keep stable frame index numbering");
  }

  // Copy the fixed frame objects backwards to preserve frame index numbers,
  // since CreateFixedObject uses front insertion.
  for (int i = -1; i >= (int)-SrcMFI.getNumFixedObjects(); --i) {
    assert(SrcMFI.isFixedObjectIndex(i));
    int NewFI = DstMFI.CreateFixedObject(
        SrcMFI.getObjectSize(i), SrcMFI.getObjectOffset(i),
        SrcMFI.isImmutableObjectIndex(i), SrcMFI.isAliasedObjectIndex(i));
    CopyObjectProperties(DstMFI, SrcMFI, i);

    (void)NewFI;
    assert(i == NewFI && "expected to keep stable frame index numbering");
  }

  for (unsigned I = 0, E = SrcMFI.getLocalFrameObjectCount(); I < E; ++I) {
    auto LocalObject = SrcMFI.getLocalFrameObjectMap(I);
    DstMFI.mapLocalFrameObject(LocalObject.first, LocalObject.second);
  }

  DstMFI.setCalleeSavedInfo(SrcMFI.getCalleeSavedInfo());

  if (SrcMFI.hasStackProtectorIndex()) {
    DstMFI.setStackProtectorIndex(SrcMFI.getStackProtectorIndex());
  }

  // FIXME: Needs test, missing MIR serialization.
  if (SrcMFI.hasFunctionContextIndex()) {
    DstMFI.setFunctionContextIndex(SrcMFI.getFunctionContextIndex());
  }
}

static void cloneJumpTableInfo(
    MachineFunction &DstMF, const MachineJumpTableInfo &SrcJTI,
    const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB) {

  auto *DstJTI = DstMF.getOrCreateJumpTableInfo(SrcJTI.getEntryKind());

  std::vector<MachineBasicBlock *> DstBBs;

  for (const MachineJumpTableEntry &Entry : SrcJTI.getJumpTables()) {
    for (MachineBasicBlock *X : Entry.MBBs)
      DstBBs.push_back(Src2DstMBB.find(X)->second);

    DstJTI->createJumpTableIndex(DstBBs);
    DstBBs.clear();
  }
}

static void cloneMemOperands(MachineInstr &DstMI, MachineInstr &SrcMI,
                             MachineFunction &SrcMF, MachineFunction &DstMF) {
  // The new MachineMemOperands should be owned by the new function's
  // Allocator.
  PseudoSourceValueManager &PSVMgr = DstMF.getPSVManager();

  // We also need to remap the PseudoSourceValues from the new function's
  // PseudoSourceValueManager.
  SmallVector<MachineMemOperand *, 2> NewMMOs;
  for (MachineMemOperand *OldMMO : SrcMI.memoperands()) {
    MachinePointerInfo NewPtrInfo(OldMMO->getPointerInfo());
    if (const PseudoSourceValue *PSV =
            dyn_cast_if_present<const PseudoSourceValue *>(NewPtrInfo.V)) {
      switch (PSV->kind()) {
      case PseudoSourceValue::Stack:
        NewPtrInfo.V = PSVMgr.getStack();
        break;
      case PseudoSourceValue::GOT:
        NewPtrInfo.V = PSVMgr.getGOT();
        break;
      case PseudoSourceValue::JumpTable:
        NewPtrInfo.V = PSVMgr.getJumpTable();
        break;
      case PseudoSourceValue::ConstantPool:
        NewPtrInfo.V = PSVMgr.getConstantPool();
        break;
      case PseudoSourceValue::FixedStack:
        NewPtrInfo.V = PSVMgr.getFixedStack(
            cast<FixedStackPseudoSourceValue>(PSV)->getFrameIndex());
        break;
      case PseudoSourceValue::GlobalValueCallEntry:
        NewPtrInfo.V = PSVMgr.getGlobalValueCallEntry(
            cast<GlobalValuePseudoSourceValue>(PSV)->getValue());
        break;
      case PseudoSourceValue::ExternalSymbolCallEntry:
        NewPtrInfo.V = PSVMgr.getExternalSymbolCallEntry(
            cast<ExternalSymbolPseudoSourceValue>(PSV)->getSymbol());
        break;
      case PseudoSourceValue::TargetCustom:
      default:
        // FIXME: We have no generic interface for allocating custom PSVs.
        report_fatal_error("Cloning TargetCustom PSV not handled");
      }
    }

    MachineMemOperand *NewMMO = DstMF.getMachineMemOperand(
        NewPtrInfo, OldMMO->getFlags(), OldMMO->getMemoryType(),
        OldMMO->getBaseAlign(), OldMMO->getAAInfo(), OldMMO->getRanges(),
        OldMMO->getSyncScopeID(), OldMMO->getSuccessOrdering(),
        OldMMO->getFailureOrdering());
    NewMMOs.push_back(NewMMO);
  }

  DstMI.setMemRefs(DstMF, NewMMOs);
}

static std::unique_ptr<MachineFunction> cloneMF(MachineFunction *SrcMF,
                                                MachineModuleInfo &DestMMI) {
  auto DstMF = std::make_unique<MachineFunction>(
      SrcMF->getFunction(), SrcMF->getTarget(), SrcMF->getSubtarget(),
      SrcMF->getContext(), SrcMF->getFunctionNumber());
  DenseMap<MachineBasicBlock *, MachineBasicBlock *> Src2DstMBB;

  auto *SrcMRI = &SrcMF->getRegInfo();
  auto *DstMRI = &DstMF->getRegInfo();

  // Clone blocks.
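  // Creating every destination block up front populates Src2DstMBB, which the
  // later phases below use to remap successor lists, jump-table entries, and
  // block operands to their clones.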
  for (MachineBasicBlock &SrcMBB : *SrcMF) {
    MachineBasicBlock *DstMBB =
        DstMF->CreateMachineBasicBlock(SrcMBB.getBasicBlock());
    Src2DstMBB[&SrcMBB] = DstMBB;

    DstMBB->setCallFrameSize(SrcMBB.getCallFrameSize());

    if (SrcMBB.isIRBlockAddressTaken())
      DstMBB->setAddressTakenIRBlock(SrcMBB.getAddressTakenIRBlock());
    if (SrcMBB.isMachineBlockAddressTaken())
      DstMBB->setMachineBlockAddressTaken();

    // FIXME: This is not serialized
    if (SrcMBB.hasLabelMustBeEmitted())
      DstMBB->setLabelMustBeEmitted();

    DstMBB->setAlignment(SrcMBB.getAlignment());

    // FIXME: This is not serialized
    DstMBB->setMaxBytesForAlignment(SrcMBB.getMaxBytesForAlignment());

    DstMBB->setIsEHPad(SrcMBB.isEHPad());
    DstMBB->setIsEHScopeEntry(SrcMBB.isEHScopeEntry());
    DstMBB->setIsEHCatchretTarget(SrcMBB.isEHCatchretTarget());
    DstMBB->setIsEHFuncletEntry(SrcMBB.isEHFuncletEntry());

    // FIXME: These are not serialized
    DstMBB->setIsCleanupFuncletEntry(SrcMBB.isCleanupFuncletEntry());
    DstMBB->setIsBeginSection(SrcMBB.isBeginSection());
    DstMBB->setIsEndSection(SrcMBB.isEndSection());

    DstMBB->setSectionID(SrcMBB.getSectionID());
    DstMBB->setIsInlineAsmBrIndirectTarget(
        SrcMBB.isInlineAsmBrIndirectTarget());

    // FIXME: This is not serialized
    if (std::optional<uint64_t> Weight = SrcMBB.getIrrLoopHeaderWeight())
      DstMBB->setIrrLoopHeaderWeight(*Weight);
  }

  const MachineFrameInfo &SrcMFI = SrcMF->getFrameInfo();
  MachineFrameInfo &DstMFI = DstMF->getFrameInfo();

  // Copy stack objects and other info
  cloneFrameInfo(DstMFI, SrcMFI, Src2DstMBB);

  if (MachineJumpTableInfo *SrcJTI = SrcMF->getJumpTableInfo()) {
    cloneJumpTableInfo(*DstMF, *SrcJTI, Src2DstMBB);
  }

  // Remap the debug info frame index references.
  DstMF->VariableDbgInfos = SrcMF->VariableDbgInfos;

  // Clone virtual registers
  for (unsigned I = 0, E = SrcMRI->getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    Register NewReg = DstMRI->createIncompleteVirtualRegister(
        SrcMRI->getVRegName(Reg));
    assert(NewReg == Reg && "expected to preserve virtreg number");

    DstMRI->setRegClassOrRegBank(NewReg, SrcMRI->getRegClassOrRegBank(Reg));

    LLT RegTy = SrcMRI->getType(Reg);
    if (RegTy.isValid())
      DstMRI->setType(NewReg, RegTy);

    // Copy register allocation hints.
    const auto *Hints = SrcMRI->getRegAllocationHints(Reg);
    if (Hints)
      for (Register PrefReg : Hints->second)
        DstMRI->addRegAllocationHint(NewReg, PrefReg);
  }

  const TargetSubtargetInfo &STI = DstMF->getSubtarget();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  // Link blocks.
  for (auto &SrcMBB : *SrcMF) {
    auto *DstMBB = Src2DstMBB[&SrcMBB];
    DstMF->push_back(DstMBB);

    for (auto It = SrcMBB.succ_begin(), IterEnd = SrcMBB.succ_end();
         It != IterEnd; ++It) {
      auto *SrcSuccMBB = *It;
      auto *DstSuccMBB = Src2DstMBB[SrcSuccMBB];
      DstMBB->addSuccessor(DstSuccMBB, SrcMBB.getSuccProbability(It));
    }

    for (auto &LI : SrcMBB.liveins_dbg())
      DstMBB->addLiveIn(LI);

    // Make sure MRI knows about registers clobbered by unwinder.
    if (DstMBB->isEHPad()) {
      if (auto *RegMask = TRI->getCustomEHPadPreservedMask(*DstMF))
        DstMRI->addPhysRegsUsedFromRegMask(RegMask);
    }
  }

  DenseSet<const uint32_t *> ConstRegisterMasks;

  // Track predefined/named regmasks which we ignore.
  for (const uint32_t *Mask : TRI->getRegMasks())
    ConstRegisterMasks.insert(Mask);

  // Clone instructions.
  for (auto &SrcMBB : *SrcMF) {
    auto *DstMBB = Src2DstMBB[&SrcMBB];
    for (auto &SrcMI : SrcMBB) {
      const auto &MCID = TII->get(SrcMI.getOpcode());
      auto *DstMI = DstMF->CreateMachineInstr(MCID, SrcMI.getDebugLoc(),
                                              /*NoImplicit=*/true);
      DstMI->setFlags(SrcMI.getFlags());
      DstMI->setAsmPrinterFlag(SrcMI.getAsmPrinterFlags());

      DstMBB->push_back(DstMI);
      for (auto &SrcMO : SrcMI.operands()) {
        MachineOperand DstMO(SrcMO);
        DstMO.clearParent();

        // Update MBB.
        if (DstMO.isMBB())
          DstMO.setMBB(Src2DstMBB[DstMO.getMBB()]);
        else if (DstMO.isRegMask()) {
          DstMRI->addPhysRegsUsedFromRegMask(DstMO.getRegMask());

          if (!ConstRegisterMasks.count(DstMO.getRegMask())) {
            uint32_t *DstMask = DstMF->allocateRegMask();
            std::memcpy(DstMask, SrcMO.getRegMask(),
                        sizeof(*DstMask) *
                            MachineOperand::getRegMaskSize(TRI->getNumRegs()));
            DstMO.setRegMask(DstMask);
          }
        }

        DstMI->addOperand(DstMO);
      }

      cloneMemOperands(*DstMI, SrcMI, *SrcMF, *DstMF);
    }
  }

  DstMF->setAlignment(SrcMF->getAlignment());
  DstMF->setExposesReturnsTwice(SrcMF->exposesReturnsTwice());
  DstMF->setHasInlineAsm(SrcMF->hasInlineAsm());
  DstMF->setHasWinCFI(SrcMF->hasWinCFI());

  DstMF->getProperties().reset().set(SrcMF->getProperties());

  if (!SrcMF->getFrameInstructions().empty() ||
      !SrcMF->getLongjmpTargets().empty() ||
      !SrcMF->getCatchretTargets().empty())
    report_fatal_error("cloning not implemented for machine function property");

  DstMF->setCallsEHReturn(SrcMF->callsEHReturn());
  DstMF->setCallsUnwindInit(SrcMF->callsUnwindInit());
  DstMF->setHasEHCatchret(SrcMF->hasEHCatchret());
  DstMF->setHasEHScopes(SrcMF->hasEHScopes());
  DstMF->setHasEHFunclets(SrcMF->hasEHFunclets());
  DstMF->setIsOutlined(SrcMF->isOutlined());

  if (!SrcMF->getLandingPads().empty() ||
      !SrcMF->getCodeViewAnnotations().empty() ||
      !SrcMF->getTypeInfos().empty() ||
      !SrcMF->getFilterIds().empty() ||
      SrcMF->hasAnyWasmLandingPadIndex() ||
      SrcMF->hasAnyCallSiteLandingPad() ||
      SrcMF->hasAnyCallSiteLabel() ||
      !SrcMF->getCallSitesInfo().empty())
    report_fatal_error("cloning not implemented for machine function property");

  DstMF->setDebugInstrNumberingCount(SrcMF->DebugInstrNumberingCount);

  if (!DstMF->cloneInfoFrom(*SrcMF, Src2DstMBB))
    report_fatal_error("target does not implement MachineFunctionInfo cloning");

  DstMRI->freezeReservedRegs();

  DstMF->verify(nullptr, "", /*AbortOnError=*/true);
  return DstMF;
}

static void initializeTargetInfo() {
  InitializeAllTargets();
  InitializeAllTargetMCs();
  InitializeAllAsmPrinters();
  InitializeAllAsmParsers();
}

void ReducerWorkItem::print(raw_ostream &ROS, void *p) const {
  if (MMI) {
    printMIR(ROS, *M);
    for (Function &F : *M) {
      if (auto *MF = MMI->getMachineFunction(F))
        printMIR(ROS, *MMI, *MF);
    }
  } else {
    M->print(ROS, /*AssemblyAnnotationWriter=*/nullptr,
             /*ShouldPreserveUseListOrder=*/true);
  }
}

bool ReducerWorkItem::verify(raw_fd_ostream *OS) const {
  if (verifyModule(*M, OS))
    return true;

  if (!MMI)
    return false;

  for (const Function &F : getModule()) {
    if (const MachineFunction *MF = MMI->getMachineFunction(F)) {
      if (!MF->verify(nullptr, "", /*AbortOnError=*/false))
        return true;
    }
  }

  return false;
}

bool ReducerWorkItem::isReduced(const TestRunner &Test) const {
  const bool UseBitcode = Test.inputIsBitcode() || TmpFilesAsBitcode;

  SmallString<128> CurrentFilepath;

  // Write ReducerWorkItem to tmp file
  int FD;
  std::error_code EC = sys::fs::createTemporaryFile(
      "llvm-reduce", isMIR() ? "mir" : (UseBitcode ? "bc" : "ll"), FD,
      CurrentFilepath,
      UseBitcode && !isMIR() ? sys::fs::OF_None : sys::fs::OF_Text);
  if (EC) {
    WithColor::error(errs(), Test.getToolName())
        << "error making unique filename: " << EC.message() << '\n';
    exit(1);
  }

  ToolOutputFile Out(CurrentFilepath, FD);

  writeOutput(Out.os(), UseBitcode);

  Out.os().close();
  if (Out.os().has_error()) {
    WithColor::error(errs(), Test.getToolName())
        << "error emitting bitcode to file '" << CurrentFilepath
        << "': " << Out.os().error().message() << '\n';
    exit(1);
  }

  // Current Chunks aren't interesting
  return Test.run(CurrentFilepath);
}

std::unique_ptr<ReducerWorkItem>
ReducerWorkItem::clone(const TargetMachine *TM) const {
  auto CloneMMM = std::make_unique<ReducerWorkItem>();
  if (TM) {
    // We're assuming the Module IR contents are always unchanged by MIR
    // reductions, and can share it as a constant.
    CloneMMM->M = M;

    // MachineModuleInfo contains a lot of other state used during codegen
    // which we won't be using here, but we should be able to ignore it
    // (although this is pretty ugly).
    const LLVMTargetMachine *LLVMTM =
        static_cast<const LLVMTargetMachine *>(TM);
    CloneMMM->MMI = std::make_unique<MachineModuleInfo>(LLVMTM);

    for (const Function &F : getModule()) {
      if (auto *MF = MMI->getMachineFunction(F))
        CloneMMM->MMI->insertFunction(F, cloneMF(MF, *CloneMMM->MMI));
    }
  } else {
    CloneMMM->M = CloneModule(*M);
  }
  return CloneMMM;
}

/// Try to produce some number that indicates a function is getting smaller /
/// simpler.
static uint64_t computeMIRComplexityScoreImpl(const MachineFunction &MF) {
  uint64_t Score = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Add for stack objects
  Score += MFI.getNumObjects();

  // Add in the block count.
  Score += 2 * MF.size();

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    if (const auto *Hints = MRI.getRegAllocationHints(Reg))
      Score += Hints->second.size();
  }

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      const unsigned Opc = MI.getOpcode();

      // Reductions may want or need to introduce implicit_defs, so don't count
      // them.
      // TODO: These probably should count in some way.
      if (Opc == TargetOpcode::IMPLICIT_DEF ||
          Opc == TargetOpcode::G_IMPLICIT_DEF)
        continue;

      // Each instruction adds to the score
      Score += 4;

      if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI ||
          Opc == TargetOpcode::INLINEASM || Opc == TargetOpcode::INLINEASM_BR)
        ++Score;

      if (MI.getFlags() != 0)
        ++Score;

      // Increase weight for more operands.
      for (const MachineOperand &MO : MI.operands()) {
        ++Score;

        // Treat registers as more complex.
        if (MO.isReg()) {
          ++Score;

          // And subregisters as even more complex.
          if (MO.getSubReg()) {
            ++Score;
            if (MO.isDef())
              ++Score;
          }
        } else if (MO.isRegMask())
          ++Score;
      }
    }
  }

  return Score;
}

uint64_t ReducerWorkItem::computeMIRComplexityScore() const {
  uint64_t Score = 0;

  for (const Function &F : getModule()) {
    if (auto *MF = MMI->getMachineFunction(F))
      Score += computeMIRComplexityScoreImpl(*MF);
  }

  return Score;
}

// FIXME: ReduceOperandsSkip has a similar function, except it uses larger
// numbers for more reduced.
static unsigned classifyReductivePower(const Value *V) {
  if (auto *C = dyn_cast<ConstantData>(V)) {
    if (C->isNullValue())
      return 0;
    if (C->isOneValue())
      return 1;
    if (isa<UndefValue>(V))
      return 2;
    return 3;
  }

  if (isa<GlobalValue>(V))
    return 4;

  // TODO: Account for expression size
  if (isa<ConstantExpr>(V))
    return 5;

  if (isa<Constant>(V))
    return 1;

  if (isa<Argument>(V))
    return 6;

  if (isa<Instruction>(V))
    return 7;

  return 0;
}

// TODO: Additional flags and attributes may be complexity reducing. If we
// start adding flags and attributes, they could have negative cost.
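// Roughly one point is added per basic block, instruction, operand, attribute,
// and metadata node, with operands weighted by classifyReductivePower() so
// that harder-to-reduce values count for more.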
static uint64_t computeIRComplexityScoreImpl(const Function &F) {
  uint64_t Score = 1; // Count the function itself
  SmallVector<std::pair<unsigned, MDNode *>> MDs;

  AttributeList Attrs = F.getAttributes();
  for (AttributeSet AttrSet : Attrs)
    Score += AttrSet.getNumAttributes();

  for (const BasicBlock &BB : F) {
    ++Score;

    for (const Instruction &I : BB) {
      ++Score;

      if (const auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
        if (OverflowOp->hasNoUnsignedWrap())
          ++Score;
        if (OverflowOp->hasNoSignedWrap())
          ++Score;
      } else if (const auto *GEP = dyn_cast<GEPOperator>(&I)) {
        if (GEP->isInBounds())
          ++Score;
      } else if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
        if (ExactOp->isExact())
          ++Score;
      } else if (const auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
        FastMathFlags FMF = FPOp->getFastMathFlags();
        if (FMF.allowReassoc())
          ++Score;
        if (FMF.noNaNs())
          ++Score;
        if (FMF.noInfs())
          ++Score;
        if (FMF.noSignedZeros())
          ++Score;
        if (FMF.allowReciprocal())
          ++Score;
        if (FMF.allowContract())
          ++Score;
        if (FMF.approxFunc())
          ++Score;
      }

      for (const Value *Operand : I.operands()) {
        ++Score;
        Score += classifyReductivePower(Operand);
      }

      I.getAllMetadata(MDs);
      Score += MDs.size();
      MDs.clear();
    }
  }

  return Score;
}

uint64_t ReducerWorkItem::computeIRComplexityScore() const {
  uint64_t Score = 0;

  const Module &M = getModule();
  Score += M.named_metadata_size();

  SmallVector<std::pair<unsigned, MDNode *>, 32> GlobalMetadata;
  for (const GlobalVariable &GV : M.globals()) {
    ++Score;

    if (GV.hasInitializer())
      Score += classifyReductivePower(GV.getInitializer());

    // TODO: Account for linkage?

    GV.getAllMetadata(GlobalMetadata);
    Score += GlobalMetadata.size();
    GlobalMetadata.clear();
  }

  for (const GlobalAlias &GA : M.aliases())
    Score += classifyReductivePower(GA.getAliasee());

  for (const GlobalIFunc &GI : M.ifuncs())
    Score += classifyReductivePower(GI.getResolver());

  for (const Function &F : M)
    Score += computeIRComplexityScoreImpl(F);

  return Score;
}

void ReducerWorkItem::writeOutput(raw_ostream &OS, bool EmitBitcode) const {
  // Requesting bitcode emission with mir is nonsense, so just ignore it.
  if (EmitBitcode && !isMIR())
    writeBitcode(OS);
  else
    print(OS, /*AnnotationWriter=*/nullptr);
}

void ReducerWorkItem::readBitcode(MemoryBufferRef Data, LLVMContext &Ctx,
                                  StringRef ToolName) {
  Expected<BitcodeFileContents> IF = llvm::getBitcodeFileContents(Data);
  if (!IF) {
    WithColor::error(errs(), ToolName) << IF.takeError();
    exit(1);
  }
  BitcodeModule BM = IF->Mods[0];
  Expected<BitcodeLTOInfo> LI = BM.getLTOInfo();
  Expected<std::unique_ptr<Module>> MOrErr = BM.parseModule(Ctx);
  if (!LI || !MOrErr) {
    WithColor::error(errs(), ToolName) << IF.takeError();
    exit(1);
  }
  LTOInfo = std::make_unique<BitcodeLTOInfo>(*LI);
  M = std::move(MOrErr.get());
}

void ReducerWorkItem::writeBitcode(raw_ostream &OutStream) const {
  if (LTOInfo && LTOInfo->IsThinLTO && LTOInfo->EnableSplitLTOUnit) {
    PassBuilder PB;
    LoopAnalysisManager LAM;
    FunctionAnalysisManager FAM;
    CGSCCAnalysisManager CGAM;
    ModuleAnalysisManager MAM;
    PB.registerModuleAnalyses(MAM);
    PB.registerCGSCCAnalyses(CGAM);
    PB.registerFunctionAnalyses(FAM);
    PB.registerLoopAnalyses(LAM);
    PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
    ModulePassManager MPM;
    MPM.addPass(ThinLTOBitcodeWriterPass(OutStream, nullptr));
    MPM.run(*M, MAM);
  } else {
    std::unique_ptr<ModuleSummaryIndex> Index;
    if (LTOInfo && LTOInfo->HasSummary) {
      ProfileSummaryInfo PSI(*M);
      Index = std::make_unique<ModuleSummaryIndex>(
          buildModuleSummaryIndex(*M, nullptr, &PSI));
    }
    WriteBitcodeToFile(getModule(), OutStream,
                       /*ShouldPreserveUseListOrder=*/true, Index.get());
  }
}

std::pair<std::unique_ptr<ReducerWorkItem>, bool>
llvm::parseReducerWorkItem(StringRef ToolName, StringRef Filename,
                           LLVMContext &Ctxt,
                           std::unique_ptr<TargetMachine> &TM, bool IsMIR) {
  bool IsBitcode = false;
  Triple TheTriple;

  auto MMM = std::make_unique<ReducerWorkItem>();

  if (IsMIR) {
    initializeTargetInfo();

    auto FileOrErr = MemoryBuffer::getFileOrSTDIN(Filename, /*IsText=*/true);
    if (std::error_code EC = FileOrErr.getError()) {
      WithColor::error(errs(), ToolName) << EC.message() << '\n';
      return {nullptr, false};
    }

    std::unique_ptr<MIRParser> MParser =
        createMIRParser(std::move(FileOrErr.get()), Ctxt);

    auto SetDataLayout = [&](StringRef DataLayoutTargetTriple,
                             StringRef OldDLStr) -> std::optional<std::string> {
      // NB: We always call createTargetMachineForTriple() even if an explicit
      // DataLayout is already set in the module since we want to use this
      // callback to setup the TargetMachine rather than doing it later.
      std::string IRTargetTriple = DataLayoutTargetTriple.str();
      if (!TargetTriple.empty())
        IRTargetTriple = Triple::normalize(TargetTriple);
      TheTriple = Triple(IRTargetTriple);
      if (TheTriple.getTriple().empty())
        TheTriple.setTriple(sys::getDefaultTargetTriple());
      ExitOnError ExitOnErr(std::string(ToolName) + ": error: ");
      TM = ExitOnErr(codegen::createTargetMachineForTriple(TheTriple.str()));

      return TM->createDataLayout().getStringRepresentation();
    };

    std::unique_ptr<Module> M = MParser->parseIRModule(SetDataLayout);
    LLVMTargetMachine *LLVMTM = static_cast<LLVMTargetMachine *>(TM.get());

    MMM->MMI = std::make_unique<MachineModuleInfo>(LLVMTM);
    MParser->parseMachineFunctions(*M, *MMM->MMI);
    MMM->M = std::move(M);
  } else {
    SMDiagnostic Err;
    ErrorOr<std::unique_ptr<MemoryBuffer>> MB =
        MemoryBuffer::getFileOrSTDIN(Filename);
    if (std::error_code EC = MB.getError()) {
      WithColor::error(errs(), ToolName)
          << Filename << ": " << EC.message() << "\n";
      return {nullptr, false};
    }

    if (!isBitcode((const unsigned char *)(*MB)->getBufferStart(),
                   (const unsigned char *)(*MB)->getBufferEnd())) {
      std::unique_ptr<Module> Result = parseIR(**MB, Err, Ctxt);
      if (!Result) {
        Err.print(ToolName.data(), errs());
        return {nullptr, false};
      }
      MMM->M = std::move(Result);
    } else {
      IsBitcode = true;
      MMM->readBitcode(MemoryBufferRef(**MB), Ctxt, ToolName);

      if (MMM->LTOInfo->IsThinLTO && MMM->LTOInfo->EnableSplitLTOUnit)
        initializeTargetInfo();
    }
  }
  if (MMM->verify(&errs())) {
    WithColor::error(errs(), ToolName)
        << Filename << " - input module is broken!\n";
    return {nullptr, false};
  }
  return {std::move(MMM), IsBitcode};
}