//===-- RISCVTargetMachine.cpp - Define TargetMachine for RISC-V ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implements the info about RISC-V target spec.
//
//===----------------------------------------------------------------------===//

#include "RISCVTargetMachine.h"
#include "MCTargetDesc/RISCVBaseInfo.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVTargetObjectFile.h"
#include "RISCVTargetTransformInfo.h"
#include "TargetInfo/RISCVTargetInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/MacroFusion.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Vectorize/LoopIdiomVectorize.h"
#include <optional>
using namespace llvm;

static cl::opt<bool> EnableRedundantCopyElimination(
    "riscv-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("riscv-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableMachineCombiner("riscv-enable-machine-combiner",
                          cl::desc("Enable the machine combiner pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<unsigned> RVVVectorBitsMaxOpt(
    "riscv-v-vector-bits-max",
    cl::desc("Assume V extension vector registers are at most this big, "
             "with zero meaning no maximum size is assumed."),
    cl::init(0), cl::Hidden);

static cl::opt<int> RVVVectorBitsMinOpt(
    "riscv-v-vector-bits-min",
    cl::desc("Assume V extension vector registers are at least this big, "
             "with zero meaning no minimum size is assumed. A value of -1 "
             "means use Zvl*b extension. This is primarily used to enable "
             "autovectorization with fixed width vectors."),
    cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableRISCVCopyPropagation(
    "riscv-enable-copy-propagation",
    cl::desc("Enable the copy propagation with RISC-V copy instr"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableRISCVDeadRegisterElimination(
    "riscv-enable-dead-defs", cl::Hidden,
    cl::desc("Enable the pass that removes dead"
             " definitions and replaces stores to"
             " them with stores to x0"),
    cl::init(true));

static cl::opt<bool>
    EnableSinkFold("riscv-enable-sink-fold",
                   cl::desc("Enable sinking and folding of instruction copies"),
                   cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableLoopDataPrefetch("riscv-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

static cl::opt<bool> EnableMISchedLoadStoreClustering(
    "riscv-misched-load-store-clustering", cl::Hidden,
    cl::desc("Enable load and store clustering in the machine scheduler"),
    cl::init(true));

static cl::opt<bool> EnablePostMISchedLoadStoreClustering(
    "riscv-postmisched-load-store-clustering", cl::Hidden,
    cl::desc("Enable PostRA load and store clustering in the machine scheduler"),
    cl::init(true));

static cl::opt<bool>
    EnableVLOptimizer("riscv-enable-vl-optimizer",
                      cl::desc("Enable the RISC-V VL Optimizer pass"),
                      cl::init(true), cl::Hidden);

static cl::opt<bool> DisableVectorMaskMutation(
    "riscv-disable-vector-mask-mutation",
    cl::desc("Disable the vector mask scheduling mutation"), cl::init(false),
    cl::Hidden);

static cl::opt<bool>
    EnableMachinePipeliner("riscv-enable-pipeliner",
                           cl::desc("Enable Machine Pipeliner for RISC-V"),
                           cl::init(false), cl::Hidden);

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
  RegisterTargetMachine<RISCVTargetMachine> X(getTheRISCV32Target());
  RegisterTargetMachine<RISCVTargetMachine> Y(getTheRISCV64Target());
  auto *PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeRISCVO0PreLegalizerCombinerPass(*PR);
  initializeRISCVPreLegalizerCombinerPass(*PR);
  initializeRISCVPostLegalizerCombinerPass(*PR);
  initializeKCFIPass(*PR);
  initializeRISCVDeadRegisterDefinitionsPass(*PR);
  initializeRISCVMakeCompressibleOptPass(*PR);
  initializeRISCVGatherScatterLoweringPass(*PR);
  initializeRISCVCodeGenPreparePass(*PR);
  initializeRISCVPostRAExpandPseudoPass(*PR);
  initializeRISCVMergeBaseOffsetOptPass(*PR);
  initializeRISCVOptWInstrsPass(*PR);
  initializeRISCVPreRAExpandPseudoPass(*PR);
  initializeRISCVExpandPseudoPass(*PR);
  initializeRISCVVectorPeepholePass(*PR);
  initializeRISCVVLOptimizerPass(*PR);
  initializeRISCVInsertVSETVLIPass(*PR);
  initializeRISCVInsertReadWriteCSRPass(*PR);
  initializeRISCVInsertWriteVXRMPass(*PR);
  initializeRISCVDAGToDAGISelLegacyPass(*PR);
  initializeRISCVMoveMergePass(*PR);
  initializeRISCVPushPopOptPass(*PR);
}

static StringRef computeDataLayout(const Triple &TT,
                                   const TargetOptions &Options) {
  StringRef ABIName = Options.MCOptions.getABIName();
  if (TT.isArch64Bit()) {
    if (ABIName == "lp64e")
      return "e-m:e-p:64:64-i64:64-i128:128-n32:64-S64";

    return "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128";
  }
  assert(TT.isArch32Bit() && "only RV32 and RV64 are currently supported");

  if (ABIName == "ilp32e")
    return "e-m:e-p:32:32-i64:64-n32-S32";

  return "e-m:e-p:32:32-i64:64-n32-S128";
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           std::optional<Reloc::Model> RM) {
  return RM.value_or(Reloc::Static);
}

RISCVTargetMachine::RISCVTargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       std::optional<Reloc::Model> RM,
                                       std::optional<CodeModel::Model> CM,
                                       CodeGenOptLevel OL, bool JIT)
    : CodeGenTargetMachineImpl(T, computeDataLayout(TT, Options), TT, CPU, FS,
                               Options, getEffectiveRelocModel(TT, RM),
                               getEffectiveCodeModel(CM, CodeModel::Small), OL),
      TLOF(std::make_unique<RISCVELFTargetObjectFile>()) {
  initAsmInfo();

  // RISC-V supports the MachineOutliner.
  setMachineOutliner(true);
  setSupportsDefaultOutlining(true);

  if (TT.isOSFuchsia() && !TT.isArch64Bit())
    report_fatal_error("Fuchsia is only supported for 64-bit");

  setCFIFixup(true);
}

const RISCVSubtarget *
RISCVTargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute TuneAttr = F.getFnAttribute("tune-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU =
      CPUAttr.isValid() ? CPUAttr.getValueAsString().str() : TargetCPU;
  std::string TuneCPU =
      TuneAttr.isValid() ? TuneAttr.getValueAsString().str() : CPU;
  std::string FS =
      FSAttr.isValid() ? FSAttr.getValueAsString().str() : TargetFS;

  unsigned RVVBitsMin = RVVVectorBitsMinOpt;
  unsigned RVVBitsMax = RVVVectorBitsMaxOpt;

  Attribute VScaleRangeAttr = F.getFnAttribute(Attribute::VScaleRange);
  if (VScaleRangeAttr.isValid()) {
    if (!RVVVectorBitsMinOpt.getNumOccurrences())
      RVVBitsMin = VScaleRangeAttr.getVScaleRangeMin() * RISCV::RVVBitsPerBlock;
    std::optional<unsigned> VScaleMax = VScaleRangeAttr.getVScaleRangeMax();
    if (VScaleMax.has_value() && !RVVVectorBitsMaxOpt.getNumOccurrences())
      RVVBitsMax = *VScaleMax * RISCV::RVVBitsPerBlock;
  }

  if (RVVBitsMin != -1U) {
    // FIXME: Change to >= 32 when VLEN = 32 is supported.
    assert((RVVBitsMin == 0 || (RVVBitsMin >= 64 && RVVBitsMin <= 65536 &&
                                isPowerOf2_32(RVVBitsMin))) &&
           "V or Zve* extension requires vector length to be in the range of "
           "64 to 65536 and a power of 2!");
    assert((RVVBitsMax >= RVVBitsMin || RVVBitsMax == 0) &&
           "Minimum V extension vector length should not be larger than its "
           "maximum!");
  }
  assert((RVVBitsMax == 0 || (RVVBitsMax >= 64 && RVVBitsMax <= 65536 &&
                              isPowerOf2_32(RVVBitsMax))) &&
         "V or Zve* extension requires vector length to be in the range of "
         "64 to 65536 and a power of 2!");

  if (RVVBitsMin != -1U) {
    if (RVVBitsMax != 0) {
      RVVBitsMin = std::min(RVVBitsMin, RVVBitsMax);
      RVVBitsMax = std::max(RVVBitsMin, RVVBitsMax);
    }

    RVVBitsMin = llvm::bit_floor(
        (RVVBitsMin < 64 || RVVBitsMin > 65536) ? 0 : RVVBitsMin);
  }
  RVVBitsMax =
      llvm::bit_floor((RVVBitsMax < 64 || RVVBitsMax > 65536) ? 0 : RVVBitsMax);

  SmallString<512> Key;
  raw_svector_ostream(Key) << "RVVMin" << RVVBitsMin << "RVVMax" << RVVBitsMax
                           << CPU << TuneCPU << FS;
  auto &I = SubtargetMap[Key];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    auto ABIName = Options.MCOptions.getABIName();
    if (const MDString *ModuleTargetABI = dyn_cast_or_null<MDString>(
            F.getParent()->getModuleFlag("target-abi"))) {
      auto TargetABI = RISCVABI::getTargetABI(ABIName);
      if (TargetABI != RISCVABI::ABI_Unknown &&
          ModuleTargetABI->getString() != ABIName) {
        report_fatal_error("-target-abi option != target-abi module flag");
      }
      ABIName = ModuleTargetABI->getString();
    }
    I = std::make_unique<RISCVSubtarget>(
        TargetTriple, CPU, TuneCPU, FS, ABIName, RVVBitsMin, RVVBitsMax, *this);
  }
  return I.get();
}

MachineFunctionInfo *RISCVTargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return RISCVMachineFunctionInfo::create<RISCVMachineFunctionInfo>(
      Allocator, F, static_cast<const RISCVSubtarget *>(STI));
}

TargetTransformInfo
RISCVTargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(RISCVTTIImpl(this, F));
}

// A RISC-V hart has a single byte-addressable address space of 2^XLEN bytes
// for all memory accesses, so it is reasonable to assume that an
// implementation has no-op address space casts. If an implementation makes a
// change to this, they can override it here.
bool RISCVTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
                                             unsigned DstAS) const {
  return true;
}

namespace {

class RVVRegisterRegAlloc : public RegisterRegAllocBase<RVVRegisterRegAlloc> {
public:
  RVVRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

static bool onlyAllocateRVVReg(const TargetRegisterInfo &TRI,
                               const MachineRegisterInfo &MRI,
                               const Register Reg) {
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  return RISCVRegisterInfo::isRVVRegClass(RC);
}

static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }

static llvm::once_flag InitializeDefaultRVVRegisterAllocatorFlag;

/// -riscv-rvv-regalloc=<fast|basic|greedy> command line option.
/// This option selects the register allocator used only for RVV registers.
/// For example: -riscv-rvv-regalloc=basic
static cl::opt<RVVRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<RVVRegisterRegAlloc>>
    RVVRegAlloc("riscv-rvv-regalloc", cl::Hidden,
                cl::init(&useDefaultRegisterAllocator),
                cl::desc("Register allocator to use for RVV registers."));

static void initializeDefaultRVVRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = RVVRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = RVVRegAlloc;
    RVVRegisterRegAlloc::setDefault(RVVRegAlloc);
  }
}

static FunctionPass *createBasicRVVRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateRVVReg);
}

static FunctionPass *createGreedyRVVRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateRVVReg);
}

static FunctionPass *createFastRVVRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateRVVReg, false);
}

static RVVRegisterRegAlloc basicRegAllocRVVReg("basic",
                                               "basic register allocator",
                                               createBasicRVVRegisterAllocator);
static RVVRegisterRegAlloc
    greedyRegAllocRVVReg("greedy", "greedy register allocator",
                         createGreedyRVVRegisterAllocator);

static RVVRegisterRegAlloc fastRegAllocRVVReg("fast", "fast register allocator",
                                              createFastRVVRegisterAllocator);

class RISCVPassConfig : public TargetPassConfig {
public:
  RISCVPassConfig(RISCVTargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM.getOptLevel() != CodeGenOptLevel::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
    setEnableSinkAndFold(EnableSinkFold);
    EnableLoopTermFold = true;
  }

  RISCVTargetMachine &getRISCVTargetMachine() const {
    return getTM<RISCVTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = nullptr;
    if (EnableMISchedLoadStoreClustering) {
      DAG = createGenericSchedLive(C);
      DAG->addMutation(createLoadClusterDAGMutation(
          DAG->TII, DAG->TRI, /*ReorderWhileClustering=*/true));
      DAG->addMutation(createStoreClusterDAGMutation(
          DAG->TII, DAG->TRI, /*ReorderWhileClustering=*/true));
    }

    const RISCVSubtarget &ST = C->MF->getSubtarget<RISCVSubtarget>();
    if (!DisableVectorMaskMutation && ST.hasVInstructions()) {
      DAG = DAG ? DAG : createGenericSchedLive(C);
      DAG->addMutation(createRISCVVectorMaskDAGMutation(DAG->TRI));
    }
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMI *DAG = nullptr;
    if (EnablePostMISchedLoadStoreClustering) {
      DAG = createGenericSchedPostRA(C);
      DAG->addMutation(createLoadClusterDAGMutation(
          DAG->TII, DAG->TRI, /*ReorderWhileClustering=*/true));
      DAG->addMutation(createStoreClusterDAGMutation(
          DAG->TII, DAG->TRI, /*ReorderWhileClustering=*/true));
    }

    return DAG;
  }

  void addIRPasses() override;
  bool addPreISel() override;
  void addCodeGenPrepare() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addPreEmitPass() override;
  void addPreEmitPass2() override;
  void addPreSched2() override;
  void addMachineSSAOptimization() override;
  FunctionPass *createRVVRegAllocPass(bool Optimized);
  bool addRegAssignAndRewriteFast() override;
  bool addRegAssignAndRewriteOptimized() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addFastRegAlloc() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};
} // namespace

TargetPassConfig *RISCVTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new RISCVPassConfig(*this, PM);
}

std::unique_ptr<CSEConfigBase> RISCVPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

FunctionPass *RISCVPassConfig::createRVVRegAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultRVVRegisterAllocatorFlag,
                  initializeDefaultRVVRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = RVVRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyRVVRegisterAllocator();

  return createFastRVVRegisterAllocator();
}

bool RISCVPassConfig::addRegAssignAndRewriteFast() {
  addPass(createRVVRegAllocPass(false));
  addPass(createRISCVInsertVSETVLIPass());
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      EnableRISCVDeadRegisterElimination)
    addPass(createRISCVDeadRegisterDefinitionsPass());
  return TargetPassConfig::addRegAssignAndRewriteFast();
}

bool RISCVPassConfig::addRegAssignAndRewriteOptimized() {
  addPass(createRVVRegAllocPass(true));
  addPass(createVirtRegRewriter(false));
  addPass(createRISCVInsertVSETVLIPass());
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      EnableRISCVDeadRegisterElimination)
    addPass(createRISCVDeadRegisterDefinitionsPass());
  return TargetPassConfig::addRegAssignAndRewriteOptimized();
}

void RISCVPassConfig::addIRPasses() {
  addPass(createAtomicExpandLegacyPass());
  addPass(createRISCVZacasABIFixPass());

  if (getOptLevel() != CodeGenOptLevel::None) {
    if (EnableLoopDataPrefetch)
      addPass(createLoopDataPrefetchPass());

    addPass(createRISCVGatherScatterLoweringPass());
    addPass(createInterleavedAccessPass());
    addPass(createRISCVCodeGenPreparePass());
  }

  TargetPassConfig::addIRPasses();
}

bool RISCVPassConfig::addPreISel() {
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    // Add a barrier before instruction selection so that we will not get
    // deleted block address after enabling default outlining. See D99707 for
    // more details.
    addPass(createBarrierNoopPass());
  }

  if ((TM->getOptLevel() != CodeGenOptLevel::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    // FIXME: Like AArch64, we disable extern global merging by default due to
    // concerns it might regress some workloads. Unlike AArch64, we don't
    // currently support enabling the pass in an "OnlyOptimizeForSize" mode.
    // Investigating and addressing both items are TODO.
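    // A MaxOffset of 2047 keeps merged globals within the 12-bit signed
    // immediate range of RISC-V loads, stores, and addi, so each merged
    // global stays addressable from a single shared base register.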
    addPass(createGlobalMergePass(TM, /* MaxOffset */ 2047,
                                  /* OnlyOptimizeForSize */ false,
                                  /* MergeExternalByDefault */ true));
  }

  return false;
}

void RISCVPassConfig::addCodeGenPrepare() {
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(createTypePromotionLegacyPass());
  TargetPassConfig::addCodeGenPrepare();
}

bool RISCVPassConfig::addInstSelector() {
  addPass(createRISCVISelDag(getRISCVTargetMachine(), getOptLevel()));

  return false;
}

bool RISCVPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void RISCVPassConfig::addPreLegalizeMachineIR() {
  if (getOptLevel() == CodeGenOptLevel::None) {
    addPass(createRISCVO0PreLegalizerCombiner());
  } else {
    addPass(createRISCVPreLegalizerCombiner());
  }
}

bool RISCVPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void RISCVPassConfig::addPreRegBankSelect() {
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(createRISCVPostLegalizerCombiner());
}

bool RISCVPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool RISCVPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  return false;
}

void RISCVPassConfig::addPreSched2() {
  addPass(createRISCVPostRAExpandPseudoPass());

  // Emit KCFI checks for indirect calls.
  addPass(createKCFIPass());
}

void RISCVPassConfig::addPreEmitPass() {
  // TODO: It would potentially be better to schedule copy propagation after
  // expanding pseudos (in addPreEmitPass2). However, performing copy
  // propagation after the machine outliner (which runs after addPreEmitPass)
  // currently leads to incorrect code-gen, where copies to registers within
  // outlined functions are removed erroneously.
  if (TM->getOptLevel() >= CodeGenOptLevel::Default &&
      EnableRISCVCopyPropagation)
    addPass(createMachineCopyPropagationPass(true));
  addPass(&BranchRelaxationPassID);
  addPass(createRISCVMakeCompressibleOptPass());
}

void RISCVPassConfig::addPreEmitPass2() {
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    addPass(createRISCVMoveMergePass());
    // Schedule the PushPop optimization before pseudo-instruction expansion so
    // that the return instruction is detected correctly.
    addPass(createRISCVPushPopOptimizationPass());
  }
  addPass(createRISCVIndirectBranchTrackingPass());
  addPass(createRISCVExpandPseudoPass());

  // Schedule the expansion of AMOs at the last possible moment, avoiding the
  // possibility for other passes to break the requirements for forward
  // progress in the LR/SC block.
  addPass(createRISCVExpandAtomicPseudoPass());

  // KCFI indirect call checks are lowered to a bundle.
  addPass(createUnpackMachineBundles([&](const MachineFunction &MF) {
    return MF.getFunction().getParent()->getModuleFlag("kcfi");
  }));
}

void RISCVPassConfig::addMachineSSAOptimization() {
  addPass(createRISCVVectorPeepholePass());

  TargetPassConfig::addMachineSSAOptimization();

  if (EnableMachineCombiner)
    addPass(&MachineCombinerID);

  if (TM->getTargetTriple().isRISCV64()) {
    addPass(createRISCVOptWInstrsPass());
  }
}

void RISCVPassConfig::addPreRegAlloc() {
  addPass(createRISCVPreRAExpandPseudoPass());
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    addPass(createRISCVMergeBaseOffsetOptPass());
    if (EnableVLOptimizer)
      addPass(createRISCVVLOptimizerPass());
  }

  addPass(createRISCVInsertReadWriteCSRPass());
  addPass(createRISCVInsertWriteVXRMPass());
  addPass(createRISCVLandingPadSetupPass());

  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableMachinePipeliner)
    addPass(&MachinePipelinerID);
}

void RISCVPassConfig::addFastRegAlloc() {
  addPass(&InitUndefID);
  TargetPassConfig::addFastRegAlloc();
}

void RISCVPassConfig::addPostRegAlloc() {
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      EnableRedundantCopyElimination)
    addPass(createRISCVRedundantCopyEliminationPass());
}

void RISCVTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
  PB.registerLateLoopOptimizationsEPCallback([=](LoopPassManager &LPM,
                                                 OptimizationLevel Level) {
    LPM.addPass(LoopIdiomVectorizePass(LoopIdiomVectorizeStyle::Predicated));
  });
}

yaml::MachineFunctionInfo *
RISCVTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::RISCVMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
RISCVTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const auto *MFI = MF.getInfo<RISCVMachineFunctionInfo>();
  return new yaml::RISCVMachineFunctionInfo(*MFI);
}

bool RISCVTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const auto &YamlMFI =
      static_cast<const yaml::RISCVMachineFunctionInfo &>(MFI);
  PFS.MF.getInfo<RISCVMachineFunctionInfo>()->initializeBaseYamlFields(YamlMFI);
  return false;
}