//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "AArch64.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64MachineScheduler.h"
#include "AArch64MacroFusion.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CSEConfigBase.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/LoadStoreOpt.h"
#include "llvm/CodeGen/GlobalISel/Localizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Pass.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/CFGuard.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/LowerIFunc.h"
#include "llvm/Transforms/Vectorize/LoopIdiomVectorize.h"
#include <memory>
#include <optional>
#include <string>

using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableCondBrTuning("aarch64-enable-cond-br-tune",
                       cl::desc("Enable the conditional branch tuning pass"),
                       cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAArch64CopyPropagation(
    "aarch64-enable-copy-propagation",
    cl::desc("Enable the copy propagation with AArch64 copy instr"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-enable-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
                                  cl::desc("Enable the pass that removes dead"
                                           " definitions and replaces stores to"
                                           " them with stores to the zero"
                                           " register"),
                                  cl::init(true));

static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
                                        cl::desc("Enable the load/store pair"
                                                 " optimization pass"),
                                        cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations"
             " to make use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-enable-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

static cl::opt<bool>
    EnableSelectOpt("aarch64-select-opt", cl::Hidden,
                    cl::desc("Enable select to branch optimizations"),
                    cl::init(true));

static cl::opt<bool>
    BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
                     cl::desc("Relax out of range conditional branches"));

static cl::opt<bool> EnableCompressJumpTables(
    "aarch64-enable-compress-jump-tables", cl::Hidden, cl::init(true),
    cl::desc("Use smallest entry possible for jump tables"));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

static cl::opt<int> EnableGlobalISelAtO(
    "aarch64-enable-global-isel-at-O", cl::Hidden,
    cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"),
    cl::init(0));

static cl::opt<bool>
    EnableSVEIntrinsicOpts("aarch64-enable-sve-intrinsic-opts", cl::Hidden,
                           cl::desc("Enable SVE intrinsic opts"),
                           cl::init(true));

static cl::opt<bool>
    EnableSMEPeepholeOpt("enable-aarch64-sme-peephole-opt", cl::init(true),
                         cl::Hidden,
                         cl::desc("Perform SME peephole optimization"));

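// Controls the Falkor hardware prefetcher fix: this option gates both the
// IR-level FalkorMarkStridedAccesses pass (added in addIRPasses) and the late
// FalkorHWPFFix machine pass (added in addPreSched2).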
static cl::opt<bool> EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix",
                                         cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableBranchTargets("aarch64-enable-branch-targets", cl::Hidden,
                        cl::desc("Enable the AArch64 branch target pass"),
                        cl::init(true));

static cl::opt<unsigned> SVEVectorBitsMaxOpt(
    "aarch64-sve-vector-bits-max",
    cl::desc("Assume SVE vector registers are at most this big, "
             "with zero meaning no maximum size is assumed."),
    cl::init(0), cl::Hidden);

static cl::opt<unsigned> SVEVectorBitsMinOpt(
    "aarch64-sve-vector-bits-min",
    cl::desc("Assume SVE vector registers are at least this big, "
             "with zero meaning no minimum size is assumed."),
    cl::init(0), cl::Hidden);

static cl::opt<bool> ForceStreaming(
    "force-streaming",
    cl::desc("Force the use of streaming code for all functions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> ForceStreamingCompatible(
    "force-streaming-compatible",
    cl::desc("Force the use of streaming-compatible code for all functions"),
    cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableHomogeneousPrologEpilog;

static cl::opt<bool> EnableGISelLoadStoreOptPreLegal(
    "aarch64-enable-gisel-ldst-prelegal",
    cl::desc("Enable GlobalISel's pre-legalizer load/store optimization pass"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableGISelLoadStoreOptPostLegal(
    "aarch64-enable-gisel-ldst-postlegal",
    cl::desc("Enable GlobalISel's post-legalizer load/store optimization pass"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnableSinkFold("aarch64-enable-sink-fold",
                   cl::desc("Enable sinking and folding of instruction copies"),
                   cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableMachinePipeliner("aarch64-enable-pipeliner",
                           cl::desc("Enable Machine Pipeliner for AArch64"),
                           cl::init(false), cl::Hidden);

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget());
  RegisterTargetMachine<AArch64beTargetMachine> Y(getTheAArch64beTarget());
  RegisterTargetMachine<AArch64leTargetMachine> Z(getTheARM64Target());
  RegisterTargetMachine<AArch64leTargetMachine> W(getTheARM64_32Target());
  RegisterTargetMachine<AArch64leTargetMachine> V(getTheAArch64_32Target());
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64A53Fix835769Pass(*PR);
  initializeAArch64A57FPLoadBalancingPass(*PR);
  initializeAArch64AdvSIMDScalarPass(*PR);
  initializeAArch64BranchTargetsPass(*PR);
  initializeAArch64CollectLOHPass(*PR);
  initializeAArch64CompressJumpTablesPass(*PR);
  initializeAArch64ConditionalComparesPass(*PR);
  initializeAArch64ConditionOptimizerPass(*PR);
  initializeAArch64DeadRegisterDefinitionsPass(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
  initializeAArch64LoadStoreOptPass(*PR);
  initializeAArch64MIPeepholeOptPass(*PR);
  initializeAArch64SIMDInstrOptPass(*PR);
  initializeAArch64O0PreLegalizerCombinerPass(*PR);
  initializeAArch64PreLegalizerCombinerPass(*PR);
  initializeAArch64PointerAuthPass(*PR);
  initializeAArch64PostCoalescerPass(*PR);
  initializeAArch64PostLegalizerCombinerPass(*PR);
  initializeAArch64PostLegalizerLoweringPass(*PR);
  initializeAArch64PostSelectOptimizePass(*PR);
  initializeAArch64PromoteConstantPass(*PR);
  initializeAArch64RedundantCopyEliminationPass(*PR);
  initializeAArch64StorePairSuppressPass(*PR);
  initializeFalkorHWPFFixPass(*PR);
  initializeFalkorMarkStridedAccessesLegacyPass(*PR);
  initializeLDTLSCleanupPass(*PR);
  initializeKCFIPass(*PR);
  initializeSMEABIPass(*PR);
  initializeSMEPeepholeOptPass(*PR);
  initializeSVEIntrinsicOptsPass(*PR);
  initializeAArch64SpeculationHardeningPass(*PR);
  initializeAArch64SLSHardeningPass(*PR);
  initializeAArch64StackTaggingPass(*PR);
  initializeAArch64StackTaggingPreRAPass(*PR);
  initializeAArch64LowerHomogeneousPrologEpilogPass(*PR);
  initializeAArch64DAGToDAGISelLegacyPass(*PR);
}

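// Clear the cached subtargets so they are rebuilt (with the current target
// options) the next time getSubtargetImpl() is queried.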
void AArch64TargetMachine::reset() { SubtargetMap.clear(); }

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return std::make_unique<AArch64_MachoTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return std::make_unique<AArch64_COFFTargetObjectFile>();

  return std::make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string
static std::string computeDataLayout(const Triple &TT,
                                     const MCTargetOptions &Options,
                                     bool LittleEndian) {
  if (TT.isOSBinFormatMachO()) {
    if (TT.getArch() == Triple::aarch64_32)
      return "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-"
             "n32:64-S128-Fn32";
    return "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-n32:64-S128-"
           "Fn32";
  }
  if (TT.isOSBinFormatCOFF())
    return "e-m:w-p270:32:32-p271:32:32-p272:64:64-p:64:64-i32:32-i64:64-i128:"
           "128-n32:64-S128-Fn32";
  std::string Endian = LittleEndian ? "e" : "E";
  std::string Ptr32 = TT.getEnvironment() == Triple::GNUILP32 ? "-p:32:32" : "";
  return Endian + "-m:e" + Ptr32 +
         "-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-"
         "n32:64-S128-Fn32";
}

static StringRef computeDefaultCPU(const Triple &TT, StringRef CPU) {
  if (CPU.empty() && TT.isArm64e())
    return "apple-a12";
  return CPU;
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           std::optional<Reloc::Model> RM) {
  // AArch64 Darwin and Windows are always PIC.
  if (TT.isOSDarwin() || TT.isOSWindows())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

static CodeModel::Model
getEffectiveAArch64CodeModel(const Triple &TT,
                             std::optional<CodeModel::Model> CM, bool JIT) {
  if (CM) {
    if (*CM != CodeModel::Small && *CM != CodeModel::Tiny &&
        *CM != CodeModel::Large) {
      report_fatal_error(
          "Only small, tiny and large code models are allowed on AArch64");
    } else if (*CM == CodeModel::Tiny && !TT.isOSBinFormatELF())
      report_fatal_error("tiny code model is only supported on ELF");
    return *CM;
  }
  // The default MCJIT memory managers make no guarantees about where they can
  // find an executable page; JITed code needs to be able to refer to globals
  // no matter how far away they are.
  // We should set CodeModel::Small for Windows ARM64 in JIT mode, since with
  // the large code model LLVM generates 4-MOV sequences, and Windows doesn't
  // support relocating these long branches (4 MOVs).
  if (JIT && !TT.isOSWindows())
    return CodeModel::Large;
  return CodeModel::Small;
}

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           std::optional<Reloc::Model> RM,
                                           std::optional<CodeModel::Model> CM,
                                           CodeGenOptLevel OL, bool JIT,
                                           bool LittleEndian)
    : CodeGenTargetMachineImpl(
          T, computeDataLayout(TT, Options.MCOptions, LittleEndian), TT,
          computeDefaultCPU(TT, CPU), FS, Options,
          getEffectiveRelocModel(TT, RM),
          getEffectiveAArch64CodeModel(TT, CM, JIT), OL),
      TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) {
  initAsmInfo();

  if (TT.isOSBinFormatMachO()) {
    this->Options.TrapUnreachable = true;
    this->Options.NoTrapAfterNoreturn = true;
  }

  if (getMCAsmInfo()->usesWindowsCFI()) {
    // Unwinding can get confused if the last instruction in an
    // exception-handling region (function, funclet, try block, etc.)
    // is a call.
    //
    // FIXME: We could elide the trap if the next instruction would be in
    // the same region anyway.
    this->Options.TrapUnreachable = true;
  }

  if (this->Options.TLSSize == 0) // default
    this->Options.TLSSize = 24;
  if ((getCodeModel() == CodeModel::Small ||
       getCodeModel() == CodeModel::Kernel) &&
      this->Options.TLSSize > 32)
    // for the small (and kernel) code model, the maximum TLS size is 4GiB
    this->Options.TLSSize = 32;
  else if (getCodeModel() == CodeModel::Tiny && this->Options.TLSSize > 24)
    // for the tiny code model, the maximum TLS size is 1MiB (< 16MiB)
    this->Options.TLSSize = 24;

  // Enable GlobalISel at or below EnableGlobalISelAtO, unless this is
  // MachO/CodeModel::Large, which GlobalISel does not support.
  if (static_cast<int>(getOptLevel()) <= EnableGlobalISelAtO &&
      TT.getArch() != Triple::aarch64_32 &&
      TT.getEnvironment() != Triple::GNUILP32 &&
      !(getCodeModel() == CodeModel::Large && TT.isOSBinFormatMachO())) {
    setGlobalISel(true);
    setGlobalISelAbort(GlobalISelAbortMode::Disable);
  }

  // AArch64 supports the MachineOutliner.
  setMachineOutliner(true);

  // AArch64 supports default outlining behaviour.
  setSupportsDefaultOutlining(true);

  // AArch64 supports the debug entry values.
  setSupportsDebugEntryValues(true);

  // AArch64 supports fixing up the DWARF unwind information.
  if (!getMCAsmInfo()->usesWindowsCFI())
    setCFIFixup(true);
}

AArch64TargetMachine::~AArch64TargetMachine() = default;

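// Return the subtarget to use for this function, creating and caching it on
// first use. The cache key folds in the function's CPU, tune CPU, feature
// string, SVE vector-length bounds, streaming-SVE mode and minsize attribute,
// since any of these can change the subtarget codegen should use.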
const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute TuneAttr = F.getFnAttribute("tune-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  StringRef CPU = CPUAttr.isValid() ? CPUAttr.getValueAsString() : TargetCPU;
  StringRef TuneCPU = TuneAttr.isValid() ? TuneAttr.getValueAsString() : CPU;
  StringRef FS = FSAttr.isValid() ? FSAttr.getValueAsString() : TargetFS;
  bool HasMinSize = F.hasMinSize();

  bool IsStreaming = ForceStreaming ||
                     F.hasFnAttribute("aarch64_pstate_sm_enabled") ||
                     F.hasFnAttribute("aarch64_pstate_sm_body");
  bool IsStreamingCompatible = ForceStreamingCompatible ||
                               F.hasFnAttribute("aarch64_pstate_sm_compatible");

  unsigned MinSVEVectorSize = 0;
  unsigned MaxSVEVectorSize = 0;
  if (F.hasFnAttribute(Attribute::VScaleRange)) {
    ConstantRange CR = getVScaleRange(&F, 64);
    MinSVEVectorSize = CR.getUnsignedMin().getZExtValue() * 128;
    MaxSVEVectorSize = CR.getUnsignedMax().getZExtValue() * 128;
  } else {
    MinSVEVectorSize = SVEVectorBitsMinOpt;
    MaxSVEVectorSize = SVEVectorBitsMaxOpt;
  }

  assert(MinSVEVectorSize % 128 == 0 &&
         "SVE requires vector length in multiples of 128!");
  assert(MaxSVEVectorSize % 128 == 0 &&
         "SVE requires vector length in multiples of 128!");
  assert((MaxSVEVectorSize >= MinSVEVectorSize || MaxSVEVectorSize == 0) &&
         "Minimum SVE vector size should not be larger than its maximum!");

  // Sanitize user input in case of no asserts
  if (MaxSVEVectorSize != 0) {
    MinSVEVectorSize = std::min(MinSVEVectorSize, MaxSVEVectorSize);
    MaxSVEVectorSize = std::max(MinSVEVectorSize, MaxSVEVectorSize);
  }

  SmallString<512> Key;
  raw_svector_ostream(Key) << "SVEMin" << MinSVEVectorSize << "SVEMax"
                           << MaxSVEVectorSize << "IsStreaming=" << IsStreaming
                           << "IsStreamingCompatible=" << IsStreamingCompatible
                           << CPU << TuneCPU << FS
                           << "HasMinSize=" << HasMinSize;

  auto &I = SubtargetMap[Key];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<AArch64Subtarget>(
        TargetTriple, CPU, TuneCPU, FS, *this, isLittle, MinSVEVectorSize,
        MaxSVEVectorSize, IsStreaming, IsStreamingCompatible, HasMinSize);
  }

  assert((!IsStreaming || I->hasSME()) && "Expected SME to be available");

  return I.get();
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, std::optional<Reloc::Model> RM,
    std::optional<CodeModel::Model> CM, CodeGenOptLevel OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, std::optional<Reloc::Model> RM,
    std::optional<CodeModel::Model> CM, CodeGenOptLevel OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, false) {}

namespace {

/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM.getOptLevel() != CodeGenOptLevel::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
    setEnableSinkAndFold(EnableSinkFold);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.hasFusion())
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    ScheduleDAGMI *DAG =
        new ScheduleDAGMI(C, std::make_unique<AArch64PostRASchedStrategy>(C),
                          /* RemoveKillFlags=*/true);
    if (ST.hasFusion()) {
      // Run the Macro Fusion after RA again since literals are expanded from
      // pseudos then (v. addPreSched2()).
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
      return DAG;
    }

    return DAG;
  }

  void addIRPasses() override;
  bool addPreISel() override;
  void addCodeGenPrepare() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
  void addPostBBSections() override;
  void addPreEmitPass2() override;
  bool addRegAssignAndRewriteOptimized() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

} // end anonymous namespace

void AArch64TargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {

  PB.registerLateLoopOptimizationsEPCallback(
      [=](LoopPassManager &LPM, OptimizationLevel Level) {
        LPM.addPass(LoopIdiomVectorizePass());
      });
  if (getTargetTriple().isOSWindows())
    PB.registerPipelineEarlySimplificationEPCallback(
        [](ModulePassManager &PM, OptimizationLevel, ThinOrFullLTOPhase) {
          PM.addPass(LowerIFuncPass());
        });
}

TargetTransformInfo
AArch64TargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(AArch64TTIImpl(this, F));
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(*this, PM);
}

std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandLegacyPass());

  // Expand any SVE vector library calls that we can't code generate directly.
  if (EnableSVEIntrinsicOpts &&
      TM->getOptLevel() != CodeGenOptLevel::None)
    addPass(createSVEIntrinsicOptsPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(SimplifyCFGOptions()
                                            .forwardSwitchCondToPhi(true)
                                            .convertSwitchRangeToICmp(true)
                                            .convertSwitchToLookupTable(true)
                                            .needCanonicalLoops(false)
                                            .hoistCommonInsts(true)
                                            .sinkCommonInsts(true)));

  // Run LoopDataPrefetch
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    if (EnableLoopDataPrefetch)
      addPass(createLoopDataPrefetchPass());
    if (EnableFalkorHWPFFix)
      addPass(createFalkorMarkStridedAccessesPass());
  }

  if (EnableGEPOpt) {
    // Call SeparateConstOffsetFromGEP pass to extract constants within indices
    // and lower a GEP with multiple indices to either arithmetic operations or
    // multiple GEPs with single index.
    addPass(createSeparateConstOffsetFromGEPPass(true));
    // Call EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }

  TargetPassConfig::addIRPasses();

  if (getOptLevel() == CodeGenOptLevel::Aggressive && EnableSelectOpt)
    addPass(createSelectOptimizePass());

  addPass(createAArch64StackTaggingPass(
      /*IsOptNone=*/TM->getOptLevel() == CodeGenOptLevel::None));

  // Match complex arithmetic patterns
  if (TM->getOptLevel() >= CodeGenOptLevel::Default)
    addPass(createComplexDeinterleavingPass(TM));

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    addPass(createInterleavedLoadCombinePass());
    addPass(createInterleavedAccessPass());
  }

  // Expand any functions marked with SME attributes which require special
  // changes for the calling convention or that require the lazy-saving
  // mechanism specified in the SME ABI.
  addPass(createSMEABIPass());

  // Add Control Flow Guard checks.
  if (TM->getTargetTriple().isOSWindows()) {
    if (TM->getTargetTriple().isWindowsArm64EC())
      addPass(createAArch64Arm64ECCallLoweringPass());
    else
      addPass(createCFGuardCheckPass());
  }

  if (TM->Options.JMCInstrument)
    addPass(createJMCInstrumenterPass());
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOptLevel::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize =
        (TM->getOptLevel() < CodeGenOptLevel::Aggressive) &&
        (EnableGlobalMerge == cl::BOU_UNSET);

    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();

    // FIXME: extern global merging is only enabled when we optimise for size
    // because there are some regressions with it also enabled for performance.
    if (!OnlyOptimizeForSize)
      MergeExternalByDefault = false;

    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

void AArch64PassConfig::addCodeGenPrepare() {
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(createTypePromotionLegacyPass());
  TargetPassConfig::addCodeGenPrepare();
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, cleanup any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void AArch64PassConfig::addPreLegalizeMachineIR() {
  if (getOptLevel() == CodeGenOptLevel::None) {
    addPass(createAArch64O0PreLegalizerCombiner());
    addPass(new Localizer());
  } else {
    addPass(createAArch64PreLegalizerCombiner());
    addPass(new Localizer());
    if (EnableGISelLoadStoreOptPreLegal)
      addPass(new LoadStoreOpt());
  }
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void AArch64PassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  if (!IsOptNone) {
    addPass(createAArch64PostLegalizerCombiner(IsOptNone));
    if (EnableGISelLoadStoreOptPostLegal)
      addPass(new LoadStoreOpt());
  }
  addPass(createAArch64PostLegalizerLowering());
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64PostSelectOptimize());
  return false;
}

void AArch64PassConfig::addMachineSSAOptimization() {
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableSMEPeepholeOpt)
    addPass(createSMEPeepholeOptPass());

  // Run default MachineSSAOptimization first.
  TargetPassConfig::addMachineSSAOptimization();

  if (TM->getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64MIPeepholeOptPass());
}

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableCondBrTuning)
    addPass(createAArch64CondBrTuning());
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterLegacyID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  addPass(createAArch64SIMDInstrOptPass());
  if (TM->getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64StackTaggingPreRAPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerLegacyID);
  }
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableMachinePipeliner)
    addPass(&MachinePipelinerID);
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  if (TM->getOptLevel() != CodeGenOptLevel::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Lower homogeneous frame instructions
  if (EnableHomogeneousPrologEpilog)
    addPass(createAArch64LowerHomogeneousPrologEpilogPass());
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    if (EnableLoadStoreOpt)
      addPass(createAArch64LoadStoreOptimizationPass());
  }
  // Emit KCFI checks for indirect calls.
  addPass(createKCFIPass());

  // The AArch64SpeculationHardeningPass destroys dominator tree and natural
  // loop info, which is needed for the FalkorHWPFFixPass and also later on.
  // Therefore, run the AArch64SpeculationHardeningPass before the
  // FalkorHWPFFixPass to avoid recomputing dominator tree and natural loop
  // info.
  addPass(createAArch64SpeculationHardeningPass());

  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    if (EnableFalkorHWPFFix)
      addPass(createFalkorHWPFFixPass());
  }
}

void AArch64PassConfig::addPreEmitPass() {
  // Machine Block Placement might have created new opportunities when run
  // at O3, where the Tail Duplication Threshold is set to 4 instructions.
  // Run the load/store optimizer once more.
  if (TM->getOptLevel() >= CodeGenOptLevel::Aggressive && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());

  if (TM->getOptLevel() >= CodeGenOptLevel::Aggressive &&
      EnableAArch64CopyPropagation)
    addPass(createMachineCopyPropagationPass(true));

  addPass(createAArch64A53Fix835769());

  if (TM->getTargetTriple().isOSWindows()) {
    // Identify valid longjmp targets for Windows Control Flow Guard.
    addPass(createCFGuardLongjmpPass());
    // Identify valid eh continuation targets for Windows EHCont Guard.
    addPass(createEHContGuardCatchretPass());
  }

  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}

void AArch64PassConfig::addPostBBSections() {
  addPass(createAArch64SLSHardeningPass());
  addPass(createAArch64PointerAuthPass());
  if (EnableBranchTargets)
    addPass(createAArch64BranchTargetsPass());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  if (BranchRelaxation)
    addPass(&BranchRelaxationPassID);

  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableCompressJumpTables)
    addPass(createAArch64CompressJumpTablesPass());
}

void AArch64PassConfig::addPreEmitPass2() {
  // SVE bundles move prefixes with destructive operations. BLR_RVMARKER pseudo
  // instructions are lowered to bundles as well.
  addPass(createUnpackMachineBundles(nullptr));
}

bool AArch64PassConfig::addRegAssignAndRewriteOptimized() {
  addPass(createAArch64PostCoalescerPass());
  return TargetPassConfig::addRegAssignAndRewriteOptimized();
}

MachineFunctionInfo *AArch64TargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return AArch64FunctionInfo::create<AArch64FunctionInfo>(
      Allocator, F, static_cast<const AArch64Subtarget *>(STI));
}

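// Hooks for serializing the AArch64-specific per-function information
// (yaml::AArch64FunctionInfo) to and from MIR YAML.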
yaml::MachineFunctionInfo *
AArch64TargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::AArch64FunctionInfo();
}

yaml::MachineFunctionInfo *
AArch64TargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const auto *MFI = MF.getInfo<AArch64FunctionInfo>();
  return new yaml::AArch64FunctionInfo(*MFI);
}

bool AArch64TargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const auto &YamlMFI = static_cast<const yaml::AArch64FunctionInfo &>(MFI);
  MachineFunction &MF = PFS.MF;
  MF.getInfo<AArch64FunctionInfo>()->initializeBaseYamlFields(YamlMFI);
  return false;
}