1 //===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a printer that converts from our internal representation
10 // of machine-dependent LLVM code to the AArch64 assembly language.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "AArch64.h"
15 #include "AArch64MCInstLower.h"
16 #include "AArch64MachineFunctionInfo.h"
17 #include "AArch64RegisterInfo.h"
18 #include "AArch64Subtarget.h"
19 #include "AArch64TargetObjectFile.h"
20 #include "MCTargetDesc/AArch64AddressingModes.h"
21 #include "MCTargetDesc/AArch64InstPrinter.h"
22 #include "MCTargetDesc/AArch64MCExpr.h"
23 #include "MCTargetDesc/AArch64MCTargetDesc.h"
24 #include "MCTargetDesc/AArch64TargetStreamer.h"
25 #include "TargetInfo/AArch64TargetInfo.h"
26 #include "Utils/AArch64BaseInfo.h"
27 #include "llvm/ADT/SmallString.h"
28 #include "llvm/ADT/SmallVector.h"
29 #include "llvm/ADT/StringRef.h"
30 #include "llvm/ADT/Triple.h"
31 #include "llvm/ADT/Twine.h"
32 #include "llvm/BinaryFormat/COFF.h"
33 #include "llvm/BinaryFormat/ELF.h"
34 #include "llvm/CodeGen/AsmPrinter.h"
35 #include "llvm/CodeGen/FaultMaps.h"
36 #include "llvm/CodeGen/MachineBasicBlock.h"
37 #include "llvm/CodeGen/MachineFunction.h"
38 #include "llvm/CodeGen/MachineInstr.h"
39 #include "llvm/CodeGen/MachineJumpTableInfo.h"
40 #include "llvm/CodeGen/MachineModuleInfoImpls.h"
41 #include "llvm/CodeGen/MachineOperand.h"
42 #include "llvm/CodeGen/StackMaps.h"
43 #include "llvm/CodeGen/TargetRegisterInfo.h"
44 #include "llvm/IR/DataLayout.h"
45 #include "llvm/IR/DebugInfoMetadata.h"
46 #include "llvm/MC/MCAsmInfo.h"
47 #include "llvm/MC/MCContext.h"
48 #include "llvm/MC/MCInst.h"
49 #include "llvm/MC/MCInstBuilder.h"
50 #include "llvm/MC/MCSectionELF.h"
51 #include "llvm/MC/MCStreamer.h"
52 #include "llvm/MC/MCSymbol.h"
53 #include "llvm/MC/TargetRegistry.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/ErrorHandling.h"
56 #include "llvm/Support/raw_ostream.h"
57 #include "llvm/Target/TargetMachine.h"
58 #include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
59 #include <algorithm>
60 #include <cassert>
61 #include <cstdint>
62 #include <map>
63 #include <memory>
64
65 using namespace llvm;
66
67 #define DEBUG_TYPE "asm-printer"
68
69 namespace {
70
/// AsmPrinter specialization for AArch64: lowers MachineInstrs to MCInsts and
/// drives emission of target-specific constructs (XRay sleds, HWASan check
/// stubs, KCFI checks, jump tables, LOH directives, COFF/ELF preludes).
class AArch64AsmPrinter : public AsmPrinter {
  AArch64MCInstLower MCInstLowering;
  FaultMaps FM;
  // Subtarget of the function currently being emitted; set per function in
  // runOnMachineFunction().
  const AArch64Subtarget *STI;
  bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;

public:
  AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
      : AsmPrinter(TM, std::move(Streamer)), MCInstLowering(OutContext, *this),
        FM(*this) {}

  StringRef getPassName() const override { return "AArch64 Assembly Printer"; }

  /// Wrapper for MCInstLowering.lowerOperand() for the
  /// tblgen'erated pseudo lowering.
  bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
    return MCInstLowering.lowerOperand(MO, MCOp);
  }

  void emitStartOfAsmFile(Module &M) override;
  void emitJumpTableInfo() override;

  void emitFunctionEntryLabel() override;

  void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);

  // Lowerings for stackmap/patchpoint/statepoint intrinsic pseudos.
  void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                     const MachineInstr &MI);
  void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerFAULTING_OP(const MachineInstr &MI);

  // XRay instrumentation pseudo lowerings.
  void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
  void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
  void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);

  // Key identifying one HWASan check stub: (pointer register, short-granule
  // variant, packed access info). Stubs are deduplicated across the module
  // and emitted at end of file by emitHwasanMemaccessSymbols().
  typedef std::tuple<unsigned, bool, uint32_t> HwasanMemaccessTuple;
  std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
  void LowerKCFI_CHECK(const MachineInstr &MI);
  void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
  void emitHwasanMemaccessSymbols(Module &M);

  void emitSled(const MachineInstr &MI, SledKind Kind);

  /// tblgen'erated driver function for lowering simple MI->MC
  /// pseudo instructions.
  bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
                                   const MachineInstr *MI);

  void emitInstruction(const MachineInstr *MI) override;

  void emitFunctionHeaderComment() override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AsmPrinter::getAnalysisUsage(AU);
    AU.setPreservesAll();
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Cache per-function state used by the emission callbacks below.
    AArch64FI = MF.getInfo<AArch64FunctionInfo>();
    STI = &MF.getSubtarget<AArch64Subtarget>();

    SetupMachineFunction(MF);

    if (STI->isTargetCOFF()) {
      // COFF requires an explicit symbol definition record (storage class +
      // complex type) for each function symbol.
      bool Internal = MF.getFunction().hasInternalLinkage();
      COFF::SymbolStorageClass Scl = Internal ? COFF::IMAGE_SYM_CLASS_STATIC
                                              : COFF::IMAGE_SYM_CLASS_EXTERNAL;
      int Type =
          COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;

      OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
      OutStreamer->emitCOFFSymbolStorageClass(Scl);
      OutStreamer->emitCOFFSymbolType(Type);
      OutStreamer->endCOFFSymbolDef();
    }

    // Emit the rest of the function body.
    emitFunctionBody();

    // Emit the XRay table for this function.
    emitXRayTable();

    // We didn't modify anything.
    return false;
  }

private:
  void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
  bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
  bool printAsmRegInClass(const MachineOperand &MO,
                          const TargetRegisterClass *RC, unsigned AltName,
                          raw_ostream &O);

  bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
                       const char *ExtraCode, raw_ostream &O) override;
  bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
                             const char *ExtraCode, raw_ostream &O) override;

  void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);

  void emitFunctionBodyEnd() override;

  MCSymbol *GetCPISymbol(unsigned CPID) const override;
  void emitEndOfAsmFile(Module &M) override;

  // Function info of the function currently being emitted; set per function
  // in runOnMachineFunction().
  AArch64FunctionInfo *AArch64FI = nullptr;

  /// Emit the LOHs contained in AArch64FI.
  void emitLOHs();

  /// Emit instruction to set float register to zero.
  void emitFMov0(const MachineInstr &MI);

  using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;

  // Labels attached to LOH-related instructions, consumed by emitLOHs().
  MInstToMCSymbol LOHInstToLabel;

  bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
    return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
  }
};
197
198 } // end anonymous namespace
199
emitStartOfAsmFile(Module & M)200 void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
201 const Triple &TT = TM.getTargetTriple();
202
203 if (TT.isOSBinFormatCOFF()) {
204 // Emit an absolute @feat.00 symbol
205 MCSymbol *S = MMI->getContext().getOrCreateSymbol(StringRef("@feat.00"));
206 OutStreamer->beginCOFFSymbolDef(S);
207 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_STATIC);
208 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_NULL);
209 OutStreamer->endCOFFSymbolDef();
210 int64_t Feat00Value = 0;
211
212 if (M.getModuleFlag("cfguard")) {
213 // Object is CFG-aware.
214 Feat00Value |= COFF::Feat00Flags::GuardCF;
215 }
216
217 if (M.getModuleFlag("ehcontguard")) {
218 // Object also has EHCont.
219 Feat00Value |= COFF::Feat00Flags::GuardEHCont;
220 }
221
222 if (M.getModuleFlag("ms-kernel")) {
223 // Object is compiled with /kernel.
224 Feat00Value |= COFF::Feat00Flags::Kernel;
225 }
226
227 OutStreamer->emitSymbolAttribute(S, MCSA_Global);
228 OutStreamer->emitAssignment(
229 S, MCConstantExpr::create(Feat00Value, MMI->getContext()));
230 }
231
232 if (!TT.isOSBinFormatELF())
233 return;
234
235 // Assemble feature flags that may require creation of a note section.
236 unsigned Flags = 0;
237 if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
238 M.getModuleFlag("branch-target-enforcement")))
239 if (BTE->getZExtValue())
240 Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
241
242 if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
243 M.getModuleFlag("sign-return-address")))
244 if (Sign->getZExtValue())
245 Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_PAC;
246
247 if (Flags == 0)
248 return;
249
250 // Emit a .note.gnu.property section with the flags.
251 auto *TS =
252 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
253 TS->emitNoteSection(Flags);
254 }
255
emitFunctionHeaderComment()256 void AArch64AsmPrinter::emitFunctionHeaderComment() {
257 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
258 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
259 if (OutlinerString != std::nullopt)
260 OutStreamer->getCommentOS() << ' ' << OutlinerString;
261 }
262
LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr & MI)263 void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
264 {
265 const Function &F = MF->getFunction();
266 if (F.hasFnAttribute("patchable-function-entry")) {
267 unsigned Num;
268 if (F.getFnAttribute("patchable-function-entry")
269 .getValueAsString()
270 .getAsInteger(10, Num))
271 return;
272 emitNops(Num);
273 return;
274 }
275
276 emitSled(MI, SledKind::FUNCTION_ENTER);
277 }
278
LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr & MI)279 void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
280 emitSled(MI, SledKind::FUNCTION_EXIT);
281 }
282
LowerPATCHABLE_TAIL_CALL(const MachineInstr & MI)283 void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
284 emitSled(MI, SledKind::TAIL_CALL);
285 }
286
emitSled(const MachineInstr & MI,SledKind Kind)287 void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
288 static const int8_t NoopsInSledCount = 7;
289 // We want to emit the following pattern:
290 //
291 // .Lxray_sled_N:
292 // ALIGN
293 // B #32
294 // ; 7 NOP instructions (28 bytes)
295 // .tmpN
296 //
297 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
298 // over the full 32 bytes (8 instructions) with the following pattern:
299 //
300 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
301 // LDR W0, #12 ; W0 := function ID
302 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
303 // BLR X16 ; call the tracing trampoline
304 // ;DATA: 32 bits of function ID
305 // ;DATA: lower 32 bits of the address of the trampoline
306 // ;DATA: higher 32 bits of the address of the trampoline
307 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
308 //
309 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
310 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
311 OutStreamer->emitLabel(CurSled);
312 auto Target = OutContext.createTempSymbol();
313
314 // Emit "B #32" instruction, which jumps over the next 28 bytes.
315 // The operand has to be the number of 4-byte instructions to jump over,
316 // including the current instruction.
317 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
318
319 for (int8_t I = 0; I < NoopsInSledCount; I++)
320 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
321
322 OutStreamer->emitLabel(Target);
323 recordSled(CurSled, MI, Kind, 2);
324 }
325
LowerKCFI_CHECK(const MachineInstr & MI)326 void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
327 Register AddrReg = MI.getOperand(0).getReg();
328 assert(std::next(MI.getIterator())->isCall() &&
329 "KCFI_CHECK not followed by a call instruction");
330 assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
331 "KCFI_CHECK call target doesn't match call operand");
332
333 // Default to using the intra-procedure-call temporary registers for
334 // comparing the hashes.
335 unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
336 if (AddrReg == AArch64::XZR) {
337 // Checking XZR makes no sense. Instead of emitting a load, zero
338 // ScratchRegs[0] and use it for the ESR AddrIndex below.
339 AddrReg = getXRegFromWReg(ScratchRegs[0]);
340 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
341 .addReg(AddrReg)
342 .addReg(AArch64::XZR)
343 .addReg(AArch64::XZR)
344 .addImm(0));
345 } else {
346 // If one of the scratch registers is used for the call target (e.g.
347 // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
348 // temporary register instead (in this case, AArch64::W9) as the check
349 // is immediately followed by the call instruction.
350 for (auto &Reg : ScratchRegs) {
351 if (Reg == getWRegFromXReg(AddrReg)) {
352 Reg = AArch64::W9;
353 break;
354 }
355 }
356 assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
357 "Invalid scratch registers for KCFI_CHECK");
358
359 // Adjust the offset for patchable-function-prefix. This assumes that
360 // patchable-function-prefix is the same for all functions.
361 int64_t PrefixNops = 0;
362 (void)MI.getMF()
363 ->getFunction()
364 .getFnAttribute("patchable-function-prefix")
365 .getValueAsString()
366 .getAsInteger(10, PrefixNops);
367
368 // Load the target function type hash.
369 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
370 .addReg(ScratchRegs[0])
371 .addReg(AddrReg)
372 .addImm(-(PrefixNops * 4 + 4)));
373 }
374
375 // Load the expected type hash.
376 const int64_t Type = MI.getOperand(1).getImm();
377 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::MOVKWi)
378 .addReg(ScratchRegs[1])
379 .addReg(ScratchRegs[1])
380 .addImm(Type & 0xFFFF)
381 .addImm(0));
382 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::MOVKWi)
383 .addReg(ScratchRegs[1])
384 .addReg(ScratchRegs[1])
385 .addImm((Type >> 16) & 0xFFFF)
386 .addImm(16));
387
388 // Compare the hashes and trap if there's a mismatch.
389 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
390 .addReg(AArch64::WZR)
391 .addReg(ScratchRegs[0])
392 .addReg(ScratchRegs[1])
393 .addImm(0));
394
395 MCSymbol *Pass = OutContext.createTempSymbol();
396 EmitToStreamer(*OutStreamer,
397 MCInstBuilder(AArch64::Bcc)
398 .addImm(AArch64CC::EQ)
399 .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));
400
401 // The base ESR is 0x8000 and the register information is encoded in bits
402 // 0-9 as follows:
403 // - 0-4: n, where the register Xn contains the target address
404 // - 5-9: m, where the register Wm contains the expected type hash
405 // Where n, m are in [0, 30].
406 unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
407 unsigned AddrIndex;
408 switch (AddrReg) {
409 default:
410 AddrIndex = AddrReg - AArch64::X0;
411 break;
412 case AArch64::FP:
413 AddrIndex = 29;
414 break;
415 case AArch64::LR:
416 AddrIndex = 30;
417 break;
418 }
419
420 assert(AddrIndex < 31 && TypeIndex < 31);
421
422 unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
423 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
424 OutStreamer->emitLabel(Pass);
425 }
426
LowerHWASAN_CHECK_MEMACCESS(const MachineInstr & MI)427 void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
428 Register Reg = MI.getOperand(0).getReg();
429 bool IsShort =
430 MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES;
431 uint32_t AccessInfo = MI.getOperand(1).getImm();
432 MCSymbol *&Sym =
433 HwasanMemaccessSymbols[HwasanMemaccessTuple(Reg, IsShort, AccessInfo)];
434 if (!Sym) {
435 // FIXME: Make this work on non-ELF.
436 if (!TM.getTargetTriple().isOSBinFormatELF())
437 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
438
439 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
440 utostr(AccessInfo);
441 if (IsShort)
442 SymName += "_short_v2";
443 Sym = OutContext.getOrCreateSymbol(SymName);
444 }
445
446 EmitToStreamer(*OutStreamer,
447 MCInstBuilder(AArch64::BL)
448 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
449 }
450
emitHwasanMemaccessSymbols(Module & M)451 void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
452 if (HwasanMemaccessSymbols.empty())
453 return;
454
455 const Triple &TT = TM.getTargetTriple();
456 assert(TT.isOSBinFormatELF());
457 std::unique_ptr<MCSubtargetInfo> STI(
458 TM.getTarget().createMCSubtargetInfo(TT.str(), "", ""));
459 assert(STI && "Unable to create subtarget info");
460
461 MCSymbol *HwasanTagMismatchV1Sym =
462 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
463 MCSymbol *HwasanTagMismatchV2Sym =
464 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");
465
466 const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
467 MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
468 const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
469 MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);
470
471 for (auto &P : HwasanMemaccessSymbols) {
472 unsigned Reg = std::get<0>(P.first);
473 bool IsShort = std::get<1>(P.first);
474 uint32_t AccessInfo = std::get<2>(P.first);
475 const MCSymbolRefExpr *HwasanTagMismatchRef =
476 IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
477 MCSymbol *Sym = P.second;
478
479 bool HasMatchAllTag =
480 (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
481 uint8_t MatchAllTag =
482 (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
483 unsigned Size =
484 1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
485 bool CompileKernel =
486 (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;
487
488 OutStreamer->switchSection(OutContext.getELFSection(
489 ".text.hot", ELF::SHT_PROGBITS,
490 ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(),
491 /*IsComdat=*/true));
492
493 OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
494 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
495 OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
496 OutStreamer->emitLabel(Sym);
497
498 OutStreamer->emitInstruction(MCInstBuilder(AArch64::SBFMXri)
499 .addReg(AArch64::X16)
500 .addReg(Reg)
501 .addImm(4)
502 .addImm(55),
503 *STI);
504 OutStreamer->emitInstruction(
505 MCInstBuilder(AArch64::LDRBBroX)
506 .addReg(AArch64::W16)
507 .addReg(IsShort ? AArch64::X20 : AArch64::X9)
508 .addReg(AArch64::X16)
509 .addImm(0)
510 .addImm(0),
511 *STI);
512 OutStreamer->emitInstruction(
513 MCInstBuilder(AArch64::SUBSXrs)
514 .addReg(AArch64::XZR)
515 .addReg(AArch64::X16)
516 .addReg(Reg)
517 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),
518 *STI);
519 MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
520 OutStreamer->emitInstruction(
521 MCInstBuilder(AArch64::Bcc)
522 .addImm(AArch64CC::NE)
523 .addExpr(MCSymbolRefExpr::create(HandleMismatchOrPartialSym,
524 OutContext)),
525 *STI);
526 MCSymbol *ReturnSym = OutContext.createTempSymbol();
527 OutStreamer->emitLabel(ReturnSym);
528 OutStreamer->emitInstruction(
529 MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
530 OutStreamer->emitLabel(HandleMismatchOrPartialSym);
531
532 if (HasMatchAllTag) {
533 OutStreamer->emitInstruction(MCInstBuilder(AArch64::UBFMXri)
534 .addReg(AArch64::X16)
535 .addReg(Reg)
536 .addImm(56)
537 .addImm(63),
538 *STI);
539 OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSXri)
540 .addReg(AArch64::XZR)
541 .addReg(AArch64::X16)
542 .addImm(MatchAllTag)
543 .addImm(0),
544 *STI);
545 OutStreamer->emitInstruction(
546 MCInstBuilder(AArch64::Bcc)
547 .addImm(AArch64CC::EQ)
548 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
549 *STI);
550 }
551
552 if (IsShort) {
553 OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSWri)
554 .addReg(AArch64::WZR)
555 .addReg(AArch64::W16)
556 .addImm(15)
557 .addImm(0),
558 *STI);
559 MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
560 OutStreamer->emitInstruction(
561 MCInstBuilder(AArch64::Bcc)
562 .addImm(AArch64CC::HI)
563 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
564 *STI);
565
566 OutStreamer->emitInstruction(
567 MCInstBuilder(AArch64::ANDXri)
568 .addReg(AArch64::X17)
569 .addReg(Reg)
570 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
571 *STI);
572 if (Size != 1)
573 OutStreamer->emitInstruction(MCInstBuilder(AArch64::ADDXri)
574 .addReg(AArch64::X17)
575 .addReg(AArch64::X17)
576 .addImm(Size - 1)
577 .addImm(0),
578 *STI);
579 OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSWrs)
580 .addReg(AArch64::WZR)
581 .addReg(AArch64::W16)
582 .addReg(AArch64::W17)
583 .addImm(0),
584 *STI);
585 OutStreamer->emitInstruction(
586 MCInstBuilder(AArch64::Bcc)
587 .addImm(AArch64CC::LS)
588 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
589 *STI);
590
591 OutStreamer->emitInstruction(
592 MCInstBuilder(AArch64::ORRXri)
593 .addReg(AArch64::X16)
594 .addReg(Reg)
595 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
596 *STI);
597 OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDRBBui)
598 .addReg(AArch64::W16)
599 .addReg(AArch64::X16)
600 .addImm(0),
601 *STI);
602 OutStreamer->emitInstruction(
603 MCInstBuilder(AArch64::SUBSXrs)
604 .addReg(AArch64::XZR)
605 .addReg(AArch64::X16)
606 .addReg(Reg)
607 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),
608 *STI);
609 OutStreamer->emitInstruction(
610 MCInstBuilder(AArch64::Bcc)
611 .addImm(AArch64CC::EQ)
612 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
613 *STI);
614
615 OutStreamer->emitLabel(HandleMismatchSym);
616 }
617
618 OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXpre)
619 .addReg(AArch64::SP)
620 .addReg(AArch64::X0)
621 .addReg(AArch64::X1)
622 .addReg(AArch64::SP)
623 .addImm(-32),
624 *STI);
625 OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXi)
626 .addReg(AArch64::FP)
627 .addReg(AArch64::LR)
628 .addReg(AArch64::SP)
629 .addImm(29),
630 *STI);
631
632 if (Reg != AArch64::X0)
633 OutStreamer->emitInstruction(MCInstBuilder(AArch64::ORRXrs)
634 .addReg(AArch64::X0)
635 .addReg(AArch64::XZR)
636 .addReg(Reg)
637 .addImm(0),
638 *STI);
639 OutStreamer->emitInstruction(
640 MCInstBuilder(AArch64::MOVZXi)
641 .addReg(AArch64::X1)
642 .addImm(AccessInfo & HWASanAccessInfo::RuntimeMask)
643 .addImm(0),
644 *STI);
645
646 if (CompileKernel) {
647 // The Linux kernel's dynamic loader doesn't support GOT relative
648 // relocations, but it doesn't support late binding either, so just call
649 // the function directly.
650 OutStreamer->emitInstruction(
651 MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef), *STI);
652 } else {
653 // Intentionally load the GOT entry and branch to it, rather than possibly
654 // late binding the function, which may clobber the registers before we
655 // have a chance to save them.
656 OutStreamer->emitInstruction(
657 MCInstBuilder(AArch64::ADRP)
658 .addReg(AArch64::X16)
659 .addExpr(AArch64MCExpr::create(
660 HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_PAGE,
661 OutContext)),
662 *STI);
663 OutStreamer->emitInstruction(
664 MCInstBuilder(AArch64::LDRXui)
665 .addReg(AArch64::X16)
666 .addReg(AArch64::X16)
667 .addExpr(AArch64MCExpr::create(
668 HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_LO12,
669 OutContext)),
670 *STI);
671 OutStreamer->emitInstruction(
672 MCInstBuilder(AArch64::BR).addReg(AArch64::X16), *STI);
673 }
674 }
675 }
676
emitEndOfAsmFile(Module & M)677 void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
678 emitHwasanMemaccessSymbols(M);
679
680 const Triple &TT = TM.getTargetTriple();
681 if (TT.isOSBinFormatMachO()) {
682 // Funny Darwin hack: This flag tells the linker that no global symbols
683 // contain code that falls through to other global symbols (e.g. the obvious
684 // implementation of multiple entry points). If this doesn't occur, the
685 // linker can safely perform dead code stripping. Since LLVM never
686 // generates code that does this, it is always safe to set.
687 OutStreamer->emitAssemblerFlag(MCAF_SubsectionsViaSymbols);
688 }
689
690 // Emit stack and fault map information.
691 FM.serializeToFaultMapSection();
692
693 }
694
emitLOHs()695 void AArch64AsmPrinter::emitLOHs() {
696 SmallVector<MCSymbol *, 3> MCArgs;
697
698 for (const auto &D : AArch64FI->getLOHContainer()) {
699 for (const MachineInstr *MI : D.getArgs()) {
700 MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
701 assert(LabelIt != LOHInstToLabel.end() &&
702 "Label hasn't been inserted for LOH related instruction");
703 MCArgs.push_back(LabelIt->second);
704 }
705 OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
706 MCArgs.clear();
707 }
708 }
709
emitFunctionBodyEnd()710 void AArch64AsmPrinter::emitFunctionBodyEnd() {
711 if (!AArch64FI->getLOHRelated().empty())
712 emitLOHs();
713 }
714
715 /// GetCPISymbol - Return the symbol for the specified constant pool entry.
GetCPISymbol(unsigned CPID) const716 MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
717 // Darwin uses a linker-private symbol name for constant-pools (to
718 // avoid addends on the relocation?), ELF has no such concept and
719 // uses a normal private symbol.
720 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
721 return OutContext.getOrCreateSymbol(
722 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
723 Twine(getFunctionNumber()) + "_" + Twine(CPID));
724
725 return AsmPrinter::GetCPISymbol(CPID);
726 }
727
printOperand(const MachineInstr * MI,unsigned OpNum,raw_ostream & O)728 void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
729 raw_ostream &O) {
730 const MachineOperand &MO = MI->getOperand(OpNum);
731 switch (MO.getType()) {
732 default:
733 llvm_unreachable("<unknown operand type>");
734 case MachineOperand::MO_Register: {
735 Register Reg = MO.getReg();
736 assert(Reg.isPhysical());
737 assert(!MO.getSubReg() && "Subregs should be eliminated!");
738 O << AArch64InstPrinter::getRegisterName(Reg);
739 break;
740 }
741 case MachineOperand::MO_Immediate: {
742 O << MO.getImm();
743 break;
744 }
745 case MachineOperand::MO_GlobalAddress: {
746 PrintSymbolOperand(MO, O);
747 break;
748 }
749 case MachineOperand::MO_BlockAddress: {
750 MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
751 Sym->print(O, MAI);
752 break;
753 }
754 }
755 }
756
printAsmMRegister(const MachineOperand & MO,char Mode,raw_ostream & O)757 bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
758 raw_ostream &O) {
759 Register Reg = MO.getReg();
760 switch (Mode) {
761 default:
762 return true; // Unknown mode.
763 case 'w':
764 Reg = getWRegFromXReg(Reg);
765 break;
766 case 'x':
767 Reg = getXRegFromWReg(Reg);
768 break;
769 case 't':
770 Reg = getXRegFromXRegTuple(Reg);
771 break;
772 }
773
774 O << AArch64InstPrinter::getRegisterName(Reg);
775 return false;
776 }
777
778 // Prints the register in MO using class RC using the offset in the
779 // new register class. This should not be used for cross class
780 // printing.
printAsmRegInClass(const MachineOperand & MO,const TargetRegisterClass * RC,unsigned AltName,raw_ostream & O)781 bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
782 const TargetRegisterClass *RC,
783 unsigned AltName, raw_ostream &O) {
784 assert(MO.isReg() && "Should only get here with a register!");
785 const TargetRegisterInfo *RI = STI->getRegisterInfo();
786 Register Reg = MO.getReg();
787 unsigned RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
788 if (!RI->regsOverlap(RegToPrint, Reg))
789 return true;
790 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
791 return false;
792 }
793
PrintAsmOperand(const MachineInstr * MI,unsigned OpNum,const char * ExtraCode,raw_ostream & O)794 bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
795 const char *ExtraCode, raw_ostream &O) {
796 const MachineOperand &MO = MI->getOperand(OpNum);
797
798 // First try the generic code, which knows about modifiers like 'c' and 'n'.
799 if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
800 return false;
801
802 // Does this asm operand have a single letter operand modifier?
803 if (ExtraCode && ExtraCode[0]) {
804 if (ExtraCode[1] != 0)
805 return true; // Unknown modifier.
806
807 switch (ExtraCode[0]) {
808 default:
809 return true; // Unknown modifier.
810 case 'w': // Print W register
811 case 'x': // Print X register
812 if (MO.isReg())
813 return printAsmMRegister(MO, ExtraCode[0], O);
814 if (MO.isImm() && MO.getImm() == 0) {
815 unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
816 O << AArch64InstPrinter::getRegisterName(Reg);
817 return false;
818 }
819 printOperand(MI, OpNum, O);
820 return false;
821 case 'b': // Print B register.
822 case 'h': // Print H register.
823 case 's': // Print S register.
824 case 'd': // Print D register.
825 case 'q': // Print Q register.
826 case 'z': // Print Z register.
827 if (MO.isReg()) {
828 const TargetRegisterClass *RC;
829 switch (ExtraCode[0]) {
830 case 'b':
831 RC = &AArch64::FPR8RegClass;
832 break;
833 case 'h':
834 RC = &AArch64::FPR16RegClass;
835 break;
836 case 's':
837 RC = &AArch64::FPR32RegClass;
838 break;
839 case 'd':
840 RC = &AArch64::FPR64RegClass;
841 break;
842 case 'q':
843 RC = &AArch64::FPR128RegClass;
844 break;
845 case 'z':
846 RC = &AArch64::ZPRRegClass;
847 break;
848 default:
849 return true;
850 }
851 return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
852 }
853 printOperand(MI, OpNum, O);
854 return false;
855 }
856 }
857
858 // According to ARM, we should emit x and v registers unless we have a
859 // modifier.
860 if (MO.isReg()) {
861 Register Reg = MO.getReg();
862
863 // If this is a w or x register, print an x register.
864 if (AArch64::GPR32allRegClass.contains(Reg) ||
865 AArch64::GPR64allRegClass.contains(Reg))
866 return printAsmMRegister(MO, 'x', O);
867
868 // If this is an x register tuple, print an x register.
869 if (AArch64::GPR64x8ClassRegClass.contains(Reg))
870 return printAsmMRegister(MO, 't', O);
871
872 unsigned AltName = AArch64::NoRegAltName;
873 const TargetRegisterClass *RegClass;
874 if (AArch64::ZPRRegClass.contains(Reg)) {
875 RegClass = &AArch64::ZPRRegClass;
876 } else if (AArch64::PPRRegClass.contains(Reg)) {
877 RegClass = &AArch64::PPRRegClass;
878 } else {
879 RegClass = &AArch64::FPR128RegClass;
880 AltName = AArch64::vreg;
881 }
882
883 // If this is a b, h, s, d, or q register, print it as a v register.
884 return printAsmRegInClass(MO, RegClass, AltName, O);
885 }
886
887 printOperand(MI, OpNum, O);
888 return false;
889 }
890
PrintAsmMemoryOperand(const MachineInstr * MI,unsigned OpNum,const char * ExtraCode,raw_ostream & O)891 bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
892 unsigned OpNum,
893 const char *ExtraCode,
894 raw_ostream &O) {
895 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
896 return true; // Unknown modifier.
897
898 const MachineOperand &MO = MI->getOperand(OpNum);
899 assert(MO.isReg() && "unexpected inline asm memory operand");
900 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
901 return false;
902 }
903
PrintDebugValueComment(const MachineInstr * MI,raw_ostream & OS)904 void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
905 raw_ostream &OS) {
906 unsigned NOps = MI->getNumOperands();
907 assert(NOps == 4);
908 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
909 // cast away const; DIetc do not take const operands for some reason.
910 OS << MI->getDebugVariable()->getName();
911 OS << " <- ";
912 // Frame address. Currently handles register +- offset only.
913 assert(MI->isIndirectDebugValue());
914 OS << '[';
915 for (unsigned I = 0, E = std::distance(MI->debug_operands().begin(),
916 MI->debug_operands().end());
917 I < E; ++I) {
918 if (I != 0)
919 OS << ", ";
920 printOperand(MI, I, OS);
921 }
922 OS << ']';
923 OS << "+";
924 printOperand(MI, NOps - 2, OS);
925 }
926
emitJumpTableInfo()927 void AArch64AsmPrinter::emitJumpTableInfo() {
928 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
929 if (!MJTI) return;
930
931 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
932 if (JT.empty()) return;
933
934 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
935 MCSection *ReadOnlySec = TLOF.getSectionForJumpTable(MF->getFunction(), TM);
936 OutStreamer->switchSection(ReadOnlySec);
937
938 auto AFI = MF->getInfo<AArch64FunctionInfo>();
939 for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) {
940 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
941
942 // If this jump table was deleted, ignore it.
943 if (JTBBs.empty()) continue;
944
945 unsigned Size = AFI->getJumpTableEntrySize(JTI);
946 emitAlignment(Align(Size));
947 OutStreamer->emitLabel(GetJTISymbol(JTI));
948
949 const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
950 const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
951
952 for (auto *JTBB : JTBBs) {
953 const MCExpr *Value =
954 MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
955
956 // Each entry is:
957 // .byte/.hword (LBB - Lbase)>>2
958 // or plain:
959 // .word LBB - Lbase
960 Value = MCBinaryExpr::createSub(Value, Base, OutContext);
961 if (Size != 4)
962 Value = MCBinaryExpr::createLShr(
963 Value, MCConstantExpr::create(2, OutContext), OutContext);
964
965 OutStreamer->emitValue(Value, Size);
966 }
967 }
968 }
969
emitFunctionEntryLabel()970 void AArch64AsmPrinter::emitFunctionEntryLabel() {
971 if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
972 MF->getFunction().getCallingConv() ==
973 CallingConv::AArch64_SVE_VectorCall ||
974 MF->getInfo<AArch64FunctionInfo>()->isSVECC()) {
975 auto *TS =
976 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
977 TS->emitDirectiveVariantPCS(CurrentFnSym);
978 }
979
980 return AsmPrinter::emitFunctionEntryLabel();
981 }
982
983 /// Small jump tables contain an unsigned byte or half, representing the offset
984 /// from the lowest-addressed possible destination to the desired basic
985 /// block. Since all instructions are 4-byte aligned, this is further compressed
986 /// by counting in instructions rather than bytes (i.e. divided by 4). So, to
987 /// materialize the correct destination we need:
988 ///
989 /// adr xDest, .LBB0_0
990 /// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
991 /// add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
/// Lower a JumpTableDest pseudo into the ADR + scaled table load + ADD
/// sequence described in the comment above.
void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
                                           const llvm::MachineInstr &MI) {
  // Operands: 0 = destination, 1 = scratch, 2 = table base, 3 = entry index,
  // 4 = jump-table index.
  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  // 32-bit view of the scratch register, used by the byte/half-word loads.
  Register ScratchRegW =
      STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
  Register TableReg = MI.getOperand(2).getReg();
  Register EntryReg = MI.getOperand(3).getReg();
  int JTIdx = MI.getOperand(4).getIndex();
  int Size = AArch64FI->getJumpTableEntrySize(JTIdx);

  // This has to be first because the compression pass based its reachability
  // calculations on the start of the JumpTableDest instruction.
  auto Label =
      MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);

  // If we don't already have a symbol to use as the base, use the ADR
  // instruction itself.
  if (!Label) {
    Label = MF->getContext().createTempSymbol();
    AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
    OutStreamer.emitLabel(Label);
  }

  // adr xDest, Label
  auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
  EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
                                  .addReg(DestReg)
                                  .addExpr(LabelExpr));

  // Load the number of instruction-steps to offset from the label.
  unsigned LdrOpcode;
  switch (Size) {
  case 1: LdrOpcode = AArch64::LDRBBroX; break;
  case 2: LdrOpcode = AArch64::LDRHHroX; break;
  case 4: LdrOpcode = AArch64::LDRSWroX; break;
  default:
    llvm_unreachable("Unknown jump table size");
  }

  // Index the table. The final immediate selects the shift on the offset
  // register: no shift for byte entries, "lsl #1" for half-word entries.
  EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
                                  .addReg(Size == 4 ? ScratchReg : ScratchRegW)
                                  .addReg(TableReg)
                                  .addReg(EntryReg)
                                  .addImm(0)
                                  .addImm(Size == 1 ? 0 : 1));

  // Add to the already materialized base label address, multiplying by 4 if
  // compressed.
  EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
                                  .addReg(DestReg)
                                  .addReg(DestReg)
                                  .addReg(ScratchReg)
                                  .addImm(Size == 4 ? 0 : 2));
}
1046
LowerMOPS(llvm::MCStreamer & OutStreamer,const llvm::MachineInstr & MI)1047 void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1048 const llvm::MachineInstr &MI) {
1049 unsigned Opcode = MI.getOpcode();
1050 assert(STI->hasMOPS());
1051 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1052
1053 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1054 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1055 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1056 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1057 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1058 if (Opcode == AArch64::MOPSMemorySetPseudo)
1059 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1060 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1061 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1062 llvm_unreachable("Unhandled memory operation pseudo");
1063 }();
1064 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1065 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1066
1067 for (auto Op : Ops) {
1068 int i = 0;
1069 auto MCIB = MCInstBuilder(Op);
1070 // Destination registers
1071 MCIB.addReg(MI.getOperand(i++).getReg());
1072 MCIB.addReg(MI.getOperand(i++).getReg());
1073 if (!IsSet)
1074 MCIB.addReg(MI.getOperand(i++).getReg());
1075 // Input registers
1076 MCIB.addReg(MI.getOperand(i++).getReg());
1077 MCIB.addReg(MI.getOperand(i++).getReg());
1078 MCIB.addReg(MI.getOperand(i++).getReg());
1079
1080 EmitToStreamer(OutStreamer, MCIB);
1081 }
1082 }
1083
LowerSTACKMAP(MCStreamer & OutStreamer,StackMaps & SM,const MachineInstr & MI)1084 void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
1085 const MachineInstr &MI) {
1086 unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
1087
1088 auto &Ctx = OutStreamer.getContext();
1089 MCSymbol *MILabel = Ctx.createTempSymbol();
1090 OutStreamer.emitLabel(MILabel);
1091
1092 SM.recordStackMap(*MILabel, MI);
1093 assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1094
1095 // Scan ahead to trim the shadow.
1096 const MachineBasicBlock &MBB = *MI.getParent();
1097 MachineBasicBlock::const_iterator MII(MI);
1098 ++MII;
1099 while (NumNOPBytes > 0) {
1100 if (MII == MBB.end() || MII->isCall() ||
1101 MII->getOpcode() == AArch64::DBG_VALUE ||
1102 MII->getOpcode() == TargetOpcode::PATCHPOINT ||
1103 MII->getOpcode() == TargetOpcode::STACKMAP)
1104 break;
1105 ++MII;
1106 NumNOPBytes -= 4;
1107 }
1108
1109 // Emit nops.
1110 for (unsigned i = 0; i < NumNOPBytes; i += 4)
1111 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1112 }
1113
1114 // Lower a patchpoint of the form:
1115 // [<def>], <id>, <numBytes>, <target>, <numArgs>
void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                                        const MachineInstr &MI) {
  // Record the patchpoint site in the stackmap section.
  auto &Ctx = OutStreamer.getContext();
  MCSymbol *MILabel = Ctx.createTempSymbol();
  OutStreamer.emitLabel(MILabel);
  SM.recordPatchPoint(*MILabel, MI);

  PatchPointOpers Opers(&MI);

  int64_t CallTarget = Opers.getCallTarget().getImm();
  unsigned EncodedBytes = 0;
  if (CallTarget) {
    // Only a 48-bit target address is representable by the MOVZ/MOVK pair
    // emitted below.
    assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
           "High 16 bits of call target should be zero.");
    Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
    // MOVZ + MOVK + MOVK + BLR = 4 instructions = 16 bytes.
    EncodedBytes = 16;
    // Materialize the jump address:
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVZXi)
                                    .addReg(ScratchReg)
                                    .addImm((CallTarget >> 32) & 0xFFFF)
                                    .addImm(32));
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
                                    .addReg(ScratchReg)
                                    .addReg(ScratchReg)
                                    .addImm((CallTarget >> 16) & 0xFFFF)
                                    .addImm(16));
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
                                    .addReg(ScratchReg)
                                    .addReg(ScratchReg)
                                    .addImm(CallTarget & 0xFFFF)
                                    .addImm(0));
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
  }
  // Emit padding NOPs up to the requested patchable size.
  unsigned NumBytes = Opers.getNumPatchBytes();
  assert(NumBytes >= EncodedBytes &&
         "Patchpoint can't request size less than the length of a call.");
  assert((NumBytes - EncodedBytes) % 4 == 0 &&
         "Invalid number of NOP bytes requested!");
  for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
}
1158
LowerSTATEPOINT(MCStreamer & OutStreamer,StackMaps & SM,const MachineInstr & MI)1159 void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1160 const MachineInstr &MI) {
1161 StatepointOpers SOpers(&MI);
1162 if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1163 assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1164 for (unsigned i = 0; i < PatchBytes; i += 4)
1165 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1166 } else {
1167 // Lower call target and choose correct opcode
1168 const MachineOperand &CallTarget = SOpers.getCallTarget();
1169 MCOperand CallTargetMCOp;
1170 unsigned CallOpcode;
1171 switch (CallTarget.getType()) {
1172 case MachineOperand::MO_GlobalAddress:
1173 case MachineOperand::MO_ExternalSymbol:
1174 MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1175 CallOpcode = AArch64::BL;
1176 break;
1177 case MachineOperand::MO_Immediate:
1178 CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1179 CallOpcode = AArch64::BL;
1180 break;
1181 case MachineOperand::MO_Register:
1182 CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1183 CallOpcode = AArch64::BLR;
1184 break;
1185 default:
1186 llvm_unreachable("Unsupported operand type in statepoint call target");
1187 break;
1188 }
1189
1190 EmitToStreamer(OutStreamer,
1191 MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1192 }
1193
1194 auto &Ctx = OutStreamer.getContext();
1195 MCSymbol *MILabel = Ctx.createTempSymbol();
1196 OutStreamer.emitLabel(MILabel);
1197 SM.recordStatepoint(*MILabel, MI);
1198 }
1199
/// Lower a FAULTING_OP pseudo: emit a label at the potentially-faulting
/// instruction, record it (with its handler block) in the fault map, then
/// emit the wrapped real instruction.
void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
  // FAULTING_LOAD_OP <def>, <faulting type>, <MBB handler>,
  // <opcode>, <operands>

  Register DefRegister = FaultingMI.getOperand(0).getReg();
  FaultMaps::FaultKind FK =
      static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
  MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
  unsigned Opcode = FaultingMI.getOperand(3).getImm();
  // Operands of the wrapped instruction start after the fixed four above.
  unsigned OperandsBeginIdx = 4;

  // Label the faulting instruction so the fault map can reference it.
  auto &Ctx = OutStreamer->getContext();
  MCSymbol *FaultingLabel = Ctx.createTempSymbol();
  OutStreamer->emitLabel(FaultingLabel);

  assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
  FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);

  // Rebuild the real instruction from the pseudo's trailing operands.
  MCInst MI;
  MI.setOpcode(Opcode);

  // A zero def register means the wrapped instruction defines nothing.
  if (DefRegister != (Register)0)
    MI.addOperand(MCOperand::createReg(DefRegister));

  for (const MachineOperand &MO :
       llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
    MCOperand Dest;
    lowerOperand(MO, Dest);
    MI.addOperand(Dest);
  }

  OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
  OutStreamer->emitInstruction(MI, getSubtargetInfo());
}
1234
emitFMov0(const MachineInstr & MI)1235 void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
1236 Register DestReg = MI.getOperand(0).getReg();
1237 if (STI->hasZeroCycleZeroingFP() && !STI->hasZeroCycleZeroingFPWorkaround() &&
1238 STI->hasNEON()) {
1239 // Convert H/S register to corresponding D register
1240 if (AArch64::H0 <= DestReg && DestReg <= AArch64::H31)
1241 DestReg = AArch64::D0 + (DestReg - AArch64::H0);
1242 else if (AArch64::S0 <= DestReg && DestReg <= AArch64::S31)
1243 DestReg = AArch64::D0 + (DestReg - AArch64::S0);
1244 else
1245 assert(AArch64::D0 <= DestReg && DestReg <= AArch64::D31);
1246
1247 MCInst MOVI;
1248 MOVI.setOpcode(AArch64::MOVID);
1249 MOVI.addOperand(MCOperand::createReg(DestReg));
1250 MOVI.addOperand(MCOperand::createImm(0));
1251 EmitToStreamer(*OutStreamer, MOVI);
1252 } else {
1253 MCInst FMov;
1254 switch (MI.getOpcode()) {
1255 default: llvm_unreachable("Unexpected opcode");
1256 case AArch64::FMOVH0:
1257 FMov.setOpcode(AArch64::FMOVWHr);
1258 FMov.addOperand(MCOperand::createReg(DestReg));
1259 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1260 break;
1261 case AArch64::FMOVS0:
1262 FMov.setOpcode(AArch64::FMOVWSr);
1263 FMov.addOperand(MCOperand::createReg(DestReg));
1264 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1265 break;
1266 case AArch64::FMOVD0:
1267 FMov.setOpcode(AArch64::FMOVXDr);
1268 FMov.addOperand(MCOperand::createReg(DestReg));
1269 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
1270 break;
1271 }
1272 EmitToStreamer(*OutStreamer, FMov);
1273 }
1274 }
1275
1276 // Simple pseudo-instructions have their lowering (with expansion to real
1277 // instructions) auto-generated.
1278 #include "AArch64GenMCPseudoLowering.inc"
1279
/// Lower a MachineInstr to MC and emit it, handling every AArch64 pseudo
/// that needs custom expansion; anything not special-cased falls through to
/// the generic MCInstLowering at the bottom.
void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
  AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());

  // Do any auto-generated pseudo lowerings.
  if (emitPseudoExpansionLowering(*OutStreamer, MI))
    return;

  // Note any reference to the swift_async_extendedFramePointerFlags symbol
  // so the corresponding weak symbol can be emitted later.
  if (MI->getOpcode() == AArch64::ADRP) {
    for (auto &Opd : MI->operands()) {
      if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
                                "swift_async_extendedFramePointerFlags") {
        ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
      }
    }
  }

  if (AArch64FI->getLOHRelated().count(MI)) {
    // Generate a label for LOH related instruction
    MCSymbol *LOHLabel = createTempSymbol("loh");
    // Associate the instruction with the label
    LOHInstToLabel[MI] = LOHLabel;
    OutStreamer->emitLabel(LOHLabel);
  }

  AArch64TargetStreamer *TS =
    static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
  // Do any manual lowerings.
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::HINT: {
    // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
    // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
    // non-empty. If MI is the initial BTI, place the
    // __patchable_function_entries label after BTI.
    if (CurrentPatchableFunctionEntrySym &&
        CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
        MI == &MF->front().front()) {
      int64_t Imm = MI->getOperand(0).getImm();
      if ((Imm & 32) && (Imm & 6)) {
        MCInst Inst;
        MCInstLowering.Lower(MI, Inst);
        EmitToStreamer(*OutStreamer, Inst);
        CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
        OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
        return;
      }
    }
    break;
  }
  case AArch64::MOVMCSym: {
    // Materialize a symbol value with MOVZ (bits 31:16, signed, MO_G1)
    // followed by MOVK (bits 15:0, MO_G0).
    Register DestReg = MI->getOperand(0).getReg();
    const MachineOperand &MO_Sym = MI->getOperand(1);
    MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
    MCOperand Hi_MCSym, Lo_MCSym;

    Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
    Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);

    MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
    MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);

    MCInst MovZ;
    MovZ.setOpcode(AArch64::MOVZXi);
    MovZ.addOperand(MCOperand::createReg(DestReg));
    MovZ.addOperand(Hi_MCSym);
    MovZ.addOperand(MCOperand::createImm(16));
    EmitToStreamer(*OutStreamer, MovZ);

    MCInst MovK;
    MovK.setOpcode(AArch64::MOVKXi);
    MovK.addOperand(MCOperand::createReg(DestReg));
    MovK.addOperand(MCOperand::createReg(DestReg));
    MovK.addOperand(Lo_MCSym);
    MovK.addOperand(MCOperand::createImm(0));
    EmitToStreamer(*OutStreamer, MovK);
    return;
  }
  case AArch64::MOVIv2d_ns:
    // If the target has <rdar://problem/16473581>, lower this
    // instruction to movi.16b instead.
    if (STI->hasZeroCycleZeroingFPWorkaround() &&
        MI->getOperand(1).getImm() == 0) {
      MCInst TmpInst;
      TmpInst.setOpcode(AArch64::MOVIv16b_ns);
      TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
      TmpInst.addOperand(MCOperand::createImm(MI->getOperand(1).getImm()));
      EmitToStreamer(*OutStreamer, TmpInst);
      return;
    }
    break;

  case AArch64::DBG_VALUE:
  case AArch64::DBG_VALUE_LIST:
    // Debug values produce only an assembly comment, and only when the
    // streamer supports raw text.
    if (isVerbose() && OutStreamer->hasRawTextSupport()) {
      SmallString<128> TmpStr;
      raw_svector_ostream OS(TmpStr);
      PrintDebugValueComment(MI, OS);
      OutStreamer->emitRawText(StringRef(OS.str()));
    }
    return;

  case AArch64::EMITBKEY: {
    ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
    if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
        ExceptionHandlingType != ExceptionHandling::ARM)
      return;

    if (getFunctionCFISectionType(*MF) == CFISection::None)
      return;

    OutStreamer->emitCFIBKeyFrame();
    return;
  }

  case AArch64::EMITMTETAGGED: {
    ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
    if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
        ExceptionHandlingType != ExceptionHandling::ARM)
      return;

    if (getFunctionCFISectionType(*MF) != CFISection::None)
      OutStreamer->emitCFIMTETaggedFrame();
    return;
  }

  // Tail calls use pseudo instructions so they have the proper code-gen
  // attributes (isCall, isReturn, etc.). We lower them to the real
  // instruction here.
  case AArch64::TCRETURNri:
  case AArch64::TCRETURNriBTI:
  case AArch64::TCRETURNriALL: {
    MCInst TmpInst;
    TmpInst.setOpcode(AArch64::BR);
    TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
    EmitToStreamer(*OutStreamer, TmpInst);
    return;
  }
  case AArch64::TCRETURNdi: {
    MCOperand Dest;
    MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
    MCInst TmpInst;
    TmpInst.setOpcode(AArch64::B);
    TmpInst.addOperand(Dest);
    EmitToStreamer(*OutStreamer, TmpInst);
    return;
  }
  case AArch64::SpeculationBarrierISBDSBEndBB: {
    // Print DSB SYS + ISB
    MCInst TmpInstDSB;
    TmpInstDSB.setOpcode(AArch64::DSB);
    TmpInstDSB.addOperand(MCOperand::createImm(0xf));
    EmitToStreamer(*OutStreamer, TmpInstDSB);
    MCInst TmpInstISB;
    TmpInstISB.setOpcode(AArch64::ISB);
    TmpInstISB.addOperand(MCOperand::createImm(0xf));
    EmitToStreamer(*OutStreamer, TmpInstISB);
    return;
  }
  case AArch64::SpeculationBarrierSBEndBB: {
    // Print SB
    MCInst TmpInstSB;
    TmpInstSB.setOpcode(AArch64::SB);
    EmitToStreamer(*OutStreamer, TmpInstSB);
    return;
  }
  case AArch64::TLSDESC_CALLSEQ: {
    /// lower this to:
    /// adrp  x0, :tlsdesc:var
    /// ldr   x1, [x0, #:tlsdesc_lo12:var]
    /// add   x0, x0, #:tlsdesc_lo12:var
    /// .tlsdesccall var
    /// blr   x1
    /// (TPIDR_EL0 offset now in x0)
    const MachineOperand &MO_Sym = MI->getOperand(0);
    MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
    MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
    MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
    MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
    MCInstLowering.lowerOperand(MO_Sym, Sym);
    MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
    MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);

    MCInst Adrp;
    Adrp.setOpcode(AArch64::ADRP);
    Adrp.addOperand(MCOperand::createReg(AArch64::X0));
    Adrp.addOperand(SymTLSDesc);
    EmitToStreamer(*OutStreamer, Adrp);

    // ILP32 loads/adds use the 32-bit W registers.
    MCInst Ldr;
    if (STI->isTargetILP32()) {
      Ldr.setOpcode(AArch64::LDRWui);
      Ldr.addOperand(MCOperand::createReg(AArch64::W1));
    } else {
      Ldr.setOpcode(AArch64::LDRXui);
      Ldr.addOperand(MCOperand::createReg(AArch64::X1));
    }
    Ldr.addOperand(MCOperand::createReg(AArch64::X0));
    Ldr.addOperand(SymTLSDescLo12);
    Ldr.addOperand(MCOperand::createImm(0));
    EmitToStreamer(*OutStreamer, Ldr);

    MCInst Add;
    if (STI->isTargetILP32()) {
      Add.setOpcode(AArch64::ADDWri);
      Add.addOperand(MCOperand::createReg(AArch64::W0));
      Add.addOperand(MCOperand::createReg(AArch64::W0));
    } else {
      Add.setOpcode(AArch64::ADDXri);
      Add.addOperand(MCOperand::createReg(AArch64::X0));
      Add.addOperand(MCOperand::createReg(AArch64::X0));
    }
    Add.addOperand(SymTLSDescLo12);
    Add.addOperand(MCOperand::createImm(AArch64_AM::getShiftValue(0)));
    EmitToStreamer(*OutStreamer, Add);

    // Emit a relocation-annotation. This expands to no code, but requests
    // the following instruction gets an R_AARCH64_TLSDESC_CALL.
    MCInst TLSDescCall;
    TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
    TLSDescCall.addOperand(Sym);
    EmitToStreamer(*OutStreamer, TLSDescCall);

    MCInst Blr;
    Blr.setOpcode(AArch64::BLR);
    Blr.addOperand(MCOperand::createReg(AArch64::X1));
    EmitToStreamer(*OutStreamer, Blr);

    return;
  }

  case AArch64::JumpTableDest32:
  case AArch64::JumpTableDest16:
  case AArch64::JumpTableDest8:
    LowerJumpTableDest(*OutStreamer, *MI);
    return;

  case AArch64::FMOVH0:
  case AArch64::FMOVS0:
  case AArch64::FMOVD0:
    emitFMov0(*MI);
    return;

  case AArch64::MOPSMemoryCopyPseudo:
  case AArch64::MOPSMemoryMovePseudo:
  case AArch64::MOPSMemorySetPseudo:
  case AArch64::MOPSMemorySetTaggingPseudo:
    LowerMOPS(*OutStreamer, *MI);
    return;

  case TargetOpcode::STACKMAP:
    return LowerSTACKMAP(*OutStreamer, SM, *MI);

  case TargetOpcode::PATCHPOINT:
    return LowerPATCHPOINT(*OutStreamer, SM, *MI);

  case TargetOpcode::STATEPOINT:
    return LowerSTATEPOINT(*OutStreamer, SM, *MI);

  case TargetOpcode::FAULTING_OP:
    return LowerFAULTING_OP(*MI);

  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
    LowerPATCHABLE_FUNCTION_ENTER(*MI);
    return;

  case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
    LowerPATCHABLE_FUNCTION_EXIT(*MI);
    return;

  case TargetOpcode::PATCHABLE_TAIL_CALL:
    LowerPATCHABLE_TAIL_CALL(*MI);
    return;

  case AArch64::KCFI_CHECK:
    LowerKCFI_CHECK(*MI);
    return;

  case AArch64::HWASAN_CHECK_MEMACCESS:
  case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
    LowerHWASAN_CHECK_MEMACCESS(*MI);
    return;

  // The SEH_* pseudos are forwarded to the ARM64 Windows unwind-info
  // directives on the target streamer.
  case AArch64::SEH_StackAlloc:
    TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
    return;

  case AArch64::SEH_SaveFPLR:
    TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
    return;

  case AArch64::SEH_SaveFPLR_X:
    assert(MI->getOperand(0).getImm() < 0 &&
           "Pre increment SEH opcode must have a negative offset");
    TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
    return;

  case AArch64::SEH_SaveReg:
    TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
                               MI->getOperand(1).getImm());
    return;

  case AArch64::SEH_SaveReg_X:
    assert(MI->getOperand(1).getImm() < 0 &&
           "Pre increment SEH opcode must have a negative offset");
    TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
		                -MI->getOperand(1).getImm());
    return;

  case AArch64::SEH_SaveRegP:
    // A pair ending in LR (x30) uses the dedicated save_lrpair form.
    if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
        MI->getOperand(0).getImm() <= 28) {
      assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
             "Register paired with LR must be odd");
      TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
                                    MI->getOperand(2).getImm());
      return;
    }
    assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
            "Non-consecutive registers not allowed for save_regp");
    TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
                                MI->getOperand(2).getImm());
    return;

  case AArch64::SEH_SaveRegP_X:
    assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
            "Non-consecutive registers not allowed for save_regp_x");
    assert(MI->getOperand(2).getImm() < 0 &&
           "Pre increment SEH opcode must have a negative offset");
    TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
                                 -MI->getOperand(2).getImm());
    return;

  case AArch64::SEH_SaveFReg:
    TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
                                MI->getOperand(1).getImm());
    return;

  case AArch64::SEH_SaveFReg_X:
    assert(MI->getOperand(1).getImm() < 0 &&
           "Pre increment SEH opcode must have a negative offset");
    TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
                                 -MI->getOperand(1).getImm());
    return;

  case AArch64::SEH_SaveFRegP:
    assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
            "Non-consecutive registers not allowed for save_regp");
    TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
                                 MI->getOperand(2).getImm());
    return;

  case AArch64::SEH_SaveFRegP_X:
    assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
            "Non-consecutive registers not allowed for save_regp_x");
    assert(MI->getOperand(2).getImm() < 0 &&
           "Pre increment SEH opcode must have a negative offset");
    TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
                                  -MI->getOperand(2).getImm());
    return;

  case AArch64::SEH_SetFP:
    TS->emitARM64WinCFISetFP();
    return;

  case AArch64::SEH_AddFP:
    TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
    return;

  case AArch64::SEH_Nop:
    TS->emitARM64WinCFINop();
    return;

  case AArch64::SEH_PrologEnd:
    TS->emitARM64WinCFIPrologEnd();
    return;

  case AArch64::SEH_EpilogStart:
    TS->emitARM64WinCFIEpilogStart();
    return;

  case AArch64::SEH_EpilogEnd:
    TS->emitARM64WinCFIEpilogEnd();
    return;

  case AArch64::RETGUARD_JMP_TRAP:
  {
    // Retguard: fall through to the success label when the check value is
    // zero, otherwise trap with BRK #1.
    MCSymbol *RGSuccSym = OutContext.createTempSymbol();
    /* Compare and branch */
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CBZX)
        .addReg(MI->getOperand(0).getReg())
        .addExpr(MCSymbolRefExpr::create(RGSuccSym, OutContext)));
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(1));
    OutStreamer->emitLabel(RGSuccSym);
    return;
  }

  case AArch64::SEH_PACSignLR:
    TS->emitARM64WinCFIPACSignLR();
    return;
  }

  // Finally, do the automated lowerings for everything else.
  MCInst TmpInst;
  MCInstLowering.Lower(MI, TmpInst);
  EmitToStreamer(*OutStreamer, TmpInst);
}
1687
1688 // Force static initialization.
LLVMInitializeAArch64AsmPrinter()1689 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmPrinter() {
1690 RegisterAsmPrinter<AArch64AsmPrinter> X(getTheAArch64leTarget());
1691 RegisterAsmPrinter<AArch64AsmPrinter> Y(getTheAArch64beTarget());
1692 RegisterAsmPrinter<AArch64AsmPrinter> Z(getTheARM64Target());
1693 RegisterAsmPrinter<AArch64AsmPrinter> W(getTheARM64_32Target());
1694 RegisterAsmPrinter<AArch64AsmPrinter> V(getTheAArch64_32Target());
1695 }
1696