//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
using namespace llvm;

namespace {

class AArch64Operand;

class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.
  MCSubtargetInfo &STI;

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<bool, unsigned> > RegisterReqs;
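  // Each entry maps an alias name to (IsVector, RegNum); matchRegisterNameAlias
  // uses the bool to reject a vector alias where a scalar register is expected
  // and vice versa.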

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
  int tryParseRegister();
  int tryMatchVectorRegister(StringRef &Kind, bool expected);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseVectorList(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);

  void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
  bool showMatchError(SMLoc Loc, unsigned ErrCode);

  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);

  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseVectorRegister(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
                   const MCInstrInfo &MII,
                   const MCTargetOptions &Options)
      : MCTargetAsmParser(), STI(_STI) {
    MCAsmParserExtension::Initialize(_Parser);
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
} // end anonymous namespace

namespace {

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  struct RegOp {
    unsigned RegNum;
    bool isVector;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementKind;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    unsigned Val; // Encoded 8-bit representation.
  };

  struct BarrierOp {
    unsigned Val; // Not the enum since not all values have names.
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint64_t FeatureBits; // We need to pass through information about which
                          // core we are compiling for so that the SysReg
                          // Mappers can appropriately conditionalize.
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    unsigned Val;
  };

  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct ExtendOp {
    unsigned Val;
  };

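  // Exactly one member of this union is live at a time; the Kind tag above
  // says which one, and every accessor asserts on it before touching the data.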
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &_Ctx)
      : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  uint64_t getSysRegFeatureBits() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return SysReg.FeatureBits;
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Type;
  }

  unsigned getShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Amount;
  }

  bool hasShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.HasExplicitAmount;
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }
  bool isSImm9() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -256 && Val < 256);
  }
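  // The isSImm7sN predicates accept the signed 7-bit, scale-N immediates used
  // for paired load/store offsets: multiples of N in [-64*N, 63*N], e.g.
  // [-256, 252] for N = 4.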
  bool isSImm7s4() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
  }
  bool isSImm7s8() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
  }
  bool isSImm7s16() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when
      // using @pageoff.
      return Addend >= 0 && (Addend % Scale) == 0;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an
      // addend.
      return Addend == 0;
    }

    return false;
  }

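  // isUImm12Offset<Scale> matches the unsigned scaled 12-bit offset form of
  // LDR/STR addressing: a multiple of Scale in [0, 4095 * Scale]. Anything
  // non-constant defers to the symbolic check above.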
  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm(), Scale);

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }

  bool isImm0_7() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 8);
  }
  bool isImm1_8() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val > 0 && Val < 9);
  }
  bool isImm0_15() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 16);
  }
  bool isImm1_16() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val > 0 && Val < 17);
  }
  bool isImm0_31() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 32);
  }
  bool isImm1_31() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 32);
  }
  bool isImm1_32() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 33);
  }
  bool isImm0_63() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }
  bool isImm1_63() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 64);
  }
  bool isImm1_64() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 1 && Val < 65);
  }
  bool isImm0_127() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 128);
  }
  bool isImm0_255() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 256);
  }
  bool isImm0_65535() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 65536);
  }
  bool isImm32_63() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 32 && Val < 64);
  }
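  // "Logical immediates" are the bitmask-immediate encodings used by
  // AND/ORR/EOR and friends. The 32-bit check also tolerates values that were
  // sign-extended to 64 bits, and the *Not variants accept an immediate whose
  // bitwise complement is encodable (used by aliases that encode the inverted
  // value).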
  bool isLogicalImm32() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    if (Val >> 32 != 0 && Val >> 32 != ~0LL)
      return false;
    Val &= 0xFFFFFFFF;
    return AArch64_AM::isLogicalImmediate(Val, 32);
  }
  bool isLogicalImm64() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
  }
  bool isLogicalImm32Not() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    return AArch64_AM::isLogicalImmediate(Val, 32);
  }
  bool isLogicalImm64Not() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
  }
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }
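  // An ADD/SUB immediate is a 12-bit unsigned value with an optional
  // 'lsl #12'; addAddSubImmOperands below emits it as a (value, shift) pair.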
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
  bool isBranchTarget26() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
  }
  bool isPCRelLabel19() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
  }
  bool isBranchTarget14() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
  }

  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
      if (ELFRefKind == AllowedModifiers[i])
        return Addend == 0;
    }

    return false;
  }

  bool isMovZSymbolG3() const {
    static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG2() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
        AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG1() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
        AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
        AArch64MCExpr::VK_DTPREL_G1,
    };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG0() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
        AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG3() const {
    static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG2() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G2_NC};
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG1() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
        AArch64MCExpr::VK_DTPREL_G1_NC
    };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG0() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
        AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
    };
    return isMovWSymbol(Variants);
  }

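  // These templates back the "mov Rd, #imm" alias: MOVZ wins whenever a single
  // 16-bit chunk at the requested shift covers the value, and MOVN is only
  // considered when no MOVZ form matches.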
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    if (RegWidth == 32)
      Value &= 0xffffffffULL;

    // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
    if (Value == 0 && Shift != 0)
      return false;

    return (Value & ~(0xffffULL << Shift)) == 0;
  }

  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    // MOVZ takes precedence over MOVN.
    for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
      if ((Value & ~(0xffffULL << MOVZShift)) == 0)
        return false;

    Value = ~Value;
    if (RegWidth == 32)
      Value &= 0xffffffffULL;

    return (Value & ~(0xffffULL << Shift)) == 0;
  }

  bool isFPImm() const { return Kind == k_FPImm; }
  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
    Mapper.fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
    Mapper.fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  bool isSystemPStateField() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
  bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
  bool isVectorRegLo() const {
    return Kind == k_Register && Reg.isVector &&
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
               Reg.RegNum);
  }
  bool isGPR32as64() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64sp0() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           !VectorList.ElementKind;
  }

  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.ElementKind != ElementKind)
      return false;
    return VectorList.NumElements == NumElements;
  }

  bool isVectorIndex1() const {
    return Kind == k_VectorIndex && VectorIndex.Val == 1;
  }
  bool isVectorIndexB() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 16;
  }
  bool isVectorIndexH() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 8;
  }
  bool isVectorIndexS() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 4;
  }
  bool isVectorIndexD() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 2;
  }
  bool isToken() const override { return Kind == k_Token; }
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
  }
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

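  // Register-offset memory operands: for a Width-bit access the extend amount
  // must be 0 (unscaled) or log2(Width / 8) (scaled by the access size).
  // X-register offsets use LSL/SXTX; W-register offsets use UXTW/SXTW.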
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL left shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm9() && !isUImm12Offset<Width / 8>();
  }

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

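  // Emit the W register carrying the same encoding as the parsed X register,
  // for instructions that are written with a 64-bit register name but encode
  // a 32-bit register operand.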
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::CreateReg(Reg));
  }

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

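  // Vector lists are parsed and stored as Q-register tuples; the 64-bit form
  // rebases the start register onto the matching D-register tuple before
  // emitting it.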
  template <unsigned NumRegs>
  void addVectorList64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
                                    AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
    unsigned FirstReg = FirstRegs[NumRegs - 1];

    Inst.addOperand(
        MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
  }

  template <unsigned NumRegs>
  void addVectorList128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
                                    AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
    unsigned FirstReg = FirstRegs[NumRegs - 1];

    Inst.addOperand(
        MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
  }

  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }

  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
    }
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }

  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }

  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
  }

  void addSImm9Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
  }

  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
  }

  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
  }

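  // The addImmN_M emitters only run after the matcher accepted the operand via
  // the corresponding isImmN_M predicate, so the cast<MCConstantExpr> in each
  // cannot fail.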
addImm0_7Operands(MCInst & Inst,unsigned N) const1279 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1280 assert(N == 1 && "Invalid number of operands!");
1281 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1282 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1283 }
1284
addImm1_8Operands(MCInst & Inst,unsigned N) const1285 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1286 assert(N == 1 && "Invalid number of operands!");
1287 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1288 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1289 }
1290
addImm0_15Operands(MCInst & Inst,unsigned N) const1291 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1292 assert(N == 1 && "Invalid number of operands!");
1293 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1294 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1295 }
1296
addImm1_16Operands(MCInst & Inst,unsigned N) const1297 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1298 assert(N == 1 && "Invalid number of operands!");
1299 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1300 assert(MCE && "Invalid constant immediate operand!");
1301 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1302 }
1303
addImm0_31Operands(MCInst & Inst,unsigned N) const1304 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1305 assert(N == 1 && "Invalid number of operands!");
1306 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1307 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1308 }
1309
addImm1_31Operands(MCInst & Inst,unsigned N) const1310 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1311 assert(N == 1 && "Invalid number of operands!");
1312 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1313 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1314 }
1315
addImm1_32Operands(MCInst & Inst,unsigned N) const1316 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1317 assert(N == 1 && "Invalid number of operands!");
1318 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1319 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1320 }
1321
addImm0_63Operands(MCInst & Inst,unsigned N) const1322 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1323 assert(N == 1 && "Invalid number of operands!");
1324 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1325 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1326 }
1327
addImm1_63Operands(MCInst & Inst,unsigned N) const1328 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1329 assert(N == 1 && "Invalid number of operands!");
1330 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1331 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1332 }
1333
addImm1_64Operands(MCInst & Inst,unsigned N) const1334 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1335 assert(N == 1 && "Invalid number of operands!");
1336 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1337 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1338 }
1339
addImm0_127Operands(MCInst & Inst,unsigned N) const1340 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1341 assert(N == 1 && "Invalid number of operands!");
1342 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1343 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1344 }
1345
addImm0_255Operands(MCInst & Inst,unsigned N) const1346 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1347 assert(N == 1 && "Invalid number of operands!");
1348 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1349 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1350 }
1351
addImm0_65535Operands(MCInst & Inst,unsigned N) const1352 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1353 assert(N == 1 && "Invalid number of operands!");
1354 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1355 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1356 }
1357
addImm32_63Operands(MCInst & Inst,unsigned N) const1358 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1359 assert(N == 1 && "Invalid number of operands!");
1360 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1361 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1362 }
1363
addLogicalImm32Operands(MCInst & Inst,unsigned N) const1364 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1365 assert(N == 1 && "Invalid number of operands!");
1366 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1367 uint64_t encoding =
1368 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1369 Inst.addOperand(MCOperand::CreateImm(encoding));
1370 }
1371
addLogicalImm64Operands(MCInst & Inst,unsigned N) const1372 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1373 assert(N == 1 && "Invalid number of operands!");
1374 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1375 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1376 Inst.addOperand(MCOperand::CreateImm(encoding));
1377 }
1378
addLogicalImm32NotOperands(MCInst & Inst,unsigned N) const1379 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1380 assert(N == 1 && "Invalid number of operands!");
1381 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1382 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1383 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1384 Inst.addOperand(MCOperand::CreateImm(encoding));
1385 }
1386
addLogicalImm64NotOperands(MCInst & Inst,unsigned N) const1387 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1388 assert(N == 1 && "Invalid number of operands!");
1389 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1390 uint64_t encoding =
1391 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1392 Inst.addOperand(MCOperand::CreateImm(encoding));
1393 }
1394
addSIMDImmType10Operands(MCInst & Inst,unsigned N) const1395 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1396 assert(N == 1 && "Invalid number of operands!");
1397 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1398 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1399 Inst.addOperand(MCOperand::CreateImm(encoding));
1400 }
1401
addBranchTarget26Operands(MCInst & Inst,unsigned N) const1402 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1403 // Branch operands don't encode the low bits, so shift them off
1404 // here. If it's a label, however, just put it on directly as there's
1405 // not enough information now to do anything.
1406 assert(N == 1 && "Invalid number of operands!");
1407 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1408 if (!MCE) {
1409 addExpr(Inst, getImm());
1410 return;
1411 }
1412 assert(MCE && "Invalid constant immediate operand!");
1413 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1414 }
1415
addPCRelLabel19Operands(MCInst & Inst,unsigned N) const1416 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1417 // Branch operands don't encode the low bits, so shift them off
1418 // here. If it's a label, however, just put it on directly as there's
1419 // not enough information now to do anything.
1420 assert(N == 1 && "Invalid number of operands!");
1421 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1422 if (!MCE) {
1423 addExpr(Inst, getImm());
1424 return;
1425 }
1426 assert(MCE && "Invalid constant immediate operand!");
1427 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1428 }
1429
addBranchTarget14Operands(MCInst & Inst,unsigned N) const1430 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1431 // Branch operands don't encode the low bits, so shift them off
1432 // here. If it's a label, however, just put it on directly as there's
1433 // not enough information now to do anything.
1434 assert(N == 1 && "Invalid number of operands!");
1435 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1436 if (!MCE) {
1437 addExpr(Inst, getImm());
1438 return;
1439 }
1440 assert(MCE && "Invalid constant immediate operand!");
1441 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1442 }
1443
addFPImmOperands(MCInst & Inst,unsigned N) const1444 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1445 assert(N == 1 && "Invalid number of operands!");
1446 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1447 }
1448
addBarrierOperands(MCInst & Inst,unsigned N) const1449 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1450 assert(N == 1 && "Invalid number of operands!");
1451 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1452 }
1453
addMRSSystemRegisterOperands(MCInst & Inst,unsigned N) const1454 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1455 assert(N == 1 && "Invalid number of operands!");
1456
1457 bool Valid;
1458 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
1459 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1460
1461 Inst.addOperand(MCOperand::CreateImm(Bits));
1462 }
1463
addMSRSystemRegisterOperands(MCInst & Inst,unsigned N) const1464 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1465 assert(N == 1 && "Invalid number of operands!");
1466
1467 bool Valid;
1468 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
1469 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1470
1471 Inst.addOperand(MCOperand::CreateImm(Bits));
1472 }
1473
addSystemPStateFieldOperands(MCInst & Inst,unsigned N) const1474 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1475 assert(N == 1 && "Invalid number of operands!");
1476
1477 bool Valid;
1478 uint32_t Bits =
1479 AArch64PState::PStateMapper().fromString(getSysReg(), Valid);
1480
1481 Inst.addOperand(MCOperand::CreateImm(Bits));
1482 }
1483
addSysCROperands(MCInst & Inst,unsigned N) const1484 void addSysCROperands(MCInst &Inst, unsigned N) const {
1485 assert(N == 1 && "Invalid number of operands!");
1486 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1487 }
1488
addPrefetchOperands(MCInst & Inst,unsigned N) const1489 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1490 assert(N == 1 && "Invalid number of operands!");
1491 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
1492 }
1493
addShifterOperands(MCInst & Inst,unsigned N) const1494 void addShifterOperands(MCInst &Inst, unsigned N) const {
1495 assert(N == 1 && "Invalid number of operands!");
1496 unsigned Imm =
1497 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1498 Inst.addOperand(MCOperand::CreateImm(Imm));
1499 }
1500
addExtendOperands(MCInst & Inst,unsigned N) const1501 void addExtendOperands(MCInst &Inst, unsigned N) const {
1502 assert(N == 1 && "Invalid number of operands!");
1503 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1504 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1505 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1506 Inst.addOperand(MCOperand::CreateImm(Imm));
1507 }
1508
addExtend64Operands(MCInst & Inst,unsigned N) const1509 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1510 assert(N == 1 && "Invalid number of operands!");
1511 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1512 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1513 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1514 Inst.addOperand(MCOperand::CreateImm(Imm));
1515 }
1516
addMemExtendOperands(MCInst & Inst,unsigned N) const1517 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1518 assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::CreateImm(IsSigned));
    Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
  }

  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::CreateImm(IsSigned));
    Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
  }

  template<int Shift>
  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
  }

  template<int Shift>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
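    // MOVN materializes the bitwise NOT of the requested value, so invert
    // Value before slicing out the 16-bit chunk for this shift position.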
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
  }

  void print(raw_ostream &OS) const override;

  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.isVector = isVector;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
                   char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementKind = ElementKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
                                                          unsigned ShiftAmount,
                                                          SMLoc S, SMLoc E,
                                                          MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
    Op->ShiftedImm.Val = Val;
    Op->ShiftedImm.ShiftAmount = ShiftAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
                                                     MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<AArch64Operand>
  CreateSysReg(StringRef Str, SMLoc S, uint64_t FeatureBits, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.FeatureBits = FeatureBits;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
                                                        MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
    Op->Prefetch.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<AArch64Operand>
  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
};

} // end anonymous namespace.

void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm() << "("
       << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
    break;
  case k_Barrier: {
    bool Valid;
    StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
    if (Valid)
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    getShiftedImmVal()->print(OS);
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    bool Valid;
    StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
    if (Valid)
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_ShiftExtend: {
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
  }
}

/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

static unsigned matchVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name)
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}

static bool isValidVectorKind(StringRef Name) {
  return StringSwitch<bool>(Name.lower())
      .Case(".8b", true)
      .Case(".16b", true)
      .Case(".4h", true)
      .Case(".8h", true)
      .Case(".2s", true)
      .Case(".4s", true)
      .Case(".1d", true)
      .Case(".2d", true)
      .Case(".1q", true)
      // Accept the width neutral ones, too, for verbose syntax. If those
      // aren't used in the right places, the token operand won't match so
      // all will work out.
      .Case(".b", true)
      .Case(".h", true)
      .Case(".s", true)
      .Case(".d", true)
      .Default(false);
}

static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
                                 char &ElementKind) {
  assert(isValidVectorKind(Name));

  ElementKind = Name.lower()[Name.size() - 1];
  NumElements = 0;

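  // e.g. ".4s" yields NumElements = 4 and ElementKind = 's'; a width-neutral
  // kind like ".b" (Name.size() == 2) leaves NumElements at 0.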
  if (Name.size() == 2)
    return;

  // Parse the lane count
  Name = Name.drop_front();
  while (isdigit(Name.front())) {
    NumElements = 10 * NumElements + (Name.front() - '0');
    Name = Name.drop_front();
  }
}

bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                     SMLoc &EndLoc) {
  StartLoc = getLoc();
  RegNo = tryParseRegister();
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  return (RegNo == (unsigned)-1);
}

// Matches a register name or register alias previously defined by '.req'
unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
                                                  bool isVector) {
  unsigned RegNum = isVector ? matchVectorRegName(Name)
                             : MatchRegisterName(Name);

  if (RegNum == 0) {
    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    auto Entry = RegisterReqs.find(Name.lower());
    if (Entry == RegisterReqs.end())
      return 0;
    // set RegNum if the match is the right kind of register
    if (isVector == Entry->getValue().first)
      RegNum = Entry->getValue().second;
  }
  return RegNum;
}

/// tryParseRegister - Try to parse a register name. The token must be an
/// Identifier when called, and if it is a register name the token is eaten and
/// the register is added to the operand list.
int AArch64AsmParser::tryParseRegister() {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
  // Also handle a few aliases of registers.
  if (RegNum == 0)
    RegNum = StringSwitch<unsigned>(lowerCase)
                 .Case("fp", AArch64::FP)
                 .Case("lr", AArch64::LR)
                 .Case("x31", AArch64::XZR)
                 .Case("w31", AArch64::WZR)
                 .Default(0);

  if (RegNum == 0)
    return -1;

  Parser.Lex(); // Eat identifier token.
  return RegNum;
}

/// tryMatchVectorRegister - Try to parse a vector register name with optional
/// kind specifier. If it is a register specifier, eat the token and return it.
int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    TokError("vector register expected");
    return -1;
  }

  StringRef Name = Parser.getTok().getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchRegisterNameAlias(Head, true);

  if (RegNum) {
    if (Next != StringRef::npos) {
      Kind = Name.slice(Next, StringRef::npos);
      if (!isValidVectorKind(Kind)) {
        TokError("invalid vector kind qualifier");
        return -1;
      }
    }
    Parser.Lex(); // Eat the register token.
    return RegNum;
  }

  if (expected)
    TokError("vector register expected");
  return -1;
}

/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  StringRef Tok = Parser.getTok().getIdentifier();
  if (Tok[0] != 'c' && Tok[0] != 'C') {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  uint32_t CRNum;
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(
      AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
  return MatchOperand_Success;
}

/// tryParsePrefetch - Try to parse a prefetch operand.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  // Either an identifier for named values or a 5-bit immediate.
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    if (Hash)
      Parser.Lex(); // Eat hash token.
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > 31) {
      TokError("prefetch operand out of range, [0,31] expected");
      return MatchOperand_ParseFail;
    }

    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;
  }

  bool Valid;
  unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
  if (!Valid) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
  return MatchOperand_Success;
}

/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr;

  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}

/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr;

  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (getParser().parseExpression(Expr))
    return MatchOperand_ParseFail;

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}

/// tryParseFPImm - A floating point immediate expression operand.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = false;
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'
    Hash = true;
  }

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    // Check for out of range values. As an exception, we let Zero through,
    // as we handle that special case in post-processing before matching in
    // order to use the zero register for it.
    if (Val == -1 && !RealVal.isZero()) {
      TokError("expected compatible register or floating-point constant");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val;
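    // A hexadecimal literal (e.g. #0x70) is taken as the already-encoded
    // 8-bit FP immediate (0-255), not as a floating-point value.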
    if (!isNegative && Tok.getString().startswith("0x")) {
      Val = Tok.getIntVal();
      if (Val > 255 || Val < 0) {
        TokError("encoded floating point value out of range");
        return MatchOperand_ParseFail;
      }
    } else {
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      // If we had a '-' in front, toggle the sign bit.
      IntVal ^= (uint64_t)isNegative << 63;
      Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    }
    Parser.Lex(); // Eat the token.
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  if (!Hash)
    return MatchOperand_NoMatch;

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}

/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    uint64_t ShiftAmount = 0;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
    if (MCE) {
      int64_t Val = MCE->getValue();
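      // Fold a constant that only fits when scaled into its shifted form,
      // e.g. #0x3000 becomes "#3, lsl #12".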
      if (Val > 0xfff && (Val & 0xfff) == 0) {
        Imm = MCConstantExpr::Create(Val >> 12, getContext());
        ShiftAmount = 12;
      }
    }
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
                                                        getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex();
  }

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}

/// parseCondCodeString - Parse a Condition Code string.
AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                               .Case("eq", AArch64CC::EQ)
                               .Case("ne", AArch64CC::NE)
                               .Case("cs", AArch64CC::HS)
                               .Case("hs", AArch64CC::HS)
                               .Case("cc", AArch64CC::LO)
                               .Case("lo", AArch64CC::LO)
                               .Case("mi", AArch64CC::MI)
                               .Case("pl", AArch64CC::PL)
                               .Case("vs", AArch64CC::VS)
                               .Case("vc", AArch64CC::VC)
                               .Case("hi", AArch64CC::HI)
                               .Case("ls", AArch64CC::LS)
                               .Case("ge", AArch64CC::GE)
                               .Case("lt", AArch64CC::LT)
                               .Case("gt", AArch64CC::GT)
                               .Case("le", AArch64CC::LE)
                               .Case("al", AArch64CC::AL)
                               .Case("nv", AArch64CC::NV)
                               .Default(AArch64CC::Invalid);
  return CC;
}

/// parseCondCode - Parse a Condition Code operand.
bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
                                     bool invertCondCode) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  StringRef Cond = Tok.getString();
  AArch64CC::CondCode CC = parseCondCodeString(Cond);
  if (CC == AArch64CC::Invalid)
    return TokError("invalid condition code");
  Parser.Lex(); // Eat identifier token.

  if (invertCondCode) {
    if (CC == AArch64CC::AL || CC == AArch64CC::NV)
      return TokError("condition codes AL and NV are invalid for this instruction");
    CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
  }

  Operands.push_back(
      AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
  return false;
}

/// tryParseOptionalShiftExtend - Some operands take an optional shift or
/// extend argument. Parse it if present.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  bool Hash = getLexer().is(AsmToken::Hash);
  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate; #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  if (Hash)
    Parser.Lex(); // Eat the '#'.

  // Make sure we do actually have a number or a parenthesized expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}

/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  const MCExpr *Expr = nullptr;

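// Expands an alias into the four raw SYS operands: the op1 immediate, the
// Cn and Cm system control register operands, and the op2 immediate.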
#define SYS_ALIAS(op1, Cn, Cm, op2)                                           \
  do {                                                                        \
    Expr = MCConstantExpr::Create(op1, getContext());                         \
    Operands.push_back(                                                       \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));          \
    Operands.push_back(                                                       \
        AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));          \
    Operands.push_back(                                                       \
        AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));          \
    Expr = MCConstantExpr::Create(op2, getContext());                         \
    Operands.push_back(                                                       \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));          \
  } while (0)

  if (Mnemonic == "ic") {
    if (!Op.compare_lower("ialluis")) {
      // SYS #0, C7, C1, #0
      SYS_ALIAS(0, 7, 1, 0);
    } else if (!Op.compare_lower("iallu")) {
      // SYS #0, C7, C5, #0
      SYS_ALIAS(0, 7, 5, 0);
    } else if (!Op.compare_lower("ivau")) {
      // SYS #3, C7, C5, #1
      SYS_ALIAS(3, 7, 5, 1);
    } else {
      return TokError("invalid operand for IC instruction");
    }
  } else if (Mnemonic == "dc") {
    if (!Op.compare_lower("zva")) {
      // SYS #3, C7, C4, #1
      SYS_ALIAS(3, 7, 4, 1);
    } else if (!Op.compare_lower("ivac")) {
      // SYS #0, C7, C6, #1
      SYS_ALIAS(0, 7, 6, 1);
    } else if (!Op.compare_lower("isw")) {
      // SYS #0, C7, C6, #2
      SYS_ALIAS(0, 7, 6, 2);
    } else if (!Op.compare_lower("cvac")) {
      // SYS #3, C7, C10, #1
      SYS_ALIAS(3, 7, 10, 1);
    } else if (!Op.compare_lower("csw")) {
      // SYS #0, C7, C10, #2
      SYS_ALIAS(0, 7, 10, 2);
    } else if (!Op.compare_lower("cvau")) {
      // SYS #3, C7, C11, #1
      SYS_ALIAS(3, 7, 11, 1);
    } else if (!Op.compare_lower("civac")) {
      // SYS #3, C7, C14, #1
      SYS_ALIAS(3, 7, 14, 1);
    } else if (!Op.compare_lower("cisw")) {
      // SYS #0, C7, C14, #2
      SYS_ALIAS(0, 7, 14, 2);
    } else {
      return TokError("invalid operand for DC instruction");
    }
  } else if (Mnemonic == "at") {
    if (!Op.compare_lower("s1e1r")) {
      // SYS #0, C7, C8, #0
      SYS_ALIAS(0, 7, 8, 0);
    } else if (!Op.compare_lower("s1e2r")) {
      // SYS #4, C7, C8, #0
      SYS_ALIAS(4, 7, 8, 0);
    } else if (!Op.compare_lower("s1e3r")) {
      // SYS #6, C7, C8, #0
      SYS_ALIAS(6, 7, 8, 0);
    } else if (!Op.compare_lower("s1e1w")) {
      // SYS #0, C7, C8, #1
      SYS_ALIAS(0, 7, 8, 1);
    } else if (!Op.compare_lower("s1e2w")) {
      // SYS #4, C7, C8, #1
      SYS_ALIAS(4, 7, 8, 1);
    } else if (!Op.compare_lower("s1e3w")) {
      // SYS #6, C7, C8, #1
      SYS_ALIAS(6, 7, 8, 1);
    } else if (!Op.compare_lower("s1e0r")) {
      // SYS #0, C7, C8, #2
      SYS_ALIAS(0, 7, 8, 2);
    } else if (!Op.compare_lower("s1e0w")) {
      // SYS #0, C7, C8, #3
      SYS_ALIAS(0, 7, 8, 3);
    } else if (!Op.compare_lower("s12e1r")) {
      // SYS #4, C7, C8, #4
      SYS_ALIAS(4, 7, 8, 4);
    } else if (!Op.compare_lower("s12e1w")) {
      // SYS #4, C7, C8, #5
      SYS_ALIAS(4, 7, 8, 5);
    } else if (!Op.compare_lower("s12e0r")) {
      // SYS #4, C7, C8, #6
      SYS_ALIAS(4, 7, 8, 6);
    } else if (!Op.compare_lower("s12e0w")) {
      // SYS #4, C7, C8, #7
      SYS_ALIAS(4, 7, 8, 7);
    } else {
      return TokError("invalid operand for AT instruction");
    }
  } else if (Mnemonic == "tlbi") {
    if (!Op.compare_lower("vmalle1is")) {
      // SYS #0, C8, C3, #0
      SYS_ALIAS(0, 8, 3, 0);
    } else if (!Op.compare_lower("alle2is")) {
      // SYS #4, C8, C3, #0
      SYS_ALIAS(4, 8, 3, 0);
    } else if (!Op.compare_lower("alle3is")) {
      // SYS #6, C8, C3, #0
      SYS_ALIAS(6, 8, 3, 0);
    } else if (!Op.compare_lower("vae1is")) {
      // SYS #0, C8, C3, #1
      SYS_ALIAS(0, 8, 3, 1);
    } else if (!Op.compare_lower("vae2is")) {
      // SYS #4, C8, C3, #1
      SYS_ALIAS(4, 8, 3, 1);
    } else if (!Op.compare_lower("vae3is")) {
      // SYS #6, C8, C3, #1
      SYS_ALIAS(6, 8, 3, 1);
    } else if (!Op.compare_lower("aside1is")) {
      // SYS #0, C8, C3, #2
      SYS_ALIAS(0, 8, 3, 2);
    } else if (!Op.compare_lower("vaae1is")) {
      // SYS #0, C8, C3, #3
      SYS_ALIAS(0, 8, 3, 3);
    } else if (!Op.compare_lower("alle1is")) {
      // SYS #4, C8, C3, #4
      SYS_ALIAS(4, 8, 3, 4);
    } else if (!Op.compare_lower("vale1is")) {
      // SYS #0, C8, C3, #5
      SYS_ALIAS(0, 8, 3, 5);
    } else if (!Op.compare_lower("vaale1is")) {
      // SYS #0, C8, C3, #7
      SYS_ALIAS(0, 8, 3, 7);
    } else if (!Op.compare_lower("vmalle1")) {
      // SYS #0, C8, C7, #0
      SYS_ALIAS(0, 8, 7, 0);
    } else if (!Op.compare_lower("alle2")) {
      // SYS #4, C8, C7, #0
      SYS_ALIAS(4, 8, 7, 0);
    } else if (!Op.compare_lower("vale2is")) {
      // SYS #4, C8, C3, #5
      SYS_ALIAS(4, 8, 3, 5);
    } else if (!Op.compare_lower("vale3is")) {
      // SYS #6, C8, C3, #5
      SYS_ALIAS(6, 8, 3, 5);
    } else if (!Op.compare_lower("alle3")) {
      // SYS #6, C8, C7, #0
      SYS_ALIAS(6, 8, 7, 0);
    } else if (!Op.compare_lower("vae1")) {
      // SYS #0, C8, C7, #1
      SYS_ALIAS(0, 8, 7, 1);
    } else if (!Op.compare_lower("vae2")) {
      // SYS #4, C8, C7, #1
      SYS_ALIAS(4, 8, 7, 1);
    } else if (!Op.compare_lower("vae3")) {
      // SYS #6, C8, C7, #1
      SYS_ALIAS(6, 8, 7, 1);
    } else if (!Op.compare_lower("aside1")) {
      // SYS #0, C8, C7, #2
      SYS_ALIAS(0, 8, 7, 2);
    } else if (!Op.compare_lower("vaae1")) {
      // SYS #0, C8, C7, #3
      SYS_ALIAS(0, 8, 7, 3);
    } else if (!Op.compare_lower("alle1")) {
      // SYS #4, C8, C7, #4
      SYS_ALIAS(4, 8, 7, 4);
    } else if (!Op.compare_lower("vale1")) {
      // SYS #0, C8, C7, #5
      SYS_ALIAS(0, 8, 7, 5);
    } else if (!Op.compare_lower("vale2")) {
      // SYS #4, C8, C7, #5
      SYS_ALIAS(4, 8, 7, 5);
    } else if (!Op.compare_lower("vale3")) {
      // SYS #6, C8, C7, #5
      SYS_ALIAS(6, 8, 7, 5);
    } else if (!Op.compare_lower("vaale1")) {
      // SYS #0, C8, C7, #7
      SYS_ALIAS(0, 8, 7, 7);
    } else if (!Op.compare_lower("ipas2e1")) {
      // SYS #4, C8, C4, #1
      SYS_ALIAS(4, 8, 4, 1);
    } else if (!Op.compare_lower("ipas2le1")) {
      // SYS #4, C8, C4, #5
      SYS_ALIAS(4, 8, 4, 5);
    } else if (!Op.compare_lower("ipas2e1is")) {
      // SYS #4, C8, C0, #1
      SYS_ALIAS(4, 8, 0, 1);
    } else if (!Op.compare_lower("ipas2le1is")) {
      // SYS #4, C8, C0, #5
      SYS_ALIAS(4, 8, 0, 5);
    } else if (!Op.compare_lower("vmalls12e1")) {
      // SYS #4, C8, C7, #6
      SYS_ALIAS(4, 8, 7, 6);
    } else if (!Op.compare_lower("vmalls12e1is")) {
      // SYS #4, C8, C3, #6
      SYS_ALIAS(4, 8, 3, 6);
    } else {
      return TokError("invalid operand for TLBI instruction");
    }
  }

#undef SYS_ALIAS

  Parser.Lex(); // Eat operand.

  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (getLexer().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat comma.

    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");

    HasRegister = true;
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    Parser.eatToEndOfStatement();
    return TokError("unexpected token in argument list");
  }

  if (ExpectRegister && !HasRegister) {
    return TokError("specified " + Mnemonic + " op requires a register");
  } else if (!ExpectRegister && HasRegister) {
    return TokError("specified " + Mnemonic + " op does not use a register");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}

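/// tryParseBarrierOperand - Parse a DMB/DSB/ISB barrier operand: either a
/// named option such as "sy" or a #imm in the range [0, 15].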
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  // Can be either a #imm style literal or an option name
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    if (Hash)
      Parser.Lex(); // Eat the '#'
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(
        AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  bool Valid;
  unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
  if (!Valid) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}

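/// tryParseSysReg - Parse a system register operand for MRS/MSR. The name is
/// recorded verbatim here and validated later during operand matching.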
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
                     STI.getFeatureBits(), getContext()));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}

/// tryParseVectorRegister - Parse a vector register operand.
bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  int64_t Reg = tryMatchVectorRegister(Kind, false);
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // If there is an index specifier following the register, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }

  return false;
}

/// parseRegister - Parse a non-vector register operand.
bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  // Try for a vector register.
  if (!tryParseVectorRegister(Operands))
    return false;

  // Try for a scalar register.
  int64_t Reg = tryParseRegister();
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));

  // A small number of instructions (FMOVXDhighr, for example) have "[1]"
  // as a string token in the instruction itself.
  if (getLexer().getKind() == AsmToken::LBrac) {
    SMLoc LBracS = getLoc();
    Parser.Lex();
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Integer)) {
      SMLoc IntS = getLoc();
      int64_t Val = Tok.getIntVal();
      if (Val == 1) {
        Parser.Lex();
        if (getLexer().getKind() == AsmToken::RBrac) {
          SMLoc RBracS = getLoc();
          Parser.Lex();
          Operands.push_back(
              AArch64Operand::CreateToken("[", false, LBracS, getContext()));
          Operands.push_back(
              AArch64Operand::CreateToken("1", false, IntS, getContext()));
          Operands.push_back(
              AArch64Operand::CreateToken("]", false, RBracS, getContext()));
          return false;
        }
      }
    }
  }

  return false;
}

bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  MCAsmParser &Parser = getParser();
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

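  // A leading ":specifier:" selects an ELF relocation variant, e.g.
  // ":lo12:sym" refers to the low 12 bits of sym's address.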
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat ':'
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier)) {
      Error(Parser.getTok().getLoc(),
            "expect relocation specifier in operand after ':'");
      return true;
    }

    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID) {
      Error(Parser.getTok().getLoc(),
            "expect relocation specifier in operand after ':'");
      return true;
    }

    Parser.Lex(); // Eat identifier

    if (Parser.getTok().isNot(AsmToken::Colon)) {
      Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
      return true;
    }
    Parser.Lex(); // Eat ':'
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  if (HasELFModifier)
    ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());

  return false;
}

/// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
  SMLoc S = getLoc();
  Parser.Lex(); // Eat left bracket token.
  StringRef Kind;
  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
  if (FirstReg == -1)
    return true;
  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (Parser.getTok().is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the minus.

    SMLoc Loc = getLoc();
    StringRef NextKind;
    int64_t Reg = tryMatchVectorRegister(NextKind, true);
    if (Reg == -1)
      return true;
    // Any kind suffixes must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

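    // A range may wrap past v31, e.g. "{ v31.4s - v2.4s }" covers v31, v0,
    // v1 and v2 (Space = 3, so Count becomes 4).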
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      return Error(Loc, "invalid number of vectors");
    }

    Count += Space;
  } else {
    while (Parser.getTok().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma token.

      SMLoc Loc = getLoc();
      StringRef NextKind;
      int64_t Reg = tryMatchVectorRegister(NextKind, true);
      if (Reg == -1)
        return true;
      // Any kind suffixes must match on all regs in the list.
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
        return Error(Loc, "registers must be sequential");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(getLoc(), "'}' expected");
  Parser.Lex(); // Eat the '}' token.

  if (Count > 4)
    return Error(S, "invalid number of vectors");

  unsigned NumElements = 0;
  char ElementKind = 0;
  if (!Kind.empty())
    parseValidVectorKind(Kind, NumElements, ElementKind);

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));

  // If there is an index specifier following the list, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }
  return false;
}

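/// tryParseGPR64sp0Operand - Parse a 64-bit GPR-or-SP register, optionally
/// followed by ", #0"; if the immediate is present it must be zero.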
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);

  MCContext &Ctx = getContext();
  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
  if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
    return MatchOperand_NoMatch;

  SMLoc S = getLoc();
  Parser.Lex(); // Eat register

  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(
        AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
    return MatchOperand_Success;
  }
  Parser.Lex(); // Eat comma.

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat hash

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
  return MatchOperand_Success;
}

/// parseOperand - Parse an AArch64 instruction operand. For now this parses
/// the operand regardless of the mnemonic.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();
  // Check if the current operand has a custom associated parser; if so, try to
  // custom parse the operand, or fall back to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    return parseVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();
    if (getLexer().is(AsmToken::Hash))
      Parser.Lex();

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    SMLoc Loc = Parser.getTok().getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr x0, =val)
      return Error(Loc, "unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isReg())
      return true;

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext &Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the operand is an immediate that fits into a mov, replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
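      // e.g. "ldr x0, =0x120000" becomes "movz x0, #0x12, lsl #16": strip
      // trailing 16-bit zero chunks until the value fits in a single movz.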
3105 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3106 ShiftAmt += 16;
3107 Imm >>= 16;
3108 }
3109 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3110 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3111 Operands.push_back(AArch64Operand::CreateImm(
3112 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3113 if (ShiftAmt)
3114 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3115 ShiftAmt, true, S, E, Ctx));
3116 return false;
3117 }
3118 APInt Simm = APInt(64, Imm << ShiftAmt);
3119 // check if the immediate is an unsigned or signed 32-bit int for W regs
3120 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3121 return Error(Loc, "Immediate too large for register");
3122 }
3123 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3124 const MCExpr *CPLoc =
3125 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3126 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3127 return false;
3128 }
3129 }
3130 }

/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the instruction.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
    bool IsError = parseSysAlias(Head, NameLoc, Operands);
    if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
      Parser.eatToEndOfStatement();
    return IsError;
  }

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
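  // (e.g. "b.ne" is pushed as a "b" token, a "." token and a CondCode
  // operand for NE).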
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which
  // must be parsed and turned into an immediate operand.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");
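  // (For example, "cinc w0, w1, ne" is "csinc w0, w1, w1, eq", so the
  // condition code parsed here must be inverted before matching.)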

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, false, false)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    unsigned N = 2;
    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases
      // to consider (i.e. notional operands not separated by commas). Both
      // are due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
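      // (e.g. in "ldr x0, [x1, #8]!" the ']' and '!' follow the final
      // comma-separated operand directly.)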
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      ++N;
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = Parser.getTok().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}

// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
bool AArch64AsmParser::validateInstruction(MCInst &Inst,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
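  // (e.g. "ldp x0, x1, [x0], #16" writes back into a loaded register, and
  // "ldp x2, x2, [x3]" loads the same register twice.)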
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    // FALLTHROUGH
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a destination");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  }

  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
        return Error(Loc[2], "invalid immediate expression");
      }

      // Only allow these with ADDXri.
      if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
           DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
          Inst.getOpcode() == AArch64::ADDXri)
        return false;

      // Only allow these with ADDXri/ADDWri
      if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
           ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
          (Inst.getOpcode() == AArch64::ADDXri ||
           Inst.getOpcode() == AArch64::ADDWri))
        return false;

      // Don't allow expressions in the immediate field otherwise
      return Error(Loc[2], "invalid immediate expression");
    }
    return false;
  }
  default:
    return false;
  }
}

bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
  switch (ErrCode) {
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx', 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
      "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
      "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  case Match_InvalidIndex1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexB:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexH:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexS:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexD:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_MnemonicFail:
    return Error(Loc, "unrecognized instruction mnemonic");
  default:
    llvm_unreachable("unexpected error code!");
  }
}

static const char *getSubtargetFeatureName(uint64_t Val);

bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                               OperandVector &Operands,
                                               MCStreamer &Out,
                                               uint64_t &ErrorInfo,
                                               bool MatchingInlineAsm) {
  assert(!Operands.empty() && "Unexpected empty operand list!");
  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
  assert(Op.isToken() && "Leading operand should always be a mnemonic!");

  StringRef Tok = Op.getToken();
  unsigned NumOperands = Operands.size();

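  // "lsl Rd, Rn, #shift" with an immediate is an alias for UBFM; rewrite it
  // here since the matcher only knows the UBFM form (e.g. "lsl w0, w1, #3"
  // becomes "ubfm w0, w1, #29, #28").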
  if (NumOperands == 4 && Tok == "lsl") {
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    if (Op2.isReg() && Op3.isImm()) {
      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
      if (Op3CE) {
        uint64_t Op3Val = Op3CE->getValue();
        uint64_t NewOp3Val = 0;
        uint64_t NewOp4Val = 0;
        if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
                Op2.getReg())) {
          NewOp3Val = (32 - Op3Val) & 0x1f;
          NewOp4Val = 31 - Op3Val;
        } else {
          NewOp3Val = (64 - Op3Val) & 0x3f;
          NewOp4Val = 63 - Op3Val;
        }

        const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
        const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());

        Operands[0] = AArch64Operand::CreateToken(
            "ubfm", false, Op.getStartLoc(), getContext());
        Operands.push_back(AArch64Operand::CreateImm(
            NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
        Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
                                                Op3.getEndLoc(), getContext());
      }
    }
  } else if (NumOperands == 5) {
    // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
    // UBFIZ -> UBFM aliases.
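    // The rewrite is immr = (regwidth - lsb) % regwidth and imms = width - 1
    // (e.g. "bfi w0, w1, #4, #8" becomes "bfm w0, w1, #28, #7").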
    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);

      if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());

        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();

          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                  Op1.getReg()))
            RegWidth = 64;
          else
            RegWidth = 32;

          if (Op3Val >= RegWidth)
            return Error(Op3.getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4.getStartLoc(),
                         "expected integer in range [1, 32]");

          uint64_t NewOp3Val = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
                  Op1.getReg()))
            NewOp3Val = (32 - Op3Val) & 0x1f;
          else
            NewOp3Val = (64 - Op3Val) & 0x3f;

          uint64_t NewOp4Val = Op4Val - 1;

          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
            return Error(Op4.getStartLoc(),
                         "requested insert overflows register");

          const MCExpr *NewOp3 =
              MCConstantExpr::Create(NewOp3Val, getContext());
          const MCExpr *NewOp4 =
              MCConstantExpr::Create(NewOp4Val, getContext());
          Operands[3] = AArch64Operand::CreateImm(
              NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
          Operands[4] = AArch64Operand::CreateImm(
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
          if (Tok == "bfi")
            Operands[0] = AArch64Operand::CreateToken(
                "bfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "sbfiz")
            Operands[0] = AArch64Operand::CreateToken(
                "sbfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "ubfiz")
            Operands[0] = AArch64Operand::CreateToken(
                "ubfm", false, Op.getStartLoc(), getContext());
          else
            llvm_unreachable("No valid mnemonic for alias?");
        }
      }

      // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
      // UBFX -> UBFM aliases.
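      // Here immr stays at the lsb and imms becomes lsb + width - 1
      // (e.g. "ubfx w0, w1, #4, #8" becomes "ubfm w0, w1, #4, #11").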
    } else if (NumOperands == 5 &&
               (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);

      if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());

        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();

          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                  Op1.getReg()))
            RegWidth = 64;
          else
            RegWidth = 32;

          if (Op3Val >= RegWidth)
            return Error(Op3.getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4.getStartLoc(),
                         "expected integer in range [1, 32]");

          uint64_t NewOp4Val = Op3Val + Op4Val - 1;

          if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
            return Error(Op4.getStartLoc(),
                         "requested extract overflows register");

          const MCExpr *NewOp4 =
              MCConstantExpr::Create(NewOp4Val, getContext());
          Operands[4] = AArch64Operand::CreateImm(
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
          if (Tok == "bfxil")
            Operands[0] = AArch64Operand::CreateToken(
                "bfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "sbfx")
            Operands[0] = AArch64Operand::CreateToken(
                "sbfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "ubfx")
            Operands[0] = AArch64Operand::CreateToken(
                "ubfm", false, Op.getStartLoc(), getContext());
          else
            llvm_unreachable("No valid mnemonic for alias?");
        }
      }
    }
  }
  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
  //        InstAlias can't quite handle this since the reg classes aren't
  //        subclasses.
  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
    // The source register can be Wn here, but the matcher expects a
    // GPR64. Twiddle it here if necessary.
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
    if (Op.isReg()) {
      unsigned Reg = getXRegFromWReg(Op.getReg());
      Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
                                              Op.getEndLoc(), getContext());
    }
  }
  // FIXME: Likewise for sxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
    if (Op.isReg() &&
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Op.getReg())) {
      // The source register can be Wn here, but the matcher expects a
      // GPR64. Twiddle it here if necessary.
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
      if (Op.isReg()) {
        unsigned Reg = getXRegFromWReg(Op.getReg());
        Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
                                                Op.getEndLoc(), getContext());
      }
    }
  }
  // FIXME: Likewise for uxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
    if (Op.isReg() &&
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Op.getReg())) {
      // The destination register can be Xn here, but the matcher expects a
      // GPR32. Twiddle it here if necessary.
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
      if (Op.isReg()) {
        unsigned Reg = getWRegFromXReg(Op.getReg());
        Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
                                                Op.getEndLoc(), getContext());
      }
    }
  }

  // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
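  // (e.g. "fmov s0, #0.0" becomes "fmov s0, wzr", since 0.0 is not an
  // encodable 8-bit floating-point immediate.)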
  if (NumOperands == 3 && Tok == "fmov") {
    AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
    if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
      unsigned zreg =
          AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
              RegOp.getReg())
              ? AArch64::WZR
              : AArch64::XZR;
      Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
                                              Op.getEndLoc(), getContext());
    }
  }

  MCInst Inst;
  // First try to match against the secondary set of tables containing the
  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
  unsigned MatchResult =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);

  // If that fails, try against the alternate table containing long-form NEON:
  // "fadd v0.2s, v1.2s, v2.2s"
  if (MatchResult != Match_Success)
    MatchResult =
        MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);

  switch (MatchResult) {
  case Match_Success: {
    // Perform range checking and other semantic validations
    SmallVector<SMLoc, 8> OperandLocs;
    NumOperands = Operands.size();
    for (unsigned i = 1; i < NumOperands; ++i)
      OperandLocs.push_back(Operands[i]->getStartLoc());
    if (validateInstruction(Inst, OperandLocs))
      return true;

    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, STI);
    return false;
  }
  case Match_MissingFeature: {
    assert(ErrorInfo && "Unknown missing feature!");
    // Special case the error message for the very common case where only
    // a single subtarget feature is missing (neon, e.g.).
    std::string Msg = "instruction requires:";
    uint64_t Mask = 1;
    for (unsigned i = 0; i < (sizeof(ErrorInfo) * 8 - 1); ++i) {
      if (ErrorInfo & Mask) {
        Msg += " ";
        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
      }
      Mask <<= 1;
    }
    return Error(IDLoc, Msg);
  }
  case Match_MnemonicFail:
    return showMatchError(IDLoc, MatchResult);
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;

      // If the match failed on a suffix token operand, tweak the diagnostic
      // accordingly. This check must stay inside the ErrorInfo guard: when no
      // operand is flagged (ErrorInfo == ~0ULL), indexing Operands with it
      // would run off the end of the operand list.
      if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
          ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
        MatchResult = Match_InvalidSuffix;
    }

    return showMatchError(ErrorLoc, MatchResult);
  }
  case Match_InvalidMemoryIndexed1:
  case Match_InvalidMemoryIndexed2:
  case Match_InvalidMemoryIndexed4:
  case Match_InvalidMemoryIndexed8:
  case Match_InvalidMemoryIndexed16:
  case Match_InvalidCondCode:
  case Match_AddSubRegExtendSmall:
  case Match_AddSubRegExtendLarge:
  case Match_AddSubSecondSource:
  case Match_LogicalSecondSource:
  case Match_AddSubRegShift32:
  case Match_AddSubRegShift64:
  case Match_InvalidMovImm32Shift:
  case Match_InvalidMovImm64Shift:
  case Match_InvalidFPImm:
  case Match_InvalidMemoryWExtend8:
  case Match_InvalidMemoryWExtend16:
  case Match_InvalidMemoryWExtend32:
  case Match_InvalidMemoryWExtend64:
  case Match_InvalidMemoryWExtend128:
  case Match_InvalidMemoryXExtend8:
  case Match_InvalidMemoryXExtend16:
  case Match_InvalidMemoryXExtend32:
  case Match_InvalidMemoryXExtend64:
  case Match_InvalidMemoryXExtend128:
  case Match_InvalidMemoryIndexed4SImm7:
  case Match_InvalidMemoryIndexed8SImm7:
  case Match_InvalidMemoryIndexed16SImm7:
  case Match_InvalidMemoryIndexedSImm9:
  case Match_InvalidImm0_7:
  case Match_InvalidImm0_15:
  case Match_InvalidImm0_31:
  case Match_InvalidImm0_63:
  case Match_InvalidImm0_127:
  case Match_InvalidImm0_65535:
  case Match_InvalidImm1_8:
  case Match_InvalidImm1_16:
  case Match_InvalidImm1_32:
  case Match_InvalidImm1_64:
  case Match_InvalidIndex1:
  case Match_InvalidIndexB:
  case Match_InvalidIndexH:
  case Match_InvalidIndexS:
  case Match_InvalidIndexD:
  case Match_InvalidLabel:
  case Match_MSR:
  case Match_MRS: {
    if (ErrorInfo >= Operands.size())
      return Error(IDLoc, "too few operands for instruction");
    // Any time we get here, there's nothing fancy to do. Just get the
    // operand SMLoc and display the diagnostic.
    SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
    if (ErrorLoc == SMLoc())
      ErrorLoc = IDLoc;
    return showMatchError(ErrorLoc, MatchResult);
  }
  }

  llvm_unreachable("Implement any new match types added!");
}

/// ParseDirective parses the AArch64-specific directives
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCObjectFileInfo::Environment Format =
      getContext().getObjectFileInfo()->getObjectFileType();
  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
  bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;

  StringRef IDVal = DirectiveID.getIdentifier();
  SMLoc Loc = DirectiveID.getLoc();
  if (IDVal == ".hword")
    return parseDirectiveWord(2, Loc);
  if (IDVal == ".word")
    return parseDirectiveWord(4, Loc);
  if (IDVal == ".xword")
    return parseDirectiveWord(8, Loc);
  if (IDVal == ".tlsdesccall")
    return parseDirectiveTLSDescCall(Loc);
  if (IDVal == ".ltorg" || IDVal == ".pool")
    return parseDirectiveLtorg(Loc);
  if (IDVal == ".unreq")
    return parseDirectiveUnreq(DirectiveID.getLoc());

  if (!IsMachO && !IsCOFF) {
    if (IDVal == ".inst")
      return parseDirectiveInst(Loc);
  }

  return parseDirectiveLOH(IDVal, Loc);
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
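///  e.g. ".hword 0x1234" emits two bytes and ".xword sym" emits eight.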
bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  MCAsmParser &Parser = getParser();
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().parseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

/// parseDirectiveInst
///  ::= .inst opcode [, ...]
bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
  MCAsmParser &Parser = getParser();
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Parser.eatToEndOfStatement();
    Error(Loc, "expected expression following directive");
    return false;
  }

  for (;;) {
    const MCExpr *Expr;

    if (getParser().parseExpression(Expr)) {
      Error(Loc, "expected expression");
      return false;
    }

    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
    if (!Value) {
      Error(Loc, "expected constant expression");
      return false;
    }

    getTargetStreamer().emitInst(Value->getValue());

    if (getLexer().is(AsmToken::EndOfStatement))
      break;

    if (getLexer().isNot(AsmToken::Comma)) {
      Error(Loc, "unexpected token in directive");
      return false;
    }

    Parser.Lex(); // Eat comma.
  }

  Parser.Lex();
  return false;
}

// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (getParser().parseIdentifier(Name))
    return Error(L, "expected symbol after directive");

  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
  Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());

  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));

  getParser().getStreamer().EmitInstruction(Inst, STI);
  return false;
}

/// ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
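/// e.g. ".loh AdrpAdd Lpage, Loff" takes two label arguments.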
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  if (IDVal != MCLOHDirectiveName())
    return true;
  MCLOHType Kind;
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    if (getParser().getTok().isNot(AsmToken::Integer))
      return TokError("expected an identifier or a number in directive");
    // We successfully got a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getParser().getTok().getIntVal();
    if (Id <= -1U && !isValidMCLOHType(Id))
      return TokError("invalid numeric identifier in directive");
    Kind = (MCLOHType)Id;
  } else {
    StringRef Name = getTok().getIdentifier();
    // We successfully parsed an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);

    if (Id == -1)
      return TokError("invalid identifier in directive");
    Kind = (MCLOHType)Id;
  }
  // Consume the identifier.
  Lex();
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);

  assert(NbArgs != -1 && "Invalid number of arguments");

  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    StringRef Name;
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().GetOrCreateSymbol(Name));

    if (Idx + 1 == NbArgs)
      break;
    if (getLexer().isNot(AsmToken::Comma))
      return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
    Lex();
  }
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return TokError("unexpected token in '" + Twine(IDVal) + "' directive");

  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
  return false;
}

/// parseDirectiveLtorg
///  ::= .ltorg | .pool
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  getTargetStreamer().emitCurrentConstantPool();
  return false;
}

/// parseDirectiveReq
///  ::= name .req registername
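///  e.g. "foo .req x4" lets "foo" be written wherever x4 is expected.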
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  MCAsmParser &Parser = getParser();
  Parser.Lex(); // Eat the '.req' token.
  SMLoc SRegLoc = getLoc();
  unsigned RegNum = tryParseRegister();
  bool IsVector = false;

  if (RegNum == static_cast<unsigned>(-1)) {
    StringRef Kind;
    RegNum = tryMatchVectorRegister(Kind, false);
    if (!Kind.empty()) {
      Error(SRegLoc, "vector register without type specifier expected");
      return false;
    }
    IsVector = true;
  }

  if (RegNum == static_cast<unsigned>(-1)) {
    Parser.eatToEndOfStatement();
    Error(SRegLoc, "register name or alias expected");
    return false;
  }

  // Shouldn't be anything else.
  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
    Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
    Parser.eatToEndOfStatement();
    return false;
  }

  Parser.Lex(); // Consume the EndOfStatement

  auto pair = std::make_pair(IsVector, RegNum);
  if (!RegisterReqs.insert(std::make_pair(Name, pair)).second)
    Warning(L, "ignoring redefinition of register alias '" + Name + "'");

  return true;
}

/// parseDirectiveUnreq
///  ::= .unreq registername
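///  e.g. ".unreq foo" removes an alias previously created with .req.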
bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
    Parser.eatToEndOfStatement();
    return false;
  }
  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
  Parser.Lex(); // Eat the identifier.
  return false;
}

bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
  if (!BE)
    return false;

  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
  if (!SE)
    return false;
  DarwinRefKind = SE->getKind();

  if (BE->getOpcode() != MCBinaryExpr::Add &&
      BE->getOpcode() != MCBinaryExpr::Sub)
    return false;

  // See if the addend is a constant, otherwise there's more going
  // on here than we can deal with.
  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
  if (!AddendExpr)
    return false;

  Addend = AddendExpr->getValue();
  if (BE->getOpcode() == MCBinaryExpr::Sub)
    Addend = -Addend;

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}

/// Force static initialization.
extern "C" void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
  RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
  RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
}

#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
#define GET_MATCHER_IMPLEMENTATION
#include "AArch64GenAsmMatcher.inc"

// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                      unsigned Kind) {
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax.
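  // (TableGen encodes the literal's characters by ASCII value in the match
  // class name, so MCK__35_8 is the token "#8": 35 is '#'.)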
  int64_t ExpectedVal;
  switch (Kind) {
  default:
    return Match_InvalidOperand;
  case MCK__35_0:
    ExpectedVal = 0;
    break;
  case MCK__35_1:
    ExpectedVal = 1;
    break;
  case MCK__35_12:
    ExpectedVal = 12;
    break;
  case MCK__35_16:
    ExpectedVal = 16;
    break;
  case MCK__35_2:
    ExpectedVal = 2;
    break;
  case MCK__35_24:
    ExpectedVal = 24;
    break;
  case MCK__35_3:
    ExpectedVal = 3;
    break;
  case MCK__35_32:
    ExpectedVal = 32;
    break;
  case MCK__35_4:
    ExpectedVal = 4;
    break;
  case MCK__35_48:
    ExpectedVal = 48;
    break;
  case MCK__35_6:
    ExpectedVal = 6;
    break;
  case MCK__35_64:
    ExpectedVal = 64;
    break;
  case MCK__35_8:
    ExpectedVal = 8;
    break;
  }
  if (!Op.isImm())
    return Match_InvalidOperand;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
  if (!CE)
    return Match_InvalidOperand;
  if (CE->getValue() == ExpectedVal)
    return Match_Success;
  return Match_InvalidOperand;
}