1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "MCTargetDesc/AArch64AddressingModes.h"
10 #include "MCTargetDesc/AArch64InstPrinter.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64MCTargetDesc.h"
13 #include "MCTargetDesc/AArch64TargetStreamer.h"
14 #include "TargetInfo/AArch64TargetInfo.h"
15 #include "AArch64InstrInfo.h"
16 #include "Utils/AArch64BaseInfo.h"
17 #include "llvm/ADT/APFloat.h"
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/StringExtras.h"
23 #include "llvm/ADT/StringMap.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/StringSwitch.h"
26 #include "llvm/ADT/Twine.h"
27 #include "llvm/MC/MCContext.h"
28 #include "llvm/MC/MCExpr.h"
29 #include "llvm/MC/MCInst.h"
30 #include "llvm/MC/MCLinkerOptimizationHint.h"
31 #include "llvm/MC/MCObjectFileInfo.h"
32 #include "llvm/MC/MCParser/MCAsmLexer.h"
33 #include "llvm/MC/MCParser/MCAsmParser.h"
34 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
35 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
36 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
37 #include "llvm/MC/MCRegisterInfo.h"
38 #include "llvm/MC/MCStreamer.h"
39 #include "llvm/MC/MCSubtargetInfo.h"
40 #include "llvm/MC/MCSymbol.h"
41 #include "llvm/MC/MCTargetOptions.h"
42 #include "llvm/MC/SubtargetFeature.h"
43 #include "llvm/MC/MCValue.h"
44 #include "llvm/Support/Casting.h"
45 #include "llvm/Support/Compiler.h"
46 #include "llvm/Support/ErrorHandling.h"
47 #include "llvm/Support/MathExtras.h"
48 #include "llvm/Support/SMLoc.h"
49 #include "llvm/Support/TargetParser.h"
50 #include "llvm/Support/TargetRegistry.h"
51 #include "llvm/Support/raw_ostream.h"
52 #include <cassert>
53 #include <cctype>
54 #include <cstdint>
55 #include <cstdio>
56 #include <string>
57 #include <tuple>
58 #include <utility>
59 #include <vector>
60
61 using namespace llvm;
62
63 namespace {
64
// Kinds of register name an operand may use; selects which register
// class a parsed name is matched against.
enum class RegKind {
  Scalar,             // Scalar (GPR or FP/SIMD scalar) register.
  NeonVector,         // NEON vector register.
  SVEDataVector,      // SVE data vector register.
  SVEPredicateVector  // SVE predicate register.
};
71
// How a parsed register must relate to the register class the matcher
// expects: exactly equal, or related via a super-/sub-register (used for
// operands such as GPR64as32 / GPR32as64 -- see RegOp::EqualityTy).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
77
/// Target-specific assembly parser for AArch64. Parses instructions,
/// operands and target directives, and drives the table-generated
/// matcher (AArch64GenAsmMatcher.inc).
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  // Records the state of the most recently parsed MOVPRFX instruction so
  // that the following instruction can be validated against the prefix
  // (see validateInstruction).
  class PrefixInfo {
  public:
    // Build a PrefixInfo from \p Inst. The result is inactive unless
    // \p Inst is one of the MOVPRFX opcodes.
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        // Unpredicated movprfx: only the destination is recorded.
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        // Merging predicated movprfx: governing predicate is operand 2.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        // Zeroing predicated movprfx: governing predicate is operand 1.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() : Active(false), Predicated(false) {}
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    // Only meaningful for predicated prefixes; asserted below.
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active;
    bool Predicated;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  // Location of the lexer's current token.
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  // Operand and instruction parsing helpers.
  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  // Target directive parsers.
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  // Windows SEH unwind directive parsers.
  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // Custom operand parsers referenced by the generated matcher.
  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  template<bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  // True when targeting the GNU ILP32 ABI variant (see constructor).
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    // Ensure a target streamer exists; the MCTargetStreamer constructor
    // registers it with S (which is presumed to take ownership -- this
    // matches the raw `new` here).
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool regsEqual(const MCParsedAsmOperand &Op1,
                 const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  // Decompose a symbolic expression into its AArch64/Darwin relocation
  // modifier and addend. Returns false if the expression shape is not
  // understood.
  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
309
310 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
311 /// instruction.
312 class AArch64Operand : public MCParsedAsmOperand {
private:
  // Discriminator selecting the active member of the union below.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  // Source range covered by this operand.
  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  // Immediate plus an explicit left-shift amount (e.g. "#1, lsl #12").
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // Operand payload; the active member is selected by Kind.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
461
462 public:
  // Construct an operand of kind K; the union payload is set afterwards
  // by the Create* factory that selected K.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
464
  // Copy constructor. The union has non-trivial alternatives, so copy
  // only the member selected by the operand kind.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }
517
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// Token text; only valid for k_Token operands.
  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  /// True if the token is a suffix on the mnemonic rather than a
  /// free-standing operand.
  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }
532
  /// Immediate expression; only valid for k_Immediate operands.
  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  /// Value part of a shifted immediate; only valid for k_ShiftedImm.
  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  /// Shift amount of a shifted immediate; only valid for k_ShiftedImm.
  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  /// Condition code; only valid for k_CondCode operands.
  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }
552
  /// FP immediate reconstructed as an IEEE double from the stored bits;
  /// only valid for k_FPImm operands.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  /// Whether the parsed FP literal was representable exactly.
  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  /// Barrier encoding value; only valid for k_Barrier operands.
  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  /// Barrier option spelling as written in the source.
  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  /// Whether the barrier carried the nXS modifier.
  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }
577
  /// Register number; only valid for k_Register operands.
  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  /// How this register may relate to the matcher's expected class
  /// (equal / super-reg / sub-reg).
  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  /// First register of a vector list; only valid for k_VectorList.
  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  /// Number of registers in a vector list; only valid for k_VectorList.
  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  /// Lane index; only valid for k_VectorIndex operands.
  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  /// System register spelling; only valid for k_SysReg operands.
  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }
607
  /// System CRn/CRm immediate; only valid for k_SysCR operands.
  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  /// Prefetch operation encoding; only valid for k_Prefetch operands.
  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  /// PSB hint encoding; only valid for k_PSBHint operands.
  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  /// PSB hint spelling as written in the source.
  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  /// BTI hint encoding; only valid for k_BTIHint operands.
  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  /// BTI hint spelling as written in the source.
  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  /// Prefetch operation spelling as written in the source.
  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }
642
  /// Shift/extend type, whether the operand is a stand-alone shift/extend
  /// (k_ShiftExtend) or a register with an attached shift/extend.
  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  /// Shift/extend amount (see getShiftExtendType for valid kinds).
  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  /// Whether an explicit amount was written (false means the default
  /// 'lsl #0' -- see RegOp::ShiftExtend).
  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }
666
  bool isImm() const override { return Kind == k_Immediate; }
  // AArch64 operands never use the generic memory-operand kind.
  bool isMem() const override { return false; }
669
isUImm6() const670 bool isUImm6() const {
671 if (!isImm())
672 return false;
673 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
674 if (!MCE)
675 return false;
676 int64_t Val = MCE->getValue();
677 return (Val >= 0 && Val < 64);
678 }
679
  /// Signed immediate of Width bits, unscaled.
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  /// Signed Bits-bit immediate that is a multiple of Scale.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  /// Unsigned Bits-bit immediate that is a multiple of Scale.
  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
    return isImmScaled<Bits, Scale>(false);
  }
689
690 template <int Bits, int Scale>
isImmScaled(bool Signed) const691 DiagnosticPredicate isImmScaled(bool Signed) const {
692 if (!isImm())
693 return DiagnosticPredicateTy::NoMatch;
694
695 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
696 if (!MCE)
697 return DiagnosticPredicateTy::NoMatch;
698
699 int64_t MinVal, MaxVal;
700 if (Signed) {
701 int64_t Shift = Bits - 1;
702 MinVal = (int64_t(1) << Shift) * -Scale;
703 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
704 } else {
705 MinVal = 0;
706 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
707 }
708
709 int64_t Val = MCE->getValue();
710 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
711 return DiagnosticPredicateTy::Match;
712
713 return DiagnosticPredicateTy::NearMatch;
714 }
715
isSVEPattern() const716 DiagnosticPredicate isSVEPattern() const {
717 if (!isImm())
718 return DiagnosticPredicateTy::NoMatch;
719 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
720 if (!MCE)
721 return DiagnosticPredicateTy::NoMatch;
722 int64_t Val = MCE->getValue();
723 if (Val >= 0 && Val < 32)
724 return DiagnosticPredicateTy::Match;
725 return DiagnosticPredicateTy::NearMatch;
726 }
727
  /// Whether a symbolic expression is acceptable where a uimm12 memory
  /// offset is expected, i.e. it carries (or may carry) a page-offset
  /// style relocation modifier.
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    // Accepted :lo12:-family (and related page-offset) modifiers.
    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
763
isUImm12Offset() const764 template <int Scale> bool isUImm12Offset() const {
765 if (!isImm())
766 return false;
767
768 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
769 if (!MCE)
770 return isSymbolicUImm12Offset(getImm());
771
772 int64_t Val = MCE->getValue();
773 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
774 }
775
776 template <int N, int M>
isImmInRange() const777 bool isImmInRange() const {
778 if (!isImm())
779 return false;
780 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
781 if (!MCE)
782 return false;
783 int64_t Val = MCE->getValue();
784 return (Val >= N && Val <= M);
785 }
786
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Upper is a mask of the bits above the width of T. Built with two
    // half-width shifts to avoid left shift by 64 directly (which would be
    // undefined when sizeof(T) == 8).
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    // Check the low sizeof(T)*8 bits against the logical-immediate encoding.
    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
806
  /// True if the operand was parsed as "imm, lsl #N" (k_ShiftedImm).
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
808
809 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
810 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
811 /// immediate that can be shifted by 'Shift'.
812 template <unsigned Width>
getShiftedVal() const813 Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
814 if (isShiftedImm() && Width == getShiftedImmShift())
815 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
816 return std::make_pair(CE->getValue(), Width);
817
818 if (isImm())
819 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
820 int64_t Val = CE->getValue();
821 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
822 return std::make_pair(Val >> Width, Width);
823 else
824 return std::make_pair(Val, 0u);
825 }
826
827 return {};
828 }
829
  /// True if the operand is usable as an ADD/SUB immediate: a constant
  /// fitting in 12 bits after an optional 'lsl #12', or a symbolic
  /// expression with an accepted page-offset relocation modifier.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    // Symbolic expressions are accepted when they carry one of the
    // page-offset-style modifiers below.
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
874
isAddSubImmNeg() const875 bool isAddSubImmNeg() const {
876 if (!isShiftedImm() && !isImm())
877 return false;
878
879 // Otherwise it should be a real negative immediate in range.
880 if (auto ShiftedVal = getShiftedVal<12>())
881 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
882
883 return false;
884 }
885
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements cannot use the 'lsl #8' form.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          // Shift in the unsigned domain: left-shifting a negative signed
          // value would be undefined behaviour.
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
906
907 // Unsigned value in the range 0 to 255. For element widths of
908 // 16 bits or higher it may also be a signed multiple of 256 in the
909 // range 0 to 65280.
isSVEAddSubImm() const910 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
911 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
912 return DiagnosticPredicateTy::NoMatch;
913
914 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
915 std::is_same<int8_t, T>::value;
916 if (auto ShiftedImm = getShiftedVal<8>())
917 if (!(IsByte && ShiftedImm->second) &&
918 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
919 << ShiftedImm->second))
920 return DiagnosticPredicateTy::Match;
921
922 return DiagnosticPredicateTy::NearMatch;
923 }
924
isSVEPreferredLogicalImm() const925 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
926 if (isLogicalImm<T>() && !isSVECpyImm<T>())
927 return DiagnosticPredicateTy::Match;
928 return DiagnosticPredicateTy::NoMatch;
929 }
930
  bool isCondCode() const { return Kind == k_CondCode; }

  /// True if the immediate is valid for the AdvSIMD modified-immediate
  /// "type 10" encoding.
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
941
  // Predicate for a PC-relative branch target encoded as a signed N-bit
  // immediate scaled by 4. Non-constant expressions (labels) are accepted
  // here; they are range-checked later by the fixup machinery.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Branch targets are word-aligned: the low two bits must be clear.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    // Encodable range is [-2^(N-1), 2^(N-1)-1] scaled by 4.
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
955
956 bool
isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const957 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
958 if (!isImm())
959 return false;
960
961 AArch64MCExpr::VariantKind ELFRefKind;
962 MCSymbolRefExpr::VariantKind DarwinRefKind;
963 int64_t Addend;
964 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
965 DarwinRefKind, Addend)) {
966 return false;
967 }
968 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
969 return false;
970
971 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
972 if (ELFRefKind == AllowedModifiers[i])
973 return true;
974 }
975
976 return false;
977 }
978
  // The isMovWSymbolG<n> predicates accept the relocation modifiers valid
  // for the MOVZ/MOVK instruction targeting 16-bit chunk <n> (G0 = bits
  // 0-15 ... G3 = bits 48-63).
  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
1008
  // Predicate for an immediate expressible as a MOVZ-based MOV alias for a
  // RegWidth-bit register with the given Shift.
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand
    return !Shift && E;
  }
1023
1024 template<int RegWidth, int Shift>
isMOVNMovAlias() const1025 bool isMOVNMovAlias() const {
1026 if (!isImm()) return false;
1027
1028 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1029 if (!CE) return false;
1030 uint64_t Value = CE->getValue();
1031
1032 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1033 }
1034
  // True when the operand is an FP immediate encodable in the 8-bit
  // AArch64 FMOV immediate format (getFP64Imm returns -1 otherwise).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }
1039
  // DSB/DMB/ISB barrier option without the nXS qualifier.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // DSB barrier option with the nXS qualifier (FEAT_XS).
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  // System register readable via MRS; -1U marks "no MRS encoding".
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  // System register writable via MSR; -1U marks "no MSR encoding".
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PSTATE fields that take a 1-bit immediate in "MSR pstatefield, #imm".
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  // Any other valid PSTATE field takes a 4-bit immediate.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  // Scalar (GPR or FP/SIMD scalar) register, i.e. not a vector.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON vector register restricted to the lower half of the register
  // file (V0-V15), as required by some indexed-element instructions.
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }
1091
isSVEVectorReg() const1092 template <unsigned Class> bool isSVEVectorReg() const {
1093 RegKind RK;
1094 switch (Class) {
1095 case AArch64::ZPRRegClassID:
1096 case AArch64::ZPR_3bRegClassID:
1097 case AArch64::ZPR_4bRegClassID:
1098 RK = RegKind::SVEDataVector;
1099 break;
1100 case AArch64::PPRRegClassID:
1101 case AArch64::PPR_3bRegClassID:
1102 RK = RegKind::SVEPredicateVector;
1103 break;
1104 default:
1105 llvm_unreachable("Unsupport register class");
1106 }
1107
1108 return (Kind == k_Register && Reg.Kind == RK) &&
1109 AArch64MCRegisterClasses[Class].contains(getReg());
1110 }
1111
  // Scalar FP register (B/H/S/D/Q) written where a Z register is expected;
  // used by SVE instruction aliases.
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  // SVE predicate register whose element-width suffix matches
  // ElementWidth; NearMatch lets the matcher emit a width diagnostic.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // SVE data (Z) register whose element-width suffix matches ElementWidth.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // SVE data register used as an address operand with a shift/extend
  // (e.g. [z0.d, z1.d, lsl #2]); the shift amount must match the access
  // width (log2 of ShiftWidth in bytes).
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1161
  // 32-bit register name written where a 64-bit register is encoded.
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  // 64-bit register name written where a 32-bit register is encoded.
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // Consecutive 8-register X tuple (x0_x1_..._x7 etc.).
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  // Sequential W register pair (CASP and friends).
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // Sequential X register pair (CASP and friends).
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // Complex-number rotation immediate: a multiple of Angle degrees with the
  // given Remainder, no larger than 270.
  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  // 64-bit GPR carrying an LSL whose amount equals log2 of the access
  // width in bytes.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1218
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  // Vector list whose kind, register count, element count and element
  // width all match the template parameters.
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    return VectorList.NumElements == NumElements;
  }

  // Vector element index within the inclusive range [Min, Max].
  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1250
  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // Plain shift operator (as opposed to a register extend).
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1270
  // Check this FP operand against one entry of the exact-FP-immediate
  // table (immediates like #0.5 accepted by certain SVE/FP instructions).
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      // Table entries are exact by construction, so conversion must succeed.
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  // Match either of two exact-FP-immediate table entries; a NearMatch from
  // the second attempt is propagated for better diagnostics.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1303
  // Register extend operator (uxtb..sxtx, or lsl) with amount <= 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  // Extend taking a 64-bit source register (uxtx/sxtx/lsl).
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  // X-register offset extend for a Width-bit memory access: the amount
  // must be 0 or log2 of the access size in bytes.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // W-register offset extend for a Width-bit memory access.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1353
  // Shifter valid on arithmetic (ADD/SUB) instructions: amount < width.
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  // Shifter valid on logical (AND/ORR/EOR) instructions: amount < width.
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1376
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is an LSL of 0 or 16.
    // (Comment corrected: it previously described the 64-bit amounts.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is an LSL of 0, 16, 32, or 48.
    // (Comment corrected: it previously described the 32-bit amounts.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1400
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL (shift-ones) by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1430
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }

  // ADRP target: a label, or a constant that is a 4KB-page multiple within
  // ADRP's signed 21-page range.
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  // ADR target: a label, or a constant within ADR's signed 21-bit range.
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }
1472
  // Append Expr to Inst, folding constant expressions to plain immediates.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1487
  // Emit the W register with the same encoding as the parsed X register.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  // Emit the X register with the same encoding as the parsed W register.
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  // Emit the Z register with the same index as the parsed Width-bit scalar
  // FP register (e.g. d3 -> z3).
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8: Base = AArch64::B0; break;
    case 16: Base = AArch64::H0; break;
    case 32: Base = AArch64::S0; break;
    case 64: Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }
1526
  // Emit the D register with the same index as the parsed Q register.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  // Selects the FirstRegs row used when emitting a vector-list operand.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };

  // Emit the register-tuple operand for a list of NumRegs registers
  // starting at getVectorListStart(). Column 0 of each row is the base
  // register of the kind the parser recorded; column NumRegs is the first
  // tuple register of that arity.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1574
  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  // Emit 0 or 1 selecting which of the two supported exact FP immediates
  // this operand matched.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }

  // Emit an (immediate, shift) pair, normalising a plain constant into
  // the shifted form when it fits.
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  // As above but negates the immediate (SUB-alias-of-ADD and similar).
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
1619
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }

  // ADRP immediates encode the 4KB page number, so constants are shifted
  // right by 12; symbolic operands are emitted unchanged for later fixup.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }

  // Scaled unsigned 12-bit offset: constants are divided by Scale.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1662
  // Emit the canonical N:immr:imms encoding of a logical immediate of
  // element type T.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  // Same, but encodes the bitwise NOT of the immediate (BIC-style aliases).
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1687
  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    // NOTE(review): this assert is vacuous — the !MCE case returned above.
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    // NOTE(review): vacuous assert, kept for symmetry with the siblings.
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    // NOTE(review): vacuous assert, kept for symmetry with the siblings.
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }
1729
addFPImmOperands(MCInst & Inst,unsigned N) const1730 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1731 assert(N == 1 && "Invalid number of operands!");
1732 Inst.addOperand(MCOperand::createImm(
1733 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1734 }
1735
addBarrierOperands(MCInst & Inst,unsigned N) const1736 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1737 assert(N == 1 && "Invalid number of operands!");
1738 Inst.addOperand(MCOperand::createImm(getBarrier()));
1739 }
1740
addBarriernXSOperands(MCInst & Inst,unsigned N) const1741 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
1742 assert(N == 1 && "Invalid number of operands!");
1743 Inst.addOperand(MCOperand::createImm(getBarrier()));
1744 }
1745
addMRSSystemRegisterOperands(MCInst & Inst,unsigned N) const1746 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1747 assert(N == 1 && "Invalid number of operands!");
1748
1749 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1750 }
1751
  /// Add the system-register encoding used for MSR (writes) to Inst.
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }
1757
  /// Add a PState field (for MSR pstate forms taking a 0/1 immediate) to Inst.
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1763
  /// Add a PState field (for MSR pstate forms taking a 0-15 immediate) to Inst.
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1769
  /// Add a system-instruction Cn control-register number to Inst.
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }
1774
  /// Add a prefetch-operation (PRFM) encoding to Inst.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }
1779
  /// Add a PSB hint encoding to Inst.
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }
1784
  /// Add a BTI hint encoding to Inst.
  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }
1789
addShifterOperands(MCInst & Inst,unsigned N) const1790 void addShifterOperands(MCInst &Inst, unsigned N) const {
1791 assert(N == 1 && "Invalid number of operands!");
1792 unsigned Imm =
1793 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1794 Inst.addOperand(MCOperand::createImm(Imm));
1795 }
1796
addExtendOperands(MCInst & Inst,unsigned N) const1797 void addExtendOperands(MCInst &Inst, unsigned N) const {
1798 assert(N == 1 && "Invalid number of operands!");
1799 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1800 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1801 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1802 Inst.addOperand(MCOperand::createImm(Imm));
1803 }
1804
addExtend64Operands(MCInst & Inst,unsigned N) const1805 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1806 assert(N == 1 && "Invalid number of operands!");
1807 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1808 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1809 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1810 Inst.addOperand(MCOperand::createImm(Imm));
1811 }
1812
  /// Add a memory-extend operand pair: a signedness flag (SXTW/SXTX vs
  /// unsigned extends) followed by a do-shift flag (nonzero shift amount).
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
  }
1820
  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    // Second operand: whether an explicit shift amount was written.
    Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
  }
1832
1833 template<int Shift>
addMOVZMovAliasOperands(MCInst & Inst,unsigned N) const1834 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1835 assert(N == 1 && "Invalid number of operands!");
1836
1837 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1838 if (CE) {
1839 uint64_t Value = CE->getValue();
1840 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1841 } else {
1842 addExpr(Inst, getImm());
1843 }
1844 }
1845
  /// Add the 16-bit chunk (selected by Shift) of a MOV-alias immediate for
  /// the MOVN form: the value is inverted before the chunk is extracted.
  /// Unlike the MOVZ variant, the immediate must already be a constant
  /// (cast<> asserts), which matching is expected to have guaranteed.
  template<int Shift>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
  }
1854
  /// Add a complex-number rotation operand for even rotations (0/90/180/270
  /// degrees): encoded as the rotation divided by 90.
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
  }
1860
  /// Add a complex-number rotation operand for odd rotations (90/270
  /// degrees): encoded as (rotation - 90) / 180, i.e. 0 or 1.
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
  }
1866
1867 void print(raw_ostream &OS) const override;
1868
  /// Build a k_Token operand over Str. Only the pointer/length are stored,
  /// so Str's backing storage must outlive the operand.
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    Op->EndLoc = S;  // tokens use a single source location
    return Op;
  }
1879
  /// Build a k_Register operand. EqTy controls how tied-register constraints
  /// are checked; ExtTy/ShiftAmount/HasExplicitAmount record an attached
  /// shift/extend (defaults describe an implicit LSL #0).
  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
            AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
            unsigned ShiftAmount = 0,
            unsigned HasExplicitAmount = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.Kind = Kind;
    Op->Reg.ElementWidth = 0;  // scalar by default; CreateVectorReg overrides
    Op->Reg.EqualityTy = EqTy;
    Op->Reg.ShiftExtend.Type = ExtTy;
    Op->Reg.ShiftExtend.Amount = ShiftAmount;
    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1898
  /// Build a vector-register operand: a k_Register of a vector RegKind with
  /// its element width recorded. Delegates to CreateReg and then fills in
  /// ElementWidth.
  static std::unique_ptr<AArch64Operand>
  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
                  SMLoc S, SMLoc E, MCContext &Ctx,
                  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
                  unsigned ShiftAmount = 0,
                  unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector) &&
           "Invalid vector kind");
    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
1913
  /// Build a k_VectorList operand: Count consecutive vector registers
  /// starting at RegNum, each with NumElements elements of ElementWidth bits.
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
                   unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
                   MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementWidth = ElementWidth;
    Op->VectorList.RegisterKind = RegisterKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1928
  /// Build a k_VectorIndex operand (a lane index such as the "[2]" in v0.s[2]).
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1937
CreateImm(const MCExpr * Val,SMLoc S,SMLoc E,MCContext & Ctx)1938 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1939 SMLoc E, MCContext &Ctx) {
1940 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
1941 Op->Imm.Val = Val;
1942 Op->StartLoc = S;
1943 Op->EndLoc = E;
1944 return Op;
1945 }
1946
CreateShiftedImm(const MCExpr * Val,unsigned ShiftAmount,SMLoc S,SMLoc E,MCContext & Ctx)1947 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1948 unsigned ShiftAmount,
1949 SMLoc S, SMLoc E,
1950 MCContext &Ctx) {
1951 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1952 Op->ShiftedImm .Val = Val;
1953 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1954 Op->StartLoc = S;
1955 Op->EndLoc = E;
1956 return Op;
1957 }
1958
  /// Build a k_CondCode operand (EQ, NE, HS, ...).
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1967
  /// Build a k_FPImm operand. The APFloat is stored by its raw bit pattern;
  /// IsExact records whether the written literal was representable exactly.
  static std::unique_ptr<AArch64Operand>
  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
    Op->FPImm.IsExact = IsExact;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1977
CreateBarrier(unsigned Val,StringRef Str,SMLoc S,MCContext & Ctx,bool HasnXSModifier)1978 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1979 StringRef Str,
1980 SMLoc S,
1981 MCContext &Ctx,
1982 bool HasnXSModifier) {
1983 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
1984 Op->Barrier.Val = Val;
1985 Op->Barrier.Data = Str.data();
1986 Op->Barrier.Length = Str.size();
1987 Op->Barrier.HasnXSModifier = HasnXSModifier;
1988 Op->StartLoc = S;
1989 Op->EndLoc = S;
1990 return Op;
1991 }
1992
CreateSysReg(StringRef Str,SMLoc S,uint32_t MRSReg,uint32_t MSRReg,uint32_t PStateField,MCContext & Ctx)1993 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1994 uint32_t MRSReg,
1995 uint32_t MSRReg,
1996 uint32_t PStateField,
1997 MCContext &Ctx) {
1998 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
1999 Op->SysReg.Data = Str.data();
2000 Op->SysReg.Length = Str.size();
2001 Op->SysReg.MRSReg = MRSReg;
2002 Op->SysReg.MSRReg = MSRReg;
2003 Op->SysReg.PStateField = PStateField;
2004 Op->StartLoc = S;
2005 Op->EndLoc = S;
2006 return Op;
2007 }
2008
CreateSysCR(unsigned Val,SMLoc S,SMLoc E,MCContext & Ctx)2009 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2010 SMLoc E, MCContext &Ctx) {
2011 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2012 Op->SysCRImm.Val = Val;
2013 Op->StartLoc = S;
2014 Op->EndLoc = E;
2015 return Op;
2016 }
2017
CreatePrefetch(unsigned Val,StringRef Str,SMLoc S,MCContext & Ctx)2018 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2019 StringRef Str,
2020 SMLoc S,
2021 MCContext &Ctx) {
2022 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2023 Op->Prefetch.Val = Val;
2024 Op->Barrier.Data = Str.data();
2025 Op->Barrier.Length = Str.size();
2026 Op->StartLoc = S;
2027 Op->EndLoc = S;
2028 return Op;
2029 }
2030
CreatePSBHint(unsigned Val,StringRef Str,SMLoc S,MCContext & Ctx)2031 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2032 StringRef Str,
2033 SMLoc S,
2034 MCContext &Ctx) {
2035 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2036 Op->PSBHint.Val = Val;
2037 Op->PSBHint.Data = Str.data();
2038 Op->PSBHint.Length = Str.size();
2039 Op->StartLoc = S;
2040 Op->EndLoc = S;
2041 return Op;
2042 }
2043
CreateBTIHint(unsigned Val,StringRef Str,SMLoc S,MCContext & Ctx)2044 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2045 StringRef Str,
2046 SMLoc S,
2047 MCContext &Ctx) {
2048 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2049 Op->BTIHint.Val = Val | 32;
2050 Op->BTIHint.Data = Str.data();
2051 Op->BTIHint.Length = Str.size();
2052 Op->StartLoc = S;
2053 Op->EndLoc = S;
2054 return Op;
2055 }
2056
2057 static std::unique_ptr<AArch64Operand>
CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp,unsigned Val,bool HasExplicitAmount,SMLoc S,SMLoc E,MCContext & Ctx)2058 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2059 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2060 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2061 Op->ShiftExtend.Type = ShOp;
2062 Op->ShiftExtend.Amount = Val;
2063 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2064 Op->StartLoc = S;
2065 Op->EndLoc = E;
2066 return Op;
2067 }
2068 };
2069
2070 } // end anonymous namespace.
2071
/// Debug-print this operand to OS in an angle-bracketed, kind-specific form.
/// Note the deliberate fallthrough from k_Register into k_ShiftExtend: a
/// register with an attached shift/extend prints both parts.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    // Print the raw register numbers of each list member.
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_Register:
    OS << "<register " << getReg() << ">";
    // Plain register (no shift/extend attached): done. Otherwise fall
    // through to also print the shift/extend part.
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    LLVM_FALLTHROUGH;
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";  // amount was implicit, not written by the user
    OS << '>';
    break;
  }
}
2149
2150 /// @name Auto-generated Match Functions
2151 /// {
2152
2153 static unsigned MatchRegisterName(StringRef Name);
2154
2155 /// }
2156
/// Map a NEON vector register name ("v0".."v31", case-insensitive) to the
/// corresponding 128-bit Q-register enum value, or 0 if it doesn't match.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2193
/// Returns an optional pair of (#elements, element-width) if Suffix
/// is a valid vector kind. Where the number of elements in a vector
/// or the vector width is implicit or explicitly unknown (but still a
/// valid suffix kind), 0 is used.
static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                     RegKind VectorKind) {
  // {-1, -1} is the local "no match" sentinel, mapped to None at the end.
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    Res =
        StringSwitch<std::pair<int, int>>(Suffix.lower())
            .Case("", {0, 0})
            .Case(".1d", {1, 64})
            .Case(".1q", {1, 128})
            // '.2h' needed for fp16 scalar pairwise reductions
            .Case(".2h", {2, 16})
            .Case(".2s", {2, 32})
            .Case(".2d", {2, 64})
            // '.4b' is another special case for the ARMv8.2a dot product
            // operand
            .Case(".4b", {4, 8})
            .Case(".4h", {4, 16})
            .Case(".4s", {4, 32})
            .Case(".8b", {8, 8})
            .Case(".8h", {8, 16})
            .Case(".16b", {16, 8})
            // Accept the width neutral ones, too, for verbose syntax. If those
            // aren't used in the right places, the token operand won't match so
            // all will work out.
            .Case(".b", {0, 8})
            .Case(".h", {0, 16})
            .Case(".s", {0, 32})
            .Case(".d", {0, 64})
            .Default({-1, -1});
    break;
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
    // SVE suffixes only name the element width; the element count is
    // implementation-defined (scalable), hence 0.
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case("", {0, 0})
              .Case(".b", {0, 8})
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Case(".q", {0, 128})
              .Default({-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind");
  }

  if (Res == std::make_pair(-1, -1))
    return Optional<std::pair<int, int>>();

  return Optional<std::pair<int, int>>(Res);
}
2250
/// True if Suffix names a valid vector-kind suffix for VectorKind.
static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
  return parseVectorKind(Suffix, VectorKind).hasValue();
}
2254
/// Map an SVE data vector register name ("z0".."z31", case-insensitive) to
/// its Z-register enum value, or 0 if the name doesn't match.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2291
/// Map an SVE predicate register name ("p0".."p15", case-insensitive) to
/// its P-register enum value, or 0 if the name doesn't match.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2312
/// MCTargetAsmParser hook: parse a register, returning true on failure
/// (inverting tryParseRegister's MatchOperand_Success convention).
bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                     SMLoc &EndLoc) {
  return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
}
2317
/// Try to parse a scalar register, recording the source range it spanned.
OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
                                                        SMLoc &StartLoc,
                                                        SMLoc &EndLoc) {
  StartLoc = getLoc();
  auto Res = tryParseScalarRegister(RegNo);
  // On success the register token has been eaten, so the end location is
  // one character before the current lexer position.
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  return Res;
}
2326
2327 // Matches a register name or register alias previously defined by '.req'
matchRegisterNameAlias(StringRef Name,RegKind Kind)2328 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2329 RegKind Kind) {
2330 unsigned RegNum = 0;
2331 if ((RegNum = matchSVEDataVectorRegName(Name)))
2332 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2333
2334 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2335 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2336
2337 if ((RegNum = MatchNeonVectorRegName(Name)))
2338 return Kind == RegKind::NeonVector ? RegNum : 0;
2339
2340 // The parsed register must be of RegKind Scalar
2341 if ((RegNum = MatchRegisterName(Name)))
2342 return Kind == RegKind::Scalar ? RegNum : 0;
2343
2344 if (!RegNum) {
2345 // Handle a few common aliases of registers.
2346 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2347 .Case("fp", AArch64::FP)
2348 .Case("lr", AArch64::LR)
2349 .Case("x31", AArch64::XZR)
2350 .Case("w31", AArch64::WZR)
2351 .Default(0))
2352 return Kind == RegKind::Scalar ? RegNum : 0;
2353
2354 // Check for aliases registered via .req. Canonicalize to lower case.
2355 // That's more consistent since register names are case insensitive, and
2356 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2357 auto Entry = RegisterReqs.find(Name.lower());
2358 if (Entry == RegisterReqs.end())
2359 return 0;
2360
2361 // set RegNum if the match is the right kind of register
2362 if (Kind == Entry->getValue().first)
2363 RegNum = Entry->getValue().second;
2364 }
2365 return RegNum;
2366 }
2367
/// tryParseScalarRegister - Try to parse a register name. The token must be an
/// Identifier when called, and if it is a register name the token is eaten and
/// the register is added to the operand list.
OperandMatchResultTy
AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // Register names are case insensitive; match against the lowered form.
  std::string lowerCase = Tok.getString().lower();
  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
  if (Reg == 0)
    return MatchOperand_NoMatch;

  RegNum = Reg;
  Parser.Lex(); // Eat identifier token.
  return MatchOperand_Success;
}
2387
/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
/// Accepts "cN" or "CN" with N in [0, 15]; emits a diagnostic and fails on
/// anything else.
OperandMatchResultTy
AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  StringRef Tok = Parser.getTok().getIdentifier();
  if (Tok[0] != 'c' && Tok[0] != 'C') {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  // Parse the digits after the 'c'/'C' prefix as a decimal register number.
  uint32_t CRNum;
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(
      AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
  return MatchOperand_Success;
}
2417
/// tryParsePrefetch - Try to parse a prefetch operand: either a named hint
/// (e.g. "pldl1keep") or a raw immediate. The template parameter selects
/// between the SVE prefetch namespace (4-bit immediates) and the base PRFM
/// namespace (5-bit immediates).
template <bool IsSVEPrefetch>
OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();

  // Name -> encoding, in the namespace selected by IsSVEPrefetch.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return Optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return Optional<unsigned>(Res->Encoding);
    return Optional<unsigned>();
  };

  // Encoding -> canonical name, used to attach a name to raw immediates.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return Optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return Optional<StringRef>(Res->Name);
    return Optional<StringRef>();
  };
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal) {
      TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
               "] expected");
      return MatchOperand_ParseFail;
    }

    // Attach the canonical name when the encoding has one ("" otherwise).
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(
        prfop, PRFM.getValueOr(""), S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  Parser.Lex(); // Eat identifier token.
  return MatchOperand_Success;
}
2487
/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command.
/// Only named forms are accepted (no raw immediates).
OperandMatchResultTy
AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
  if (!PSB) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreatePSBHint(
      PSB->Encoding, Tok.getString(), S, getContext()));
  Parser.Lex(); // Eat identifier token.
  return MatchOperand_Success;
}
2510
/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command.
/// Only named forms are accepted (no raw immediates).
OperandMatchResultTy
AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
  if (!BTI) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBTIHint(
      BTI->Encoding, Tok.getString(), S, getContext()));
  Parser.Lex(); // Eat identifier token.
  return MatchOperand_Success;
}
2533
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction. Accepts a symbolic expression (with optional @page/@gotpage
/// style modifiers) or a plain immediate; an unmodified symbol is wrapped as
/// a basic ELF ADRP (:pg_hi21:) reference.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2586
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction. Unlike ADRP, no page-style modifiers are permitted; an
/// unmodified symbol becomes a basic ELF ADR (:abs:) reference.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Leave anything with a bracket to the default for SVE
  if (getParser().getTok().is(AsmToken::LBrac))
    return MatchOperand_NoMatch;

  if (getParser().getTok().is(AsmToken::Hash))
    getParser().Lex(); // Eat hash token.

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADR relocation (unfortunately).
      Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
    } else {
      // Any explicit modifier is invalid for ADR.
      Error(S, "unexpected adr label");
      return MatchOperand_ParseFail;
    }
  }

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
  return MatchOperand_Success;
}
2623
/// tryParseFPImm - A floating point immediate expression operand.
///
/// Handles three spellings:
///   * a hex integer (e.g. "#0x70"), taken as the raw 8-bit FPImm encoding;
///   * a decimal FP literal (e.g. "#1.5"), optionally negated;
///   * with AddFPZeroAsLiteral, "+0.0" is pushed as the literal tokens
///     "#0" ".0" instead of an FP immediate operand.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this may be a different operand kind entirely,
    // so report NoMatch rather than a hard error.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // The hex form is the raw 8-bit encoding: it must fit in a byte and
    // cannot carry an explicit minus sign.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(StatusOrErr.takeError())) {
      TokError("invalid floating point representation");
      return MatchOperand_ParseFail;
    }

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      // Push "+0.0" as two literal tokens so the matcher can recognize it
      // as the special zero form.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
    } else
      // Second argument records whether the string-to-FP conversion was
      // exact (opOK).
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Parser.Lex(); // Eat the token.

  return MatchOperand_Success;
}
2681
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
///
/// Without a comma after the immediate, a plain immediate operand is
/// created. With a comma, only 'lsl #N' (N >= 0) is accepted; 'lsl #0'
/// degrades to a plain immediate operand.
OperandMatchResultTy
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No shift suffix: a simple immediate operand.
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(AsmToken::Hash);

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
2745
2746 /// parseCondCodeString - Parse a Condition Code string.
parseCondCodeString(StringRef Cond)2747 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2748 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2749 .Case("eq", AArch64CC::EQ)
2750 .Case("ne", AArch64CC::NE)
2751 .Case("cs", AArch64CC::HS)
2752 .Case("hs", AArch64CC::HS)
2753 .Case("cc", AArch64CC::LO)
2754 .Case("lo", AArch64CC::LO)
2755 .Case("mi", AArch64CC::MI)
2756 .Case("pl", AArch64CC::PL)
2757 .Case("vs", AArch64CC::VS)
2758 .Case("vc", AArch64CC::VC)
2759 .Case("hi", AArch64CC::HI)
2760 .Case("ls", AArch64CC::LS)
2761 .Case("ge", AArch64CC::GE)
2762 .Case("lt", AArch64CC::LT)
2763 .Case("gt", AArch64CC::GT)
2764 .Case("le", AArch64CC::LE)
2765 .Case("al", AArch64CC::AL)
2766 .Case("nv", AArch64CC::NV)
2767 .Default(AArch64CC::Invalid);
2768
2769 if (CC == AArch64CC::Invalid &&
2770 getSTI().getFeatureBits()[AArch64::FeatureSVE])
2771 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2772 .Case("none", AArch64CC::EQ)
2773 .Case("any", AArch64CC::NE)
2774 .Case("nlast", AArch64CC::HS)
2775 .Case("last", AArch64CC::LO)
2776 .Case("first", AArch64CC::MI)
2777 .Case("nfrst", AArch64CC::PL)
2778 .Case("pmore", AArch64CC::HI)
2779 .Case("plast", AArch64CC::LS)
2780 .Case("tcont", AArch64CC::GE)
2781 .Case("tstop", AArch64CC::LT)
2782 .Default(AArch64CC::Invalid);
2783
2784 return CC;
2785 }
2786
/// parseCondCode - Parse a Condition Code operand.
///
/// Consumes the identifier token and pushes a CondCode operand. With
/// invertCondCode set, the parsed code is inverted before being pushed;
/// AL and NV cannot be inverted and are rejected. Returns true on error.
bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
                                     bool invertCondCode) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  StringRef Cond = Tok.getString();
  AArch64CC::CondCode CC = parseCondCodeString(Cond);
  if (CC == AArch64CC::Invalid)
    return TokError("invalid condition code");
  Parser.Lex(); // Eat identifier token.

  if (invertCondCode) {
    // AL/NV have no meaningful inverse for the instructions that use this.
    if (CC == AArch64CC::AL || CC == AArch64CC::NV)
      return TokError("condition codes AL and NV are invalid for this instruction");
    CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
  }

  Operands.push_back(
      AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
  return false;
}
2811
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
///
/// Recognizes both shift operators (lsl/lsr/asr/ror/msl), which require an
/// immediate amount, and extend operators (uxtb...sxtx), for which a missing
/// amount is treated as an implicit #0.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex(); // Eat the shift/extend keyword.

  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen) &&
      !Parser.getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  // The amount must fold to a constant at parse time.
  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2885
// Table mapping architecture-extension names to the subtarget feature bits
// they enable. Consumed below by setRequiredFeatureString to name missing
// features in diagnostics; presumably also used by the .arch_extension
// directive handling elsewhere in this file — confirm against callers.
static const struct Extension {
  const char *Name;             // Extension name as written in assembly.
  const FeatureBitset Features; // Feature bit(s) implied by the extension.
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}}, // Alias for "mte".
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    // FIXME: Unsupported extensions
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
2925
setRequiredFeatureString(FeatureBitset FBS,std::string & Str)2926 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2927 if (FBS[AArch64::HasV8_1aOps])
2928 Str += "ARMv8.1a";
2929 else if (FBS[AArch64::HasV8_2aOps])
2930 Str += "ARMv8.2a";
2931 else if (FBS[AArch64::HasV8_3aOps])
2932 Str += "ARMv8.3a";
2933 else if (FBS[AArch64::HasV8_4aOps])
2934 Str += "ARMv8.4a";
2935 else if (FBS[AArch64::HasV8_5aOps])
2936 Str += "ARMv8.5a";
2937 else if (FBS[AArch64::HasV8_6aOps])
2938 Str += "ARMv8.6a";
2939 else if (FBS[AArch64::HasV8_7aOps])
2940 Str += "ARMv8.7a";
2941 else {
2942 SmallVector<std::string, 2> ExtMatches;
2943 for (const auto& Ext : ExtensionMap) {
2944 // Use & in case multiple features are enabled
2945 if ((FBS & Ext.Features) != FeatureBitset())
2946 ExtMatches.push_back(Ext.Name);
2947 }
2948 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
2949 }
2950 }
2951
createSysAlias(uint16_t Encoding,OperandVector & Operands,SMLoc S)2952 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2953 SMLoc S) {
2954 const uint16_t Op2 = Encoding & 7;
2955 const uint16_t Cm = (Encoding & 0x78) >> 3;
2956 const uint16_t Cn = (Encoding & 0x780) >> 7;
2957 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2958
2959 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2960
2961 Operands.push_back(
2962 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2963 Operands.push_back(
2964 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2965 Operands.push_back(
2966 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2967 Expr = MCConstantExpr::create(Op2, getContext());
2968 Operands.push_back(
2969 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2970 }
2971
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Also handles the prediction-restriction aliases cfp/dvp/cpp. The current
/// token is expected to be the named operation operand (e.g. "ialluis");
/// it is looked up per-mnemonic, checked against the enabled subtarget
/// features, and expanded into explicit SYS operands via createSysAlias.
/// Returns true on error.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  // A '.' in the mnemonic (e.g. a stray condition suffix) is never valid here.
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  // All of these aliases are emitted as a real SYS instruction.
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      // Known op, but not available on this subtarget: name the missing
      // features in the diagnostic.
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    // The three prediction-restriction mnemonics share an encoding table and
    // differ only in the op2 field.
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2, Operands, S);
  }

  Parser.Lex(); // Eat operand.

  // Ops whose name contains "all" operate on everything and take no register.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3069
/// tryParseBarrierOperand - Parse the operand of a barrier instruction
/// (DSB/DMB/ISB/TSB): either an immediate in [0, 15] or a named option.
///
/// For "dsb", immediates above 15 and unknown names are reported as NoMatch
/// (not an error) so the nXS variant parser can try them instead.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    // TSB only accepts the named 'csync' option, never an immediate.
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Remember the integer token so it can be pushed back for the nXS path.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return MatchOperand_NoMatch;
    }
    if (Value < 0 || Value > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the symbolic name for the encoding when one exists.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return MatchOperand_NoMatch;
    }
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3142
/// tryParseBarriernXSOperand - Parse the operand of a "dsb ... nXS" barrier
/// (v8.7-A): either one of the immediates {16, 20, 24, 28} or a named nXS
/// barrier option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  // Only DSB has an nXS form; guard against being called for anything else.
  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  if (Mnemonic != "dsb")
    return MatchOperand_ParseFail;

  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    // v8.7-A DSB in the nXS variant accepts only the following immediate
    // values: 16, 20, 24, 28.
    if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Lookup cannot fail: every accepted value above has a table entry.
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
                                                     ExprLoc, getContext(),
                                                     true /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);

  if (!DB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
                                    getContext(), true /*hasnXSModifier*/));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3197
/// tryParseSysReg - Parse a system register operand for MRS/MSR.
///
/// The identifier is resolved three ways: as a named system register
/// (readable/writable encodings tracked separately), as a generic
/// "S<op0>_<op1>_<Cn>_<Cm>_<op2>" register, and as a PSTATE field name.
/// All results are packed into a single SysReg operand; validity for the
/// specific instruction is decided later by the matcher.
OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    // -1 marks the register as unusable in that direction (read or write).
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    // Fall back to the generic S<op0>_<op1>_<Cn>_<Cm>_<op2> spelling.
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
  unsigned PStateImm = -1;
  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState->Encoding;

  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}
3226
/// tryParseNeonVectorRegister - Parse a vector register operand.
///
/// Parses a NEON vector register with an optional ".<kind>" suffix (pushed
/// as a separate literal token) and an optional "[index]". Returns true on
/// failure (no register parsed or a bad vector index), false on success.
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  unsigned Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  // The kind suffix (possibly empty) determines the element width.
  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // A missing index is fine (NoMatch); only a malformed one is an error.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
3259
3260 OperandMatchResultTy
tryParseVectorIndex(OperandVector & Operands)3261 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3262 SMLoc SIdx = getLoc();
3263 if (parseOptionalToken(AsmToken::LBrac)) {
3264 const MCExpr *ImmVal;
3265 if (getParser().parseExpression(ImmVal))
3266 return MatchOperand_NoMatch;
3267 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3268 if (!MCE) {
3269 TokError("immediate value expected for vector index");
3270 return MatchOperand_ParseFail;;
3271 }
3272
3273 SMLoc E = getLoc();
3274
3275 if (parseToken(AsmToken::RBrac, "']' expected"))
3276 return MatchOperand_ParseFail;;
3277
3278 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3279 E, getContext()));
3280 return MatchOperand_Success;
3281 }
3282
3283 return MatchOperand_NoMatch;
3284 }
3285
// tryParseVectorRegister - Try to parse a vector register name with
// optional kind specifier. If it is a register specifier, eat the token
// and return it.
//
// On success, \p Reg receives the register number and \p Kind the suffix
// starting at the '.' (e.g. ".4s"), or stays untouched if there is none.
OperandMatchResultTy
AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                         RegKind MatchKind) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Name = Tok.getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);

  if (RegNum) {
    if (Next != StringRef::npos) {
      // Kind keeps the leading '.' (e.g. ".4s").
      Kind = Name.slice(Next, StringRef::npos);
      if (!isValidVectorKind(Kind, MatchKind)) {
        TokError("invalid vector kind qualifier");
        return MatchOperand_ParseFail;
      }
    }
    Parser.Lex(); // Eat the register token.

    Reg = RegNum;
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
3321
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
///
/// Parses a predicate register with an optional element-size suffix, then an
/// optional "/m" (merging) or "/z" (zeroing) qualifier, pushed as two literal
/// tokens ("/" and "m"/"z"). A size suffix combined with a qualifier is an
/// error.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(
      AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(
      AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3374
3375 /// parseRegister - Parse a register operand.
parseRegister(OperandVector & Operands)3376 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3377 // Try for a Neon vector register.
3378 if (!tryParseNeonVectorRegister(Operands))
3379 return false;
3380
3381 // Otherwise try for a scalar register.
3382 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3383 return false;
3384
3385 return true;
3386 }
3387
parseSymbolicImmVal(const MCExpr * & ImmVal)3388 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3389 MCAsmParser &Parser = getParser();
3390 bool HasELFModifier = false;
3391 AArch64MCExpr::VariantKind RefKind;
3392
3393 if (parseOptionalToken(AsmToken::Colon)) {
3394 HasELFModifier = true;
3395
3396 if (Parser.getTok().isNot(AsmToken::Identifier))
3397 return TokError("expect relocation specifier in operand after ':'");
3398
3399 std::string LowerCase = Parser.getTok().getIdentifier().lower();
3400 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3401 .Case("lo12", AArch64MCExpr::VK_LO12)
3402 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3403 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3404 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3405 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3406 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3407 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3408 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3409 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3410 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3411 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3412 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
3413 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
3414 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
3415 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
3416 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
3417 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
3418 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
3419 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3420 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3421 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3422 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3423 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3424 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3425 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3426 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3427 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
3428 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3429 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3430 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3431 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3432 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3433 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3434 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3435 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3436 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3437 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3438 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
3439 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3440 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3441 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3442 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3443 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3444 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3445 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3446 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3447 .Default(AArch64MCExpr::VK_INVALID);
3448
3449 if (RefKind == AArch64MCExpr::VK_INVALID)
3450 return TokError("expect relocation specifier in operand after ':'");
3451
3452 Parser.Lex(); // Eat identifier
3453
3454 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3455 return true;
3456 }
3457
3458 if (getParser().parseExpression(ImmVal))
3459 return true;
3460
3461 if (HasELFModifier)
3462 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3463
3464 return false;
3465 }
3466
/// tryParseVectorList - Parse a curly-braced vector register list of the
/// kind selected by the VectorKind template parameter (NEON or SVE), e.g.
/// "{ v0.4s, v1.4s }" or the range form "{ v0.4s - v3.4s }".
///
/// \param Operands    on success, receives a single VectorList operand.
/// \param ExpectMatch if false, a '{' that does not start a list of this
///        kind yields MatchOperand_NoMatch (with the '{' pushed back into
///        the lexer) so another list flavour can be tried; if true such
///        input is diagnosed as a parse failure.
3467 template <RegKind VectorKind>
3468 OperandMatchResultTy
tryParseVectorList(OperandVector & Operands,bool ExpectMatch)3469 AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3470 bool ExpectMatch) {
3471 MCAsmParser &Parser = getParser();
3472 if (!Parser.getTok().is(AsmToken::LCurly))
3473 return MatchOperand_NoMatch;
3474
3475 // Wrapper around parse function
// Parses one vector register plus its optional ".<kind>" suffix. When
// NoMatchIsError is set, a non-match is promoted to a diagnosed ParseFail.
3476 auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3477 bool NoMatchIsError) {
3478 auto RegTok = Parser.getTok();
3479 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3480 if (ParseRes == MatchOperand_Success) {
3481 if (parseVectorKind(Kind, VectorKind))
3482 return ParseRes;
// tryParseVectorRegister succeeded, so the suffix must be valid here.
3483 llvm_unreachable("Expected a valid vector kind");
3484 }
3485
3486 if (RegTok.isNot(AsmToken::Identifier) ||
3487 ParseRes == MatchOperand_ParseFail ||
3488 (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
3489 Error(Loc, "vector register expected");
3490 return MatchOperand_ParseFail;
3491 }
3492
3493 return MatchOperand_NoMatch;
3494 };
3495
3496 SMLoc S = getLoc();
3497 auto LCurly = Parser.getTok();
3498 Parser.Lex(); // Eat left bracket token.
3499
3500 StringRef Kind;
3501 unsigned FirstReg;
3502 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3503
3504 // Put back the original left bracket if there was no match, so that
3505 // different types of list-operands can be matched (e.g. SVE, Neon).
3506 if (ParseRes == MatchOperand_NoMatch)
3507 Parser.getLexer().UnLex(LCurly);
3508
3509 if (ParseRes != MatchOperand_Success)
3510 return ParseRes;
3511
3512 int64_t PrevReg = FirstReg;
3513 unsigned Count = 1;
3514
// Range form: "{ vN - vM }"; the span may wrap past register 31 to 0.
3515 if (parseOptionalToken(AsmToken::Minus)) {
3516 SMLoc Loc = getLoc();
3517 StringRef NextKind;
3518
3519 unsigned Reg;
3520 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3521 if (ParseRes != MatchOperand_Success)
3522 return ParseRes;
3523
3524 // Any Kind suffices must match on all regs in the list.
3525 if (Kind != NextKind) {
3526 Error(Loc, "mismatched register size suffix");
3527 return MatchOperand_ParseFail;
3528 }
3529
// Distance between the endpoints, computed modulo 32 for wraparound.
3530 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3531
3532 if (Space == 0 || Space > 3) {
3533 Error(Loc, "invalid number of vectors");
3534 return MatchOperand_ParseFail;
3535 }
3536
3537 Count += Space;
3538 }
3539 else {
// Comma-separated form: registers must be consecutive (mod 32).
3540 while (parseOptionalToken(AsmToken::Comma)) {
3541 SMLoc Loc = getLoc();
3542 StringRef NextKind;
3543 unsigned Reg;
3544 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3545 if (ParseRes != MatchOperand_Success)
3546 return ParseRes;
3547
3548 // Any Kind suffices must match on all regs in the list.
3549 if (Kind != NextKind) {
3550 Error(Loc, "mismatched register size suffix");
3551 return MatchOperand_ParseFail;
3552 }
3553
3554 // Registers must be incremental (with wraparound at 31)
3555 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3556 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3557 Error(Loc, "registers must be sequential");
3558 return MatchOperand_ParseFail;
3559 }
3560
3561 PrevReg = Reg;
3562 ++Count;
3563 }
3564 }
3565
3566 if (parseToken(AsmToken::RCurly, "'}' expected"))
3567 return MatchOperand_ParseFail;
3568
// Lists of more than four vectors are rejected.
3569 if (Count > 4) {
3570 Error(S, "invalid number of vectors");
3571 return MatchOperand_ParseFail;
3572 }
3573
3574 unsigned NumElements = 0;
3575 unsigned ElementWidth = 0;
3576 if (!Kind.empty()) {
3577 if (const auto &VK = parseVectorKind(Kind, VectorKind))
3578 std::tie(NumElements, ElementWidth) = *VK;
3579 }
3580
3581 Operands.push_back(AArch64Operand::CreateVectorList(
3582 FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3583 getContext()));
3584
3585 return MatchOperand_Success;
3586 }
3587
3588 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
parseNeonVectorList(OperandVector & Operands)3589 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3590 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3591 if (ParseRes != MatchOperand_Success)
3592 return true;
3593
3594 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3595 }
3596
3597 OperandMatchResultTy
tryParseGPR64sp0Operand(OperandVector & Operands)3598 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3599 SMLoc StartLoc = getLoc();
3600
3601 unsigned RegNum;
3602 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3603 if (Res != MatchOperand_Success)
3604 return Res;
3605
3606 if (!parseOptionalToken(AsmToken::Comma)) {
3607 Operands.push_back(AArch64Operand::CreateReg(
3608 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3609 return MatchOperand_Success;
3610 }
3611
3612 parseOptionalToken(AsmToken::Hash);
3613
3614 if (getParser().getTok().isNot(AsmToken::Integer)) {
3615 Error(getLoc(), "index must be absent or #0");
3616 return MatchOperand_ParseFail;
3617 }
3618
3619 const MCExpr *ImmVal;
3620 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3621 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3622 Error(getLoc(), "index must be absent or #0");
3623 return MatchOperand_ParseFail;
3624 }
3625
3626 Operands.push_back(AArch64Operand::CreateReg(
3627 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3628 return MatchOperand_Success;
3629 }
3630
3631 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3632 OperandMatchResultTy
tryParseGPROperand(OperandVector & Operands)3633 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3634 SMLoc StartLoc = getLoc();
3635
3636 unsigned RegNum;
3637 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3638 if (Res != MatchOperand_Success)
3639 return Res;
3640
3641 // No shift/extend is the default.
3642 if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3643 Operands.push_back(AArch64Operand::CreateReg(
3644 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3645 return MatchOperand_Success;
3646 }
3647
3648 // Eat the comma
3649 getParser().Lex();
3650
3651 // Match the shift
3652 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
3653 Res = tryParseOptionalShiftExtend(ExtOpnd);
3654 if (Res != MatchOperand_Success)
3655 return Res;
3656
3657 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3658 Operands.push_back(AArch64Operand::CreateReg(
3659 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3660 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3661 Ext->hasShiftExtendAmount()));
3662
3663 return MatchOperand_Success;
3664 }
3665
parseOptionalMulOperand(OperandVector & Operands)3666 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3667 MCAsmParser &Parser = getParser();
3668
3669 // Some SVE instructions have a decoration after the immediate, i.e.
3670 // "mul vl". We parse them here and add tokens, which must be present in the
3671 // asm string in the tablegen instruction.
3672 bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3673 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3674 if (!Parser.getTok().getString().equals_lower("mul") ||
3675 !(NextIsVL || NextIsHash))
3676 return true;
3677
3678 Operands.push_back(
3679 AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3680 Parser.Lex(); // Eat the "mul"
3681
3682 if (NextIsVL) {
3683 Operands.push_back(
3684 AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3685 Parser.Lex(); // Eat the "vl"
3686 return false;
3687 }
3688
3689 if (NextIsHash) {
3690 Parser.Lex(); // Eat the #
3691 SMLoc S = getLoc();
3692
3693 // Parse immediate operand.
3694 const MCExpr *ImmVal;
3695 if (!Parser.parseExpression(ImmVal))
3696 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3697 Operands.push_back(AArch64Operand::CreateImm(
3698 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3699 getContext()));
3700 return MatchOperand_Success;
3701 }
3702 }
3703
3704 return Error(getLoc(), "expected 'vl' or '#<imm>'");
3705 }
3706
parseKeywordOperand(OperandVector & Operands)3707 bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
3708 MCAsmParser &Parser = getParser();
3709 auto Tok = Parser.getTok();
3710 if (Tok.isNot(AsmToken::Identifier))
3711 return true;
3712 Operands.push_back(AArch64Operand::CreateToken(Tok.getString(), false,
3713 Tok.getLoc(), getContext()));
3714 Parser.Lex();
3715 return false;
3716 }
3717
/// parseOperand - Parse a single AArch64 instruction operand. For now this
/// parses the operand regardless of the mnemonic.
///
/// \param isCondCode true when this operand position must hold a condition
///        code (e.g. the trailing operand of ccmp/csel-style mnemonics).
/// \param invertCondCode forwarded to parseCondCode for conditional-select
///        alias mnemonics (cset/cinc/...); presumably selects the inverted
///        condition -- confirm against parseCondCode.
/// \returns true on error (a diagnostic has already been emitted).
parseOperand(OperandVector & Operands,bool isCondCode,bool invertCondCode)3720 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3721 bool invertCondCode) {
3722 MCAsmParser &Parser = getParser();
3723
3724 OperandMatchResultTy ResTy =
3725 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3726
3727 // Check if the current operand has a custom associated parser, if so, try to
3728 // custom parse the operand, or fallback to the general approach.
3729 if (ResTy == MatchOperand_Success)
3730 return false;
3731 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3732 // there was a match, but an error occurred, in which case, just return that
3733 // the operand parsing failed.
3734 if (ResTy == MatchOperand_ParseFail)
3735 return true;
3736
3737 // Nothing custom, so do general case parsing.
3738 SMLoc S, E;
3739 switch (getLexer().getKind()) {
// Any other token kind is handed to the symbolic-immediate parser.
3740 default: {
3741 SMLoc S = getLoc();
3742 const MCExpr *Expr;
3743 if (parseSymbolicImmVal(Expr))
3744 return Error(S, "invalid operand");
3745
3746 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3747 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3748 return false;
3749 }
3750 case AsmToken::LBrac: {
3751 SMLoc Loc = Parser.getTok().getLoc();
3752 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3753 getContext()));
3754 Parser.Lex(); // Eat '['
3755
3756 // There's no comma after a '[', so we can parse the next operand
3757 // immediately.
3758 return parseOperand(Operands, false, false);
3759 }
3760 case AsmToken::LCurly:
3761 return parseNeonVectorList(Operands);
3762 case AsmToken::Identifier: {
3763 // If we're expecting a Condition Code operand, then just parse that.
3764 if (isCondCode)
3765 return parseCondCode(Operands, invertCondCode);
3766
3767 // If it's a register name, parse it.
3768 if (!parseRegister(Operands))
3769 return false;
3770
3771 // See if this is a "mul vl" decoration or "mul #<int>" operand used
3772 // by SVE instructions.
3773 if (!parseOptionalMulOperand(Operands))
3774 return false;
3775
3776 // This could be an optional "shift" or "extend" operand.
3777 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3778 // We can only continue if no tokens were eaten.
3779 if (GotShift != MatchOperand_NoMatch)
3780 return GotShift;
3781
3782 // If this is a two-word mnemonic, parse its special keyword
3783 // operand as an identifier.
3784 if (Mnemonic == "brb")
3785 return parseKeywordOperand(Operands);
3786
3787 // This was not a register so parse other operands that start with an
3788 // identifier (like labels) as expressions and create them as immediates.
3789 const MCExpr *IdVal;
3790 S = getLoc();
3791 if (getParser().parseExpression(IdVal))
3792 return true;
3793 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3794 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3795 return false;
3796 }
3797 case AsmToken::Integer:
3798 case AsmToken::Real:
3799 case AsmToken::Hash: {
3800 // #42 -> immediate.
3801 S = getLoc();
3802
3803 parseOptionalToken(AsmToken::Hash);
3804
3805 // Parse a negative sign
3806 bool isNegative = false;
3807 if (Parser.getTok().is(AsmToken::Minus)) {
3808 isNegative = true;
3809 // We need to consume this token only when we have a Real, otherwise
3810 // we let parseSymbolicImmVal take care of it
3811 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3812 Parser.Lex();
3813 }
3814
3815 // The only Real that should come through here is a literal #0.0 for
3816 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3817 // so convert the value.
3818 const AsmToken &Tok = Parser.getTok();
3819 if (Tok.is(AsmToken::Real)) {
3820 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3821 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3822 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3823 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3824 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
3825 return TokError("unexpected floating point literal");
3826 else if (IntVal != 0 || isNegative)
3827 return TokError("expected floating-point constant #0.0");
3828 Parser.Lex(); // Eat the token.
3829
// Emit "#0" and ".0" as two raw tokens, matching the asm string in the
// tablegen patterns for these comparisons.
3830 Operands.push_back(
3831 AArch64Operand::CreateToken("#0", false, S, getContext()));
3832 Operands.push_back(
3833 AArch64Operand::CreateToken(".0", false, S, getContext()));
3834 return false;
3835 }
3836
3837 const MCExpr *ImmVal;
3838 if (parseSymbolicImmVal(ImmVal))
3839 return true;
3840
3841 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3842 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3843 return false;
3844 }
// "ldr <reg>, =<expr>" pseudo: rewrite to movz when the constant fits,
// otherwise place the value in the constant pool.
3845 case AsmToken::Equal: {
3846 SMLoc Loc = getLoc();
3847 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3848 return TokError("unexpected token in operand");
3849 Parser.Lex(); // Eat '='
3850 const MCExpr *SubExprVal;
3851 if (getParser().parseExpression(SubExprVal))
3852 return true;
3853
3854 if (Operands.size() < 2 ||
3855 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3856 return Error(Loc, "Only valid when first operand is register");
3857
3858 bool IsXReg =
3859 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3860 Operands[1]->getReg());
3861
3862 MCContext& Ctx = getContext();
3863 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3864 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3865 if (isa<MCConstantExpr>(SubExprVal)) {
3866 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// Factor the constant into a 16-bit payload and an LSL #16*k shift.
3867 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3868 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3869 ShiftAmt += 16;
3870 Imm >>= 16;
3871 }
3872 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3873 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3874 Operands.push_back(AArch64Operand::CreateImm(
3875 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3876 if (ShiftAmt)
3877 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3878 ShiftAmt, true, S, E, Ctx));
3879 return false;
3880 }
3881 APInt Simm = APInt(64, Imm << ShiftAmt);
3882 // check if the immediate is an unsigned or signed 32-bit int for W regs
3883 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3884 return Error(Loc, "Immediate too large for register");
3885 }
3886 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3887 const MCExpr *CPLoc =
3888 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3889 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3890 return false;
3891 }
3892 }
3893 }
3894
parseImmExpr(int64_t & Out)3895 bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
3896 const MCExpr *Expr = nullptr;
3897 SMLoc L = getLoc();
3898 if (check(getParser().parseExpression(Expr), L, "expected expression"))
3899 return true;
3900 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
3901 if (check(!Value, L, "expected constant expression"))
3902 return true;
3903 Out = Value->getValue();
3904 return false;
3905 }
3906
parseComma()3907 bool AArch64AsmParser::parseComma() {
3908 if (check(getParser().getTok().isNot(AsmToken::Comma), getLoc(),
3909 "expected comma"))
3910 return true;
3911 // Eat the comma
3912 getParser().Lex();
3913 return false;
3914 }
3915
/// parseRegisterInRange - Parse a register and, if it lies within
/// [First, Last], store its zero-based index relative to \p Base in \p Out.
/// FP and LR need special handling because they are not placed contiguously
/// after X28 in the register enum; they map to indices 29 and 30 when the
/// range is based at X0.
parseRegisterInRange(unsigned & Out,unsigned Base,unsigned First,unsigned Last)3916 bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
3917 unsigned First, unsigned Last) {
3918 unsigned Reg;
3919 SMLoc Start, End;
3920 if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
3921 return true;
3922
3923 // Special handling for FP and LR; they aren't linearly after x28 in
3924 // the registers enum.
3925 unsigned RangeEnd = Last;
3926 if (Base == AArch64::X0) {
3927 if (Last == AArch64::FP) {
// Clamp the numeric range check to x28; FP itself was handled above.
3928 RangeEnd = AArch64::X28;
3929 if (Reg == AArch64::FP) {
3930 Out = 29;
3931 return false;
3932 }
3933 }
3934 if (Last == AArch64::LR) {
// A range ending at LR implicitly admits FP (index 29) as well.
3935 RangeEnd = AArch64::X28;
3936 if (Reg == AArch64::FP) {
3937 Out = 29;
3938 return false;
3939 } else if (Reg == AArch64::LR) {
3940 Out = 30;
3941 return false;
3942 }
3943 }
3944 }
3945
3946 if (check(Reg < First || Reg > RangeEnd, Start,
3947 Twine("expected register in range ") +
3948 AArch64InstPrinter::getRegisterName(First) + " to " +
3949 AArch64InstPrinter::getRegisterName(Last)))
3950 return true;
3951 Out = Reg - Base;
3952 return false;
3953 }
3954
regsEqual(const MCParsedAsmOperand & Op1,const MCParsedAsmOperand & Op2) const3955 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3956 const MCParsedAsmOperand &Op2) const {
3957 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3958 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3959 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3960 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3961 return MCTargetAsmParser::regsEqual(Op1, Op2);
3962
3963 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3964 "Testing equality of non-scalar registers not supported");
3965
3966 // Check if a registers match their sub/super register classes.
3967 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3968 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3969 if (AOp1.getRegEqualityTy() == EqualsSubReg)
3970 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3971 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3972 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3973 if (AOp2.getRegEqualityTy() == EqualsSubReg)
3974 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3975
3976 return false;
3977 }
3978
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. Splits dotted mnemonics into tokens, recognises condition-code
/// suffixes and the SYS aliases, then parses the comma-separated operand
/// list. Returns true on error.
ParseInstruction(ParseInstructionInfo & Info,StringRef Name,SMLoc NameLoc,OperandVector & Operands)3981 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3982 StringRef Name, SMLoc NameLoc,
3983 OperandVector &Operands) {
3984 MCAsmParser &Parser = getParser();
// Canonicalize the legacy "b<cc>" spellings to the dotted "b.<cc>" form
// before the mnemonic is split on '.' characters.
3985 Name = StringSwitch<StringRef>(Name.lower())
3986 .Case("beq", "b.eq")
3987 .Case("bne", "b.ne")
3988 .Case("bhs", "b.hs")
3989 .Case("bcs", "b.cs")
3990 .Case("blo", "b.lo")
3991 .Case("bcc", "b.cc")
3992 .Case("bmi", "b.mi")
3993 .Case("bpl", "b.pl")
3994 .Case("bvs", "b.vs")
3995 .Case("bvc", "b.vc")
3996 .Case("bhi", "b.hi")
3997 .Case("bls", "b.ls")
3998 .Case("bge", "b.ge")
3999 .Case("blt", "b.lt")
4000 .Case("bgt", "b.gt")
4001 .Case("ble", "b.le")
4002 .Case("bal", "b.al")
4003 .Case("bnv", "b.nv")
4004 .Default(Name)
4005
4006 // First check for the AArch64-specific .req directive.
4007 if (Parser.getTok().is(AsmToken::Identifier) &&
4008 Parser.getTok().getIdentifier().lower() == ".req") {
4009 parseDirectiveReq(Name, NameLoc);
4010 // We always return 'error' for this, as we're done with this
4011 // statement and don't need to match the 'instruction."
4012 return true;
4013 }
4014
4015 // Create the leading tokens for the mnemonic, split by '.' characters.
4016 size_t Start = 0, Next = Name.find('.');
4017 StringRef Head = Name.slice(Start, Next);
4018
4019 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
4020 // the SYS instruction.
4021 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
4022 Head == "cfp" || Head == "dvp" || Head == "cpp")
4023 return parseSysAlias(Head, NameLoc, Operands);
4024
4025 Operands.push_back(
4026 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
4027 Mnemonic = Head;
4028
4029 // Handle condition codes for a branch mnemonic
4030 if (Head == "b" && Next != StringRef::npos) {
4031 Start = Next;
4032 Next = Name.find('.', Start + 1);
4033 Head = Name.slice(Start + 1, Next);
4034
// Locate the suffix within the original statement for diagnostics.
4035 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4036 (Head.data() - Name.data()));
4037 AArch64CC::CondCode CC = parseCondCodeString(Head);
4038 if (CC == AArch64CC::Invalid)
4039 return Error(SuffixLoc, "invalid condition code");
4040 Operands.push_back(
4041 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
4042 Operands.push_back(
4043 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
4044 }
4045
4046 // Add the remaining tokens in the mnemonic.
4047 while (Next != StringRef::npos) {
4048 Start = Next;
4049 Next = Name.find('.', Start + 1);
4050 Head = Name.slice(Start, Next);
4051 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4052 (Head.data() - Name.data()) + 1);
4053 Operands.push_back(
4054 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
4055 }
4056
4057 // Conditional compare instructions have a Condition Code operand, which needs
4058 // to be parsed and an immediate operand created.
4059 bool condCodeFourthOperand =
4060 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
4061 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
4062 Head == "csinc" || Head == "csinv" || Head == "csneg");
4063
4064 // These instructions are aliases to some of the conditional select
4065 // instructions. However, the condition code is inverted in the aliased
4066 // instruction.
4067 //
4068 // FIXME: Is this the correct way to handle these? Or should the parser
4069 // generate the aliased instructions directly?
4070 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
4071 bool condCodeThirdOperand =
4072 (Head == "cinc" || Head == "cinv" || Head == "cneg");
4073
4074 // Read the remaining operands.
4075 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4076
4077 unsigned N = 1;
4078 do {
// N tracks the 1-based operand position so the condition-code slot of
// the mnemonics classified above is parsed as a condition code.
4079 // Parse and remember the operand.
4080 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
4081 (N == 3 && condCodeThirdOperand) ||
4082 (N == 2 && condCodeSecondOperand),
4083 condCodeSecondOperand || condCodeThirdOperand)) {
4084 return true;
4085 }
4086
4087 // After successfully parsing some operands there are two special cases to
4088 // consider (i.e. notional operands not separated by commas). Both are due
4089 // to memory specifiers:
4090 // + An RBrac will end an address for load/store/prefetch
4091 // + An '!' will indicate a pre-indexed operation.
4092 //
4093 // It's someone else's responsibility to make sure these tokens are sane
4094 // in the given context!
4095
4096 SMLoc RLoc = Parser.getTok().getLoc();
4097 if (parseOptionalToken(AsmToken::RBrac))
4098 Operands.push_back(
4099 AArch64Operand::CreateToken("]", false, RLoc, getContext()));
4100 SMLoc ELoc = Parser.getTok().getLoc();
4101 if (parseOptionalToken(AsmToken::Exclaim))
4102 Operands.push_back(
4103 AArch64Operand::CreateToken("!", false, ELoc, getContext()));
4104
4105 ++N;
4106 } while (parseOptionalToken(AsmToken::Comma));
4107 }
4108
4109 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4110 return true;
4111
4112 return false;
4113 }
4114
isMatchingOrAlias(unsigned ZReg,unsigned Reg)4115 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4116 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
4117 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4118 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4119 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4120 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4121 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4122 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4123 }
4124
4125 // FIXME: This entire function is a giant hack to provide us with decent
4126 // operand range validation/diagnostics until TableGen/MC can be extended
4127 // to support autogeneration of this kind of validation.
validateInstruction(MCInst & Inst,SMLoc & IDLoc,SmallVectorImpl<SMLoc> & Loc)4128 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
4129 SmallVectorImpl<SMLoc> &Loc) {
4130 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4131 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
4132
4133 // A prefix only applies to the instruction following it. Here we extract
4134 // prefix information for the next instruction before validating the current
4135 // one so that in the case of failure we don't erronously continue using the
4136 // current prefix.
4137 PrefixInfo Prefix = NextPrefix;
4138 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
4139
4140 // Before validating the instruction in isolation we run through the rules
4141 // applicable when it follows a prefix instruction.
4142 // NOTE: brk & hlt can be prefixed but require no additional validation.
4143 if (Prefix.isActive() &&
4144 (Inst.getOpcode() != AArch64::BRK) &&
4145 (Inst.getOpcode() != AArch64::HLT)) {
4146
4147 // Prefixed intructions must have a destructive operand.
4148 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
4149 AArch64::NotDestructive)
4150 return Error(IDLoc, "instruction is unpredictable when following a"
4151 " movprfx, suggest replacing movprfx with mov");
4152
4153 // Destination operands must match.
4154 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
4155 return Error(Loc[0], "instruction is unpredictable when following a"
4156 " movprfx writing to a different destination");
4157
4158 // Destination operand must not be used in any other location.
4159 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
4160 if (Inst.getOperand(i).isReg() &&
4161 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
4162 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
4163 return Error(Loc[0], "instruction is unpredictable when following a"
4164 " movprfx and destination also used as non-destructive"
4165 " source");
4166 }
4167
4168 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
4169 if (Prefix.isPredicated()) {
4170 int PgIdx = -1;
4171
4172 // Find the instructions general predicate.
4173 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
4174 if (Inst.getOperand(i).isReg() &&
4175 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
4176 PgIdx = i;
4177 break;
4178 }
4179
4180 // Instruction must be predicated if the movprfx is predicated.
4181 if (PgIdx == -1 ||
4182 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
4183 return Error(IDLoc, "instruction is unpredictable when following a"
4184 " predicated movprfx, suggest using unpredicated movprfx");
4185
4186 // Instruction must use same general predicate as the movprfx.
4187 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
4188 return Error(IDLoc, "instruction is unpredictable when following a"
4189 " predicated movprfx using a different general predicate");
4190
4191 // Instruction element type must match the movprfx.
4192 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
4193 return Error(IDLoc, "instruction is unpredictable when following a"
4194 " predicated movprfx with a different element size");
4195 }
4196 }
4197
4198 // Check for indexed addressing modes w/ the base register being the
4199 // same as a destination/source register or pair load where
4200 // the Rt == Rt2. All of those are undefined behaviour.
4201 switch (Inst.getOpcode()) {
4202 case AArch64::LDPSWpre:
4203 case AArch64::LDPWpost:
4204 case AArch64::LDPWpre:
4205 case AArch64::LDPXpost:
4206 case AArch64::LDPXpre: {
4207 unsigned Rt = Inst.getOperand(1).getReg();
4208 unsigned Rt2 = Inst.getOperand(2).getReg();
4209 unsigned Rn = Inst.getOperand(3).getReg();
4210 if (RI->isSubRegisterEq(Rn, Rt))
4211 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
4212 "is also a destination");
4213 if (RI->isSubRegisterEq(Rn, Rt2))
4214 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
4215 "is also a destination");
4216 LLVM_FALLTHROUGH;
4217 }
4218 case AArch64::LDPDi:
4219 case AArch64::LDPQi:
4220 case AArch64::LDPSi:
4221 case AArch64::LDPSWi:
4222 case AArch64::LDPWi:
4223 case AArch64::LDPXi: {
4224 unsigned Rt = Inst.getOperand(0).getReg();
4225 unsigned Rt2 = Inst.getOperand(1).getReg();
4226 if (Rt == Rt2)
4227 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4228 break;
4229 }
4230 case AArch64::LDPDpost:
4231 case AArch64::LDPDpre:
4232 case AArch64::LDPQpost:
4233 case AArch64::LDPQpre:
4234 case AArch64::LDPSpost:
4235 case AArch64::LDPSpre:
4236 case AArch64::LDPSWpost: {
4237 unsigned Rt = Inst.getOperand(1).getReg();
4238 unsigned Rt2 = Inst.getOperand(2).getReg();
4239 if (Rt == Rt2)
4240 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4241 break;
4242 }
4243 case AArch64::STPDpost:
4244 case AArch64::STPDpre:
4245 case AArch64::STPQpost:
4246 case AArch64::STPQpre:
4247 case AArch64::STPSpost:
4248 case AArch64::STPSpre:
4249 case AArch64::STPWpost:
4250 case AArch64::STPWpre:
4251 case AArch64::STPXpost:
4252 case AArch64::STPXpre: {
4253 unsigned Rt = Inst.getOperand(1).getReg();
4254 unsigned Rt2 = Inst.getOperand(2).getReg();
4255 unsigned Rn = Inst.getOperand(3).getReg();
4256 if (RI->isSubRegisterEq(Rn, Rt))
4257 return Error(Loc[0], "unpredictable STP instruction, writeback base "
4258 "is also a source");
4259 if (RI->isSubRegisterEq(Rn, Rt2))
4260 return Error(Loc[1], "unpredictable STP instruction, writeback base "
4261 "is also a source");
4262 break;
4263 }
4264 case AArch64::LDRBBpre:
4265 case AArch64::LDRBpre:
4266 case AArch64::LDRHHpre:
4267 case AArch64::LDRHpre:
4268 case AArch64::LDRSBWpre:
4269 case AArch64::LDRSBXpre:
4270 case AArch64::LDRSHWpre:
4271 case AArch64::LDRSHXpre:
4272 case AArch64::LDRSWpre:
4273 case AArch64::LDRWpre:
4274 case AArch64::LDRXpre:
4275 case AArch64::LDRBBpost:
4276 case AArch64::LDRBpost:
4277 case AArch64::LDRHHpost:
4278 case AArch64::LDRHpost:
4279 case AArch64::LDRSBWpost:
4280 case AArch64::LDRSBXpost:
4281 case AArch64::LDRSHWpost:
4282 case AArch64::LDRSHXpost:
4283 case AArch64::LDRSWpost:
4284 case AArch64::LDRWpost:
4285 case AArch64::LDRXpost: {
4286 unsigned Rt = Inst.getOperand(1).getReg();
4287 unsigned Rn = Inst.getOperand(2).getReg();
4288 if (RI->isSubRegisterEq(Rn, Rt))
4289 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4290 "is also a source");
4291 break;
4292 }
4293 case AArch64::STRBBpost:
4294 case AArch64::STRBpost:
4295 case AArch64::STRHHpost:
4296 case AArch64::STRHpost:
4297 case AArch64::STRWpost:
4298 case AArch64::STRXpost:
4299 case AArch64::STRBBpre:
4300 case AArch64::STRBpre:
4301 case AArch64::STRHHpre:
4302 case AArch64::STRHpre:
4303 case AArch64::STRWpre:
4304 case AArch64::STRXpre: {
4305 unsigned Rt = Inst.getOperand(1).getReg();
4306 unsigned Rn = Inst.getOperand(2).getReg();
4307 if (RI->isSubRegisterEq(Rn, Rt))
4308 return Error(Loc[0], "unpredictable STR instruction, writeback base "
4309 "is also a source");
4310 break;
4311 }
4312 case AArch64::STXRB:
4313 case AArch64::STXRH:
4314 case AArch64::STXRW:
4315 case AArch64::STXRX:
4316 case AArch64::STLXRB:
4317 case AArch64::STLXRH:
4318 case AArch64::STLXRW:
4319 case AArch64::STLXRX: {
4320 unsigned Rs = Inst.getOperand(0).getReg();
4321 unsigned Rt = Inst.getOperand(1).getReg();
4322 unsigned Rn = Inst.getOperand(2).getReg();
4323 if (RI->isSubRegisterEq(Rt, Rs) ||
4324 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4325 return Error(Loc[0],
4326 "unpredictable STXR instruction, status is also a source");
4327 break;
4328 }
4329 case AArch64::STXPW:
4330 case AArch64::STXPX:
4331 case AArch64::STLXPW:
4332 case AArch64::STLXPX: {
4333 unsigned Rs = Inst.getOperand(0).getReg();
4334 unsigned Rt1 = Inst.getOperand(1).getReg();
4335 unsigned Rt2 = Inst.getOperand(2).getReg();
4336 unsigned Rn = Inst.getOperand(3).getReg();
4337 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4338 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4339 return Error(Loc[0],
4340 "unpredictable STXP instruction, status is also a source");
4341 break;
4342 }
4343 case AArch64::LDRABwriteback:
4344 case AArch64::LDRAAwriteback: {
4345 unsigned Xt = Inst.getOperand(0).getReg();
4346 unsigned Xn = Inst.getOperand(1).getReg();
4347 if (Xt == Xn)
4348 return Error(Loc[0],
4349 "unpredictable LDRA instruction, writeback base"
4350 " is also a destination");
4351 break;
4352 }
4353 }
4354
4355
4356 // Now check immediate ranges. Separate from the above as there is overlap
4357 // in the instructions being checked and this keeps the nested conditionals
4358 // to a minimum.
4359 switch (Inst.getOpcode()) {
4360 case AArch64::ADDSWri:
4361 case AArch64::ADDSXri:
4362 case AArch64::ADDWri:
4363 case AArch64::ADDXri:
4364 case AArch64::SUBSWri:
4365 case AArch64::SUBSXri:
4366 case AArch64::SUBWri:
4367 case AArch64::SUBXri: {
4368 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4369 // some slight duplication here.
4370 if (Inst.getOperand(2).isExpr()) {
4371 const MCExpr *Expr = Inst.getOperand(2).getExpr();
4372 AArch64MCExpr::VariantKind ELFRefKind;
4373 MCSymbolRefExpr::VariantKind DarwinRefKind;
4374 int64_t Addend;
4375 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4376
4377 // Only allow these with ADDXri.
4378 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4379 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4380 Inst.getOpcode() == AArch64::ADDXri)
4381 return false;
4382
4383 // Only allow these with ADDXri/ADDWri
4384 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4385 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4386 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4387 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4388 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4389 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4390 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4391 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4392 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4393 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4394 (Inst.getOpcode() == AArch64::ADDXri ||
4395 Inst.getOpcode() == AArch64::ADDWri))
4396 return false;
4397
4398 // Don't allow symbol refs in the immediate field otherwise
4399 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4400 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4401 // 'cmp w0, 'borked')
4402 return Error(Loc.back(), "invalid immediate expression");
4403 }
4404 // We don't validate more complex expressions here
4405 }
4406 return false;
4407 }
4408 default:
4409 return false;
4410 }
4411 }
4412
// Forward declaration; defined by the tablegen'd matcher included below.
// Returns a ", did you mean 'xyz'?" suggestion string (or "") for an
// unrecognized mnemonic, used by showMatchError for Match_MnemonicFail.
static std::string AArch64MnemonicSpellCheck(StringRef S,
                                             const FeatureBitset &FBS,
                                             unsigned VariantID = 0);
4416
/// Report a human-readable diagnostic for a failed instruction match.
///
/// Maps a Match_* error code (produced by the generated matcher tables or by
/// operand validation) to a descriptive Error() emitted at \p Loc.
/// \p ErrorInfo is the index of the offending operand in \p Operands; in this
/// function it is only consulted for Match_InvalidTiedOperand, where the
/// operand's register-equality constraint selects the message.
/// Always returns true, following the MCAsmParser convention that error
/// reporting functions return true to signal failure to the caller.
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
                                      uint64_t ErrorInfo,
                                      OperandVector &Operands) {
  switch (ErrCode) {
  case Match_InvalidTiedOperand: {
    // The message depends on whether the tied operand must equal the
    // destination register exactly, or its 32-/64-bit sub/super register form.
    RegConstraintEqualityTy EqTy =
        static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
            .getRegEqualityTy();
    switch (EqTy) {
    case RegConstraintEqualityTy::EqualsSubReg:
      return Error(Loc, "operand must be 64-bit form of destination register");
    case RegConstraintEqualityTy::EqualsSuperReg:
      return Error(Loc, "operand must be 32-bit form of destination register");
    case RegConstraintEqualityTy::EqualsReg:
      return Error(Loc, "operand must match destination register");
    }
    llvm_unreachable("Unknown RegConstraintEqualityTy");
  }
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  // Add/sub operand-shape diagnostics (register extends and shifts).
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  // Memory-index (offset) range diagnostics. The "multiple of K" cases
  // correspond to scaled immediates whose encoding is offset / K.
  case Match_InvalidMemoryIndexedSImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm5:
    return Error(Loc, "index must be an integer in range [-16, 15].");
  case Match_InvalidMemoryIndexed1SImm4:
    return Error(Loc, "index must be an integer in range [-8, 7].");
  case Match_InvalidMemoryIndexed2SImm4:
    return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
  case Match_InvalidMemoryIndexed3SImm4:
    return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
  case Match_InvalidMemoryIndexed4SImm4:
    return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
  case Match_InvalidMemoryIndexed16SImm4:
    return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
  case Match_InvalidMemoryIndexed32SImm4:
    return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
  case Match_InvalidMemoryIndexed1SImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm8:
    return Error(Loc, "index must be an integer in range [-128, 127].");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed16SImm9:
    return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
  case Match_InvalidMemoryIndexed8SImm10:
    return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryIndexed8UImm5:
    return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
  case Match_InvalidMemoryIndexed4UImm5:
    return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
  case Match_InvalidMemoryIndexed2UImm5:
    return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
  case Match_InvalidMemoryIndexed8UImm6:
    return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
  case Match_InvalidMemoryIndexed16UImm6:
    return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
  case Match_InvalidMemoryIndexed4UImm6:
    return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
  case Match_InvalidMemoryIndexed2UImm6:
    return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
  case Match_InvalidMemoryIndexed1UImm6:
    return Error(Loc, "index must be in range [0, 63].");
  // Register-offset addressing: allowed extend operators and shift amounts
  // (the shift amount is fixed by the access size, 8..128 bits).
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  // Unsigned scaled 12-bit offsets (LDR/STR immediate forms).
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  // Plain immediate-range diagnostics.
  case Match_InvalidImm0_1:
    return Error(Loc, "immediate must be an integer in range [0, 1].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_255:
    return Error(Loc, "immediate must be an integer in range [0, 255].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  // SVE ADD/SUB and CPY immediates: 8-bit value with an optional 'lsl #8',
  // hence the "multiple of 256" alternative for the wider element sizes.
  case Match_InvalidSVEAddSubImm8:
    return Error(Loc, "immediate must be an integer in range [0, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
    return Error(Loc, "immediate must be an integer in range [0, 255] or a "
                      "multiple of 256 in range [256, 65280]");
  case Match_InvalidSVECpyImm8:
    return Error(Loc, "immediate must be an integer in range [-128, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVECpyImm16:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 65280]");
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 32512]");
  // Vector lane (element index) diagnostics.
  case Match_InvalidIndexRange1_1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexRange0_1:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidSVEIndexRange0_63:
    return Error(Loc, "vector lane must be an integer in range [0, 63].");
  case Match_InvalidSVEIndexRange0_31:
    return Error(Loc, "vector lane must be an integer in range [0, 31].");
  case Match_InvalidSVEIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidSVEIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidSVEIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_InvalidComplexRotationEven:
    return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
  case Match_InvalidComplexRotationOdd:
    return Error(Loc, "complex rotation must be 90 or 270.");
  case Match_MnemonicFail: {
    // Append a spell-check suggestion (", did you mean ...?") computed from
    // the mnemonic token and the currently-enabled subtarget features.
    std::string Suggestion = AArch64MnemonicSpellCheck(
        ((AArch64Operand &)*Operands[0]).getToken(),
        ComputeAvailableFeatures(STI->getFeatureBits()));
    return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
  }
  // Scaled-register operand diagnostics (GPR64 with a required shift).
  case Match_InvalidGPR64shifted8:
    return Error(Loc, "register must be x0..x30 or xzr, without shift");
  case Match_InvalidGPR64shifted16:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
  case Match_InvalidGPR64shifted32:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
  case Match_InvalidGPR64shifted64:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
  case Match_InvalidGPR64NoXZRshifted8:
    return Error(Loc, "register must be x0..x30 without shift");
  case Match_InvalidGPR64NoXZRshifted16:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
  case Match_InvalidGPR64NoXZRshifted32:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
  case Match_InvalidGPR64NoXZRshifted64:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
  // SVE scatter/gather vector-plus-vector addressing mode diagnostics.
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
  case Match_InvalidZPR32LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
  case Match_InvalidZPR32LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
  case Match_InvalidZPR32LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
  case Match_InvalidZPR32LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
  case Match_InvalidZPR64LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
  case Match_InvalidZPR64LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
  case Match_InvalidZPR64LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
  case Match_InvalidZPR64LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
  // SVE vector-register and predicate-register diagnostics.
  case Match_InvalidZPR0:
    return Error(Loc, "expected register without element width suffix");
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
    return Error(Loc, "invalid element width");
  case Match_InvalidZPR_3b8:
    return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
  case Match_InvalidZPR_3b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
  case Match_InvalidZPR_3b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
  case Match_InvalidZPR_4b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
  case Match_InvalidZPR_4b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
  case Match_InvalidZPR_4b64:
    return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
  case Match_InvalidSVEPattern:
    return Error(Loc, "invalid predicate pattern");
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
    return Error(Loc, "invalid predicate register.");
  case Match_InvalidSVEPredicate3bAnyReg:
    return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
  case Match_InvalidSVEPredicate3bBReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
  case Match_InvalidSVEPredicate3bHReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
  case Match_InvalidSVEPredicate3bSReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
  case Match_InvalidSVEPredicate3bDReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
  case Match_InvalidSVEExactFPImmOperandHalfOne:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
  case Match_InvalidSVEExactFPImmOperandZeroOne:
    return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
  default:
    llvm_unreachable("unexpected error code!");
  }
}
4738
4739 static const char *getSubtargetFeatureName(uint64_t Val);
4740
MatchAndEmitInstruction(SMLoc IDLoc,unsigned & Opcode,OperandVector & Operands,MCStreamer & Out,uint64_t & ErrorInfo,bool MatchingInlineAsm)4741 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4742 OperandVector &Operands,
4743 MCStreamer &Out,
4744 uint64_t &ErrorInfo,
4745 bool MatchingInlineAsm) {
4746 assert(!Operands.empty() && "Unexpect empty operand list!");
4747 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4748 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
4749
4750 StringRef Tok = Op.getToken();
4751 unsigned NumOperands = Operands.size();
4752
4753 if (NumOperands == 4 && Tok == "lsl") {
4754 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4755 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4756 if (Op2.isScalarReg() && Op3.isImm()) {
4757 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4758 if (Op3CE) {
4759 uint64_t Op3Val = Op3CE->getValue();
4760 uint64_t NewOp3Val = 0;
4761 uint64_t NewOp4Val = 0;
4762 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4763 Op2.getReg())) {
4764 NewOp3Val = (32 - Op3Val) & 0x1f;
4765 NewOp4Val = 31 - Op3Val;
4766 } else {
4767 NewOp3Val = (64 - Op3Val) & 0x3f;
4768 NewOp4Val = 63 - Op3Val;
4769 }
4770
4771 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4772 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4773
4774 Operands[0] = AArch64Operand::CreateToken(
4775 "ubfm", false, Op.getStartLoc(), getContext());
4776 Operands.push_back(AArch64Operand::CreateImm(
4777 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4778 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4779 Op3.getEndLoc(), getContext());
4780 }
4781 }
4782 } else if (NumOperands == 4 && Tok == "bfc") {
4783 // FIXME: Horrible hack to handle BFC->BFM alias.
4784 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4785 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4786 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4787
4788 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4789 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4790 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4791
4792 if (LSBCE && WidthCE) {
4793 uint64_t LSB = LSBCE->getValue();
4794 uint64_t Width = WidthCE->getValue();
4795
4796 uint64_t RegWidth = 0;
4797 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4798 Op1.getReg()))
4799 RegWidth = 64;
4800 else
4801 RegWidth = 32;
4802
4803 if (LSB >= RegWidth)
4804 return Error(LSBOp.getStartLoc(),
4805 "expected integer in range [0, 31]");
4806 if (Width < 1 || Width > RegWidth)
4807 return Error(WidthOp.getStartLoc(),
4808 "expected integer in range [1, 32]");
4809
4810 uint64_t ImmR = 0;
4811 if (RegWidth == 32)
4812 ImmR = (32 - LSB) & 0x1f;
4813 else
4814 ImmR = (64 - LSB) & 0x3f;
4815
4816 uint64_t ImmS = Width - 1;
4817
4818 if (ImmR != 0 && ImmS >= ImmR)
4819 return Error(WidthOp.getStartLoc(),
4820 "requested insert overflows register");
4821
4822 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4823 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4824 Operands[0] = AArch64Operand::CreateToken(
4825 "bfm", false, Op.getStartLoc(), getContext());
4826 Operands[2] = AArch64Operand::CreateReg(
4827 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4828 SMLoc(), SMLoc(), getContext());
4829 Operands[3] = AArch64Operand::CreateImm(
4830 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4831 Operands.emplace_back(
4832 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4833 WidthOp.getEndLoc(), getContext()));
4834 }
4835 }
4836 } else if (NumOperands == 5) {
4837 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4838 // UBFIZ -> UBFM aliases.
4839 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4840 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4841 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4842 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4843
4844 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4845 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4846 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4847
4848 if (Op3CE && Op4CE) {
4849 uint64_t Op3Val = Op3CE->getValue();
4850 uint64_t Op4Val = Op4CE->getValue();
4851
4852 uint64_t RegWidth = 0;
4853 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4854 Op1.getReg()))
4855 RegWidth = 64;
4856 else
4857 RegWidth = 32;
4858
4859 if (Op3Val >= RegWidth)
4860 return Error(Op3.getStartLoc(),
4861 "expected integer in range [0, 31]");
4862 if (Op4Val < 1 || Op4Val > RegWidth)
4863 return Error(Op4.getStartLoc(),
4864 "expected integer in range [1, 32]");
4865
4866 uint64_t NewOp3Val = 0;
4867 if (RegWidth == 32)
4868 NewOp3Val = (32 - Op3Val) & 0x1f;
4869 else
4870 NewOp3Val = (64 - Op3Val) & 0x3f;
4871
4872 uint64_t NewOp4Val = Op4Val - 1;
4873
4874 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4875 return Error(Op4.getStartLoc(),
4876 "requested insert overflows register");
4877
4878 const MCExpr *NewOp3 =
4879 MCConstantExpr::create(NewOp3Val, getContext());
4880 const MCExpr *NewOp4 =
4881 MCConstantExpr::create(NewOp4Val, getContext());
4882 Operands[3] = AArch64Operand::CreateImm(
4883 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4884 Operands[4] = AArch64Operand::CreateImm(
4885 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4886 if (Tok == "bfi")
4887 Operands[0] = AArch64Operand::CreateToken(
4888 "bfm", false, Op.getStartLoc(), getContext());
4889 else if (Tok == "sbfiz")
4890 Operands[0] = AArch64Operand::CreateToken(
4891 "sbfm", false, Op.getStartLoc(), getContext());
4892 else if (Tok == "ubfiz")
4893 Operands[0] = AArch64Operand::CreateToken(
4894 "ubfm", false, Op.getStartLoc(), getContext());
4895 else
4896 llvm_unreachable("No valid mnemonic for alias?");
4897 }
4898 }
4899
4900 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4901 // UBFX -> UBFM aliases.
4902 } else if (NumOperands == 5 &&
4903 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4904 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4905 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4906 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4907
4908 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4909 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4910 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4911
4912 if (Op3CE && Op4CE) {
4913 uint64_t Op3Val = Op3CE->getValue();
4914 uint64_t Op4Val = Op4CE->getValue();
4915
4916 uint64_t RegWidth = 0;
4917 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4918 Op1.getReg()))
4919 RegWidth = 64;
4920 else
4921 RegWidth = 32;
4922
4923 if (Op3Val >= RegWidth)
4924 return Error(Op3.getStartLoc(),
4925 "expected integer in range [0, 31]");
4926 if (Op4Val < 1 || Op4Val > RegWidth)
4927 return Error(Op4.getStartLoc(),
4928 "expected integer in range [1, 32]");
4929
4930 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4931
4932 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4933 return Error(Op4.getStartLoc(),
4934 "requested extract overflows register");
4935
4936 const MCExpr *NewOp4 =
4937 MCConstantExpr::create(NewOp4Val, getContext());
4938 Operands[4] = AArch64Operand::CreateImm(
4939 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4940 if (Tok == "bfxil")
4941 Operands[0] = AArch64Operand::CreateToken(
4942 "bfm", false, Op.getStartLoc(), getContext());
4943 else if (Tok == "sbfx")
4944 Operands[0] = AArch64Operand::CreateToken(
4945 "sbfm", false, Op.getStartLoc(), getContext());
4946 else if (Tok == "ubfx")
4947 Operands[0] = AArch64Operand::CreateToken(
4948 "ubfm", false, Op.getStartLoc(), getContext());
4949 else
4950 llvm_unreachable("No valid mnemonic for alias?");
4951 }
4952 }
4953 }
4954 }
4955
4956 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4957 // instruction for FP registers correctly in some rare circumstances. Convert
4958 // it to a safe instruction and warn (because silently changing someone's
4959 // assembly is rude).
4960 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4961 NumOperands == 4 && Tok == "movi") {
4962 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4963 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4964 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4965 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4966 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4967 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4968 if (Suffix.lower() == ".2d" &&
4969 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4970 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4971 " correctly on this CPU, converting to equivalent movi.16b");
4972 // Switch the suffix to .16b.
4973 unsigned Idx = Op1.isToken() ? 1 : 2;
4974 Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4975 getContext());
4976 }
4977 }
4978 }
4979
4980 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4981 // InstAlias can't quite handle this since the reg classes aren't
4982 // subclasses.
4983 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4984 // The source register can be Wn here, but the matcher expects a
4985 // GPR64. Twiddle it here if necessary.
4986 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4987 if (Op.isScalarReg()) {
4988 unsigned Reg = getXRegFromWReg(Op.getReg());
4989 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4990 Op.getStartLoc(), Op.getEndLoc(),
4991 getContext());
4992 }
4993 }
4994 // FIXME: Likewise for sxt[bh] with a Xd dst operand
4995 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4996 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4997 if (Op.isScalarReg() &&
4998 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4999 Op.getReg())) {
5000 // The source register can be Wn here, but the matcher expects a
5001 // GPR64. Twiddle it here if necessary.
5002 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5003 if (Op.isScalarReg()) {
5004 unsigned Reg = getXRegFromWReg(Op.getReg());
5005 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5006 Op.getStartLoc(),
5007 Op.getEndLoc(), getContext());
5008 }
5009 }
5010 }
5011 // FIXME: Likewise for uxt[bh] with a Xd dst operand
5012 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
5013 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5014 if (Op.isScalarReg() &&
5015 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5016 Op.getReg())) {
5017 // The source register can be Wn here, but the matcher expects a
5018 // GPR32. Twiddle it here if necessary.
5019 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5020 if (Op.isScalarReg()) {
5021 unsigned Reg = getWRegFromXReg(Op.getReg());
5022 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5023 Op.getStartLoc(),
5024 Op.getEndLoc(), getContext());
5025 }
5026 }
5027 }
5028
5029 MCInst Inst;
5030 FeatureBitset MissingFeatures;
5031 // First try to match against the secondary set of tables containing the
5032 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
5033 unsigned MatchResult =
5034 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5035 MatchingInlineAsm, 1);
5036
5037 // If that fails, try against the alternate table containing long-form NEON:
5038 // "fadd v0.2s, v1.2s, v2.2s"
5039 if (MatchResult != Match_Success) {
5040 // But first, save the short-form match result: we can use it in case the
5041 // long-form match also fails.
5042 auto ShortFormNEONErrorInfo = ErrorInfo;
5043 auto ShortFormNEONMatchResult = MatchResult;
5044 auto ShortFormNEONMissingFeatures = MissingFeatures;
5045
5046 MatchResult =
5047 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5048 MatchingInlineAsm, 0);
5049
5050 // Now, both matches failed, and the long-form match failed on the mnemonic
5051 // suffix token operand. The short-form match failure is probably more
5052 // relevant: use it instead.
5053 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
5054 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
5055 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
5056 MatchResult = ShortFormNEONMatchResult;
5057 ErrorInfo = ShortFormNEONErrorInfo;
5058 MissingFeatures = ShortFormNEONMissingFeatures;
5059 }
5060 }
5061
5062 switch (MatchResult) {
5063 case Match_Success: {
5064 // Perform range checking and other semantic validations
5065 SmallVector<SMLoc, 8> OperandLocs;
5066 NumOperands = Operands.size();
5067 for (unsigned i = 1; i < NumOperands; ++i)
5068 OperandLocs.push_back(Operands[i]->getStartLoc());
5069 if (validateInstruction(Inst, IDLoc, OperandLocs))
5070 return true;
5071
5072 Inst.setLoc(IDLoc);
5073 Out.emitInstruction(Inst, getSTI());
5074 return false;
5075 }
5076 case Match_MissingFeature: {
5077 assert(MissingFeatures.any() && "Unknown missing feature!");
5078 // Special case the error message for the very common case where only
5079 // a single subtarget feature is missing (neon, e.g.).
5080 std::string Msg = "instruction requires:";
5081 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
5082 if (MissingFeatures[i]) {
5083 Msg += " ";
5084 Msg += getSubtargetFeatureName(i);
5085 }
5086 }
5087 return Error(IDLoc, Msg);
5088 }
5089 case Match_MnemonicFail:
5090 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
5091 case Match_InvalidOperand: {
5092 SMLoc ErrorLoc = IDLoc;
5093
5094 if (ErrorInfo != ~0ULL) {
5095 if (ErrorInfo >= Operands.size())
5096 return Error(IDLoc, "too few operands for instruction",
5097 SMRange(IDLoc, getTok().getLoc()));
5098
5099 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5100 if (ErrorLoc == SMLoc())
5101 ErrorLoc = IDLoc;
5102 }
5103 // If the match failed on a suffix token operand, tweak the diagnostic
5104 // accordingly.
5105 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
5106 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
5107 MatchResult = Match_InvalidSuffix;
5108
5109 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5110 }
5111 case Match_InvalidTiedOperand:
5112 case Match_InvalidMemoryIndexed1:
5113 case Match_InvalidMemoryIndexed2:
5114 case Match_InvalidMemoryIndexed4:
5115 case Match_InvalidMemoryIndexed8:
5116 case Match_InvalidMemoryIndexed16:
5117 case Match_InvalidCondCode:
5118 case Match_AddSubRegExtendSmall:
5119 case Match_AddSubRegExtendLarge:
5120 case Match_AddSubSecondSource:
5121 case Match_LogicalSecondSource:
5122 case Match_AddSubRegShift32:
5123 case Match_AddSubRegShift64:
5124 case Match_InvalidMovImm32Shift:
5125 case Match_InvalidMovImm64Shift:
5126 case Match_InvalidFPImm:
5127 case Match_InvalidMemoryWExtend8:
5128 case Match_InvalidMemoryWExtend16:
5129 case Match_InvalidMemoryWExtend32:
5130 case Match_InvalidMemoryWExtend64:
5131 case Match_InvalidMemoryWExtend128:
5132 case Match_InvalidMemoryXExtend8:
5133 case Match_InvalidMemoryXExtend16:
5134 case Match_InvalidMemoryXExtend32:
5135 case Match_InvalidMemoryXExtend64:
5136 case Match_InvalidMemoryXExtend128:
5137 case Match_InvalidMemoryIndexed1SImm4:
5138 case Match_InvalidMemoryIndexed2SImm4:
5139 case Match_InvalidMemoryIndexed3SImm4:
5140 case Match_InvalidMemoryIndexed4SImm4:
5141 case Match_InvalidMemoryIndexed1SImm6:
5142 case Match_InvalidMemoryIndexed16SImm4:
5143 case Match_InvalidMemoryIndexed32SImm4:
5144 case Match_InvalidMemoryIndexed4SImm7:
5145 case Match_InvalidMemoryIndexed8SImm7:
5146 case Match_InvalidMemoryIndexed16SImm7:
5147 case Match_InvalidMemoryIndexed8UImm5:
5148 case Match_InvalidMemoryIndexed4UImm5:
5149 case Match_InvalidMemoryIndexed2UImm5:
5150 case Match_InvalidMemoryIndexed1UImm6:
5151 case Match_InvalidMemoryIndexed2UImm6:
5152 case Match_InvalidMemoryIndexed4UImm6:
5153 case Match_InvalidMemoryIndexed8UImm6:
5154 case Match_InvalidMemoryIndexed16UImm6:
5155 case Match_InvalidMemoryIndexedSImm6:
5156 case Match_InvalidMemoryIndexedSImm5:
5157 case Match_InvalidMemoryIndexedSImm8:
5158 case Match_InvalidMemoryIndexedSImm9:
5159 case Match_InvalidMemoryIndexed16SImm9:
5160 case Match_InvalidMemoryIndexed8SImm10:
5161 case Match_InvalidImm0_1:
5162 case Match_InvalidImm0_7:
5163 case Match_InvalidImm0_15:
5164 case Match_InvalidImm0_31:
5165 case Match_InvalidImm0_63:
5166 case Match_InvalidImm0_127:
5167 case Match_InvalidImm0_255:
5168 case Match_InvalidImm0_65535:
5169 case Match_InvalidImm1_8:
5170 case Match_InvalidImm1_16:
5171 case Match_InvalidImm1_32:
5172 case Match_InvalidImm1_64:
5173 case Match_InvalidSVEAddSubImm8:
5174 case Match_InvalidSVEAddSubImm16:
5175 case Match_InvalidSVEAddSubImm32:
5176 case Match_InvalidSVEAddSubImm64:
5177 case Match_InvalidSVECpyImm8:
5178 case Match_InvalidSVECpyImm16:
5179 case Match_InvalidSVECpyImm32:
5180 case Match_InvalidSVECpyImm64:
5181 case Match_InvalidIndexRange1_1:
5182 case Match_InvalidIndexRange0_15:
5183 case Match_InvalidIndexRange0_7:
5184 case Match_InvalidIndexRange0_3:
5185 case Match_InvalidIndexRange0_1:
5186 case Match_InvalidSVEIndexRange0_63:
5187 case Match_InvalidSVEIndexRange0_31:
5188 case Match_InvalidSVEIndexRange0_15:
5189 case Match_InvalidSVEIndexRange0_7:
5190 case Match_InvalidSVEIndexRange0_3:
5191 case Match_InvalidLabel:
5192 case Match_InvalidComplexRotationEven:
5193 case Match_InvalidComplexRotationOdd:
5194 case Match_InvalidGPR64shifted8:
5195 case Match_InvalidGPR64shifted16:
5196 case Match_InvalidGPR64shifted32:
5197 case Match_InvalidGPR64shifted64:
5198 case Match_InvalidGPR64NoXZRshifted8:
5199 case Match_InvalidGPR64NoXZRshifted16:
5200 case Match_InvalidGPR64NoXZRshifted32:
5201 case Match_InvalidGPR64NoXZRshifted64:
5202 case Match_InvalidZPR32UXTW8:
5203 case Match_InvalidZPR32UXTW16:
5204 case Match_InvalidZPR32UXTW32:
5205 case Match_InvalidZPR32UXTW64:
5206 case Match_InvalidZPR32SXTW8:
5207 case Match_InvalidZPR32SXTW16:
5208 case Match_InvalidZPR32SXTW32:
5209 case Match_InvalidZPR32SXTW64:
5210 case Match_InvalidZPR64UXTW8:
5211 case Match_InvalidZPR64SXTW8:
5212 case Match_InvalidZPR64UXTW16:
5213 case Match_InvalidZPR64SXTW16:
5214 case Match_InvalidZPR64UXTW32:
5215 case Match_InvalidZPR64SXTW32:
5216 case Match_InvalidZPR64UXTW64:
5217 case Match_InvalidZPR64SXTW64:
5218 case Match_InvalidZPR32LSL8:
5219 case Match_InvalidZPR32LSL16:
5220 case Match_InvalidZPR32LSL32:
5221 case Match_InvalidZPR32LSL64:
5222 case Match_InvalidZPR64LSL8:
5223 case Match_InvalidZPR64LSL16:
5224 case Match_InvalidZPR64LSL32:
5225 case Match_InvalidZPR64LSL64:
5226 case Match_InvalidZPR0:
5227 case Match_InvalidZPR8:
5228 case Match_InvalidZPR16:
5229 case Match_InvalidZPR32:
5230 case Match_InvalidZPR64:
5231 case Match_InvalidZPR128:
5232 case Match_InvalidZPR_3b8:
5233 case Match_InvalidZPR_3b16:
5234 case Match_InvalidZPR_3b32:
5235 case Match_InvalidZPR_4b16:
5236 case Match_InvalidZPR_4b32:
5237 case Match_InvalidZPR_4b64:
5238 case Match_InvalidSVEPredicateAnyReg:
5239 case Match_InvalidSVEPattern:
5240 case Match_InvalidSVEPredicateBReg:
5241 case Match_InvalidSVEPredicateHReg:
5242 case Match_InvalidSVEPredicateSReg:
5243 case Match_InvalidSVEPredicateDReg:
5244 case Match_InvalidSVEPredicate3bAnyReg:
5245 case Match_InvalidSVEPredicate3bBReg:
5246 case Match_InvalidSVEPredicate3bHReg:
5247 case Match_InvalidSVEPredicate3bSReg:
5248 case Match_InvalidSVEPredicate3bDReg:
5249 case Match_InvalidSVEExactFPImmOperandHalfOne:
5250 case Match_InvalidSVEExactFPImmOperandHalfTwo:
5251 case Match_InvalidSVEExactFPImmOperandZeroOne:
5252 case Match_MSR:
5253 case Match_MRS: {
5254 if (ErrorInfo >= Operands.size())
5255 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5256 // Any time we get here, there's nothing fancy to do. Just get the
5257 // operand SMLoc and display the diagnostic.
5258 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5259 if (ErrorLoc == SMLoc())
5260 ErrorLoc = IDLoc;
5261 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5262 }
5263 }
5264
5265 llvm_unreachable("Implement any new match types added!");
5266 }
5267
5268 /// ParseDirective parses the arm specific directives
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  // Determine the object format up front: some directives are only legal
  // for Mach-O (LOH hints) or COFF (SEH unwind info).
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;

  // Directive matching is case-insensitive.
  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  // Directives understood for every object format.
  if (IDVal == ".arch")
    parseDirectiveArch(Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(Loc);
  else if (IDVal == ".variant_pcs")
    parseDirectiveVariantPCS(Loc);
  else if (IsMachO) {
    // Mach-O only: linker optimization hints.
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(IDVal, Loc);
    else
      return true;
  } else if (IsCOFF) {
    // COFF only: ARM64 structured exception handling unwind directives.
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(Loc);
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(Loc);
    else if (IDVal == ".seh_save_r19r20_x")
      parseDirectiveSEHSaveR19R20X(Loc);
    else if (IDVal == ".seh_save_fplr")
      parseDirectiveSEHSaveFPLR(Loc);
    else if (IDVal == ".seh_save_fplr_x")
      parseDirectiveSEHSaveFPLRX(Loc);
    else if (IDVal == ".seh_save_reg")
      parseDirectiveSEHSaveReg(Loc);
    else if (IDVal == ".seh_save_reg_x")
      parseDirectiveSEHSaveRegX(Loc);
    else if (IDVal == ".seh_save_regp")
      parseDirectiveSEHSaveRegP(Loc);
    else if (IDVal == ".seh_save_regp_x")
      parseDirectiveSEHSaveRegPX(Loc);
    else if (IDVal == ".seh_save_lrpair")
      parseDirectiveSEHSaveLRPair(Loc);
    else if (IDVal == ".seh_save_freg")
      parseDirectiveSEHSaveFReg(Loc);
    else if (IDVal == ".seh_save_freg_x")
      parseDirectiveSEHSaveFRegX(Loc);
    else if (IDVal == ".seh_save_fregp")
      parseDirectiveSEHSaveFRegP(Loc);
    else if (IDVal == ".seh_save_fregp_x")
      parseDirectiveSEHSaveFRegPX(Loc);
    else if (IDVal == ".seh_set_fp")
      parseDirectiveSEHSetFP(Loc);
    else if (IDVal == ".seh_add_fp")
      parseDirectiveSEHAddFP(Loc);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(Loc);
    else if (IDVal == ".seh_save_next")
      parseDirectiveSEHSaveNext(Loc);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(Loc);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(Loc);
    else if (IDVal == ".seh_trap_frame")
      parseDirectiveSEHTrapFrame(Loc);
    else if (IDVal == ".seh_pushframe")
      parseDirectiveSEHMachineFrame(Loc);
    else if (IDVal == ".seh_context")
      parseDirectiveSEHContext(Loc);
    else if (IDVal == ".seh_clear_unwound_to_call")
      parseDirectiveSEHClearUnwoundToCall(Loc);
    else
      return true;
  } else
    // Returning true tells the generic parser the directive was not ours.
    return true;
  return false;
}
5356
ExpandCryptoAEK(AArch64::ArchKind ArchKind,SmallVector<StringRef,4> & RequestedExtensions)5357 static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5358 SmallVector<StringRef, 4> &RequestedExtensions) {
5359 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
5360 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
5361
5362 if (!NoCrypto && Crypto) {
5363 switch (ArchKind) {
5364 default:
5365 // Map 'generic' (and others) to sha2 and aes, because
5366 // that was the traditional meaning of crypto.
5367 case AArch64::ArchKind::ARMV8_1A:
5368 case AArch64::ArchKind::ARMV8_2A:
5369 case AArch64::ArchKind::ARMV8_3A:
5370 RequestedExtensions.push_back("sha2");
5371 RequestedExtensions.push_back("aes");
5372 break;
5373 case AArch64::ArchKind::ARMV8_4A:
5374 case AArch64::ArchKind::ARMV8_5A:
5375 case AArch64::ArchKind::ARMV8_6A:
5376 case AArch64::ArchKind::ARMV8_7A:
5377 case AArch64::ArchKind::ARMV8R:
5378 RequestedExtensions.push_back("sm4");
5379 RequestedExtensions.push_back("sha3");
5380 RequestedExtensions.push_back("sha2");
5381 RequestedExtensions.push_back("aes");
5382 break;
5383 }
5384 } else if (NoCrypto) {
5385 switch (ArchKind) {
5386 default:
5387 // Map 'generic' (and others) to sha2 and aes, because
5388 // that was the traditional meaning of crypto.
5389 case AArch64::ArchKind::ARMV8_1A:
5390 case AArch64::ArchKind::ARMV8_2A:
5391 case AArch64::ArchKind::ARMV8_3A:
5392 RequestedExtensions.push_back("nosha2");
5393 RequestedExtensions.push_back("noaes");
5394 break;
5395 case AArch64::ArchKind::ARMV8_4A:
5396 case AArch64::ArchKind::ARMV8_5A:
5397 case AArch64::ArchKind::ARMV8_6A:
5398 case AArch64::ArchKind::ARMV8_7A:
5399 RequestedExtensions.push_back("nosm4");
5400 RequestedExtensions.push_back("nosha3");
5401 RequestedExtensions.push_back("nosha2");
5402 RequestedExtensions.push_back("noaes");
5403 break;
5404 }
5405 }
5406 }
5407
5408 /// parseDirectiveArch
5409 /// ::= .arch token
parseDirectiveArch(SMLoc L)5410 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5411 SMLoc ArchLoc = getLoc();
5412
5413 StringRef Arch, ExtensionString;
5414 std::tie(Arch, ExtensionString) =
5415 getParser().parseStringToEndOfStatement().trim().split('+');
5416
5417 AArch64::ArchKind ID = AArch64::parseArch(Arch);
5418 if (ID == AArch64::ArchKind::INVALID)
5419 return Error(ArchLoc, "unknown arch name");
5420
5421 if (parseToken(AsmToken::EndOfStatement))
5422 return true;
5423
5424 // Get the architecture and extension features.
5425 std::vector<StringRef> AArch64Features;
5426 AArch64::getArchFeatures(ID, AArch64Features);
5427 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5428 AArch64Features);
5429
5430 MCSubtargetInfo &STI = copySTI();
5431 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5432 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
5433 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5434
5435 SmallVector<StringRef, 4> RequestedExtensions;
5436 if (!ExtensionString.empty())
5437 ExtensionString.split(RequestedExtensions, '+');
5438
5439 ExpandCryptoAEK(ID, RequestedExtensions);
5440
5441 FeatureBitset Features = STI.getFeatureBits();
5442 for (auto Name : RequestedExtensions) {
5443 bool EnableFeature = true;
5444
5445 if (Name.startswith_lower("no")) {
5446 EnableFeature = false;
5447 Name = Name.substr(2);
5448 }
5449
5450 for (const auto &Extension : ExtensionMap) {
5451 if (Extension.Name != Name)
5452 continue;
5453
5454 if (Extension.Features.none())
5455 report_fatal_error("unsupported architectural extension: " + Name);
5456
5457 FeatureBitset ToggleFeatures = EnableFeature
5458 ? (~Features & Extension.Features)
5459 : ( Features & Extension.Features);
5460 FeatureBitset Features =
5461 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5462 setAvailableFeatures(Features);
5463 break;
5464 }
5465 }
5466 return false;
5467 }
5468
5469 /// parseDirectiveArchExtension
5470 /// ::= .arch_extension [no]feature
parseDirectiveArchExtension(SMLoc L)5471 bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
5472 SMLoc ExtLoc = getLoc();
5473
5474 StringRef Name = getParser().parseStringToEndOfStatement().trim();
5475
5476 if (parseToken(AsmToken::EndOfStatement,
5477 "unexpected token in '.arch_extension' directive"))
5478 return true;
5479
5480 bool EnableFeature = true;
5481 if (Name.startswith_lower("no")) {
5482 EnableFeature = false;
5483 Name = Name.substr(2);
5484 }
5485
5486 MCSubtargetInfo &STI = copySTI();
5487 FeatureBitset Features = STI.getFeatureBits();
5488 for (const auto &Extension : ExtensionMap) {
5489 if (Extension.Name != Name)
5490 continue;
5491
5492 if (Extension.Features.none())
5493 return Error(ExtLoc, "unsupported architectural extension: " + Name);
5494
5495 FeatureBitset ToggleFeatures = EnableFeature
5496 ? (~Features & Extension.Features)
5497 : (Features & Extension.Features);
5498 FeatureBitset Features =
5499 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5500 setAvailableFeatures(Features);
5501 return false;
5502 }
5503
5504 return Error(ExtLoc, "unknown architectural extension: " + Name);
5505 }
5506
incrementLoc(SMLoc L,int Offset)5507 static SMLoc incrementLoc(SMLoc L, int Offset) {
5508 return SMLoc::getFromPointer(L.getPointer() + Offset);
5509 }
5510
5511 /// parseDirectiveCPU
5512 /// ::= .cpu id
parseDirectiveCPU(SMLoc L)5513 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5514 SMLoc CurLoc = getLoc();
5515
5516 StringRef CPU, ExtensionString;
5517 std::tie(CPU, ExtensionString) =
5518 getParser().parseStringToEndOfStatement().trim().split('+');
5519
5520 if (parseToken(AsmToken::EndOfStatement))
5521 return true;
5522
5523 SmallVector<StringRef, 4> RequestedExtensions;
5524 if (!ExtensionString.empty())
5525 ExtensionString.split(RequestedExtensions, '+');
5526
5527 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5528 // once that is tablegen'ed
5529 if (!getSTI().isCPUStringValid(CPU)) {
5530 Error(CurLoc, "unknown CPU name");
5531 return false;
5532 }
5533
5534 MCSubtargetInfo &STI = copySTI();
5535 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
5536 CurLoc = incrementLoc(CurLoc, CPU.size());
5537
5538 ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5539
5540 FeatureBitset Features = STI.getFeatureBits();
5541 for (auto Name : RequestedExtensions) {
5542 // Advance source location past '+'.
5543 CurLoc = incrementLoc(CurLoc, 1);
5544
5545 bool EnableFeature = true;
5546
5547 if (Name.startswith_lower("no")) {
5548 EnableFeature = false;
5549 Name = Name.substr(2);
5550 }
5551
5552 bool FoundExtension = false;
5553 for (const auto &Extension : ExtensionMap) {
5554 if (Extension.Name != Name)
5555 continue;
5556
5557 if (Extension.Features.none())
5558 report_fatal_error("unsupported architectural extension: " + Name);
5559
5560 FeatureBitset ToggleFeatures = EnableFeature
5561 ? (~Features & Extension.Features)
5562 : ( Features & Extension.Features);
5563 FeatureBitset Features =
5564 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5565 setAvailableFeatures(Features);
5566 FoundExtension = true;
5567
5568 break;
5569 }
5570
5571 if (!FoundExtension)
5572 Error(CurLoc, "unsupported architectural extension");
5573
5574 CurLoc = incrementLoc(CurLoc, Name.size());
5575 }
5576 return false;
5577 }
5578
5579 /// parseDirectiveInst
5580 /// ::= .inst opcode [, ...]
parseDirectiveInst(SMLoc Loc)5581 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5582 if (getLexer().is(AsmToken::EndOfStatement))
5583 return Error(Loc, "expected expression following '.inst' directive");
5584
5585 auto parseOp = [&]() -> bool {
5586 SMLoc L = getLoc();
5587 const MCExpr *Expr = nullptr;
5588 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5589 return true;
5590 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5591 if (check(!Value, L, "expected constant expression"))
5592 return true;
5593 getTargetStreamer().emitInst(Value->getValue());
5594 return false;
5595 };
5596
5597 return parseMany(parseOp);
5598 }
5599
5600 // parseDirectiveTLSDescCall:
5601 // ::= .tlsdesccall symbol
parseDirectiveTLSDescCall(SMLoc L)5602 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5603 StringRef Name;
5604 if (check(getParser().parseIdentifier(Name), L,
5605 "expected symbol after directive") ||
5606 parseToken(AsmToken::EndOfStatement))
5607 return true;
5608
5609 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5610 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5611 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5612
5613 MCInst Inst;
5614 Inst.setOpcode(AArch64::TLSDESCCALL);
5615 Inst.addOperand(MCOperand::createExpr(Expr));
5616
5617 getParser().getStreamer().emitInstruction(Inst, getSTI());
5618 return false;
5619 }
5620
5621 /// ::= .loh <lohName | lohId> label1, ..., labelN
5622 /// The number of arguments depends on the loh identifier.
parseDirectiveLOH(StringRef IDVal,SMLoc Loc)5623 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5624 MCLOHType Kind;
5625 if (getParser().getTok().isNot(AsmToken::Identifier)) {
5626 if (getParser().getTok().isNot(AsmToken::Integer))
5627 return TokError("expected an identifier or a number in directive");
5628 // We successfully get a numeric value for the identifier.
5629 // Check if it is valid.
5630 int64_t Id = getParser().getTok().getIntVal();
5631 if (Id <= -1U && !isValidMCLOHType(Id))
5632 return TokError("invalid numeric identifier in directive");
5633 Kind = (MCLOHType)Id;
5634 } else {
5635 StringRef Name = getTok().getIdentifier();
5636 // We successfully parse an identifier.
5637 // Check if it is a recognized one.
5638 int Id = MCLOHNameToId(Name);
5639
5640 if (Id == -1)
5641 return TokError("invalid identifier in directive");
5642 Kind = (MCLOHType)Id;
5643 }
5644 // Consume the identifier.
5645 Lex();
5646 // Get the number of arguments of this LOH.
5647 int NbArgs = MCLOHIdToNbArgs(Kind);
5648
5649 assert(NbArgs != -1 && "Invalid number of arguments");
5650
5651 SmallVector<MCSymbol *, 3> Args;
5652 for (int Idx = 0; Idx < NbArgs; ++Idx) {
5653 StringRef Name;
5654 if (getParser().parseIdentifier(Name))
5655 return TokError("expected identifier in directive");
5656 Args.push_back(getContext().getOrCreateSymbol(Name));
5657
5658 if (Idx + 1 == NbArgs)
5659 break;
5660 if (parseToken(AsmToken::Comma,
5661 "unexpected token in '" + Twine(IDVal) + "' directive"))
5662 return true;
5663 }
5664 if (parseToken(AsmToken::EndOfStatement,
5665 "unexpected token in '" + Twine(IDVal) + "' directive"))
5666 return true;
5667
5668 getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
5669 return false;
5670 }
5671
5672 /// parseDirectiveLtorg
5673 /// ::= .ltorg | .pool
parseDirectiveLtorg(SMLoc L)5674 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5675 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5676 return true;
5677 getTargetStreamer().emitCurrentConstantPool();
5678 return false;
5679 }
5680
5681 /// parseDirectiveReq
5682 /// ::= name .req registername
parseDirectiveReq(StringRef Name,SMLoc L)5683 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5684 MCAsmParser &Parser = getParser();
5685 Parser.Lex(); // Eat the '.req' token.
5686 SMLoc SRegLoc = getLoc();
5687 RegKind RegisterKind = RegKind::Scalar;
5688 unsigned RegNum;
5689 OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5690
5691 if (ParseRes != MatchOperand_Success) {
5692 StringRef Kind;
5693 RegisterKind = RegKind::NeonVector;
5694 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5695
5696 if (ParseRes == MatchOperand_ParseFail)
5697 return true;
5698
5699 if (ParseRes == MatchOperand_Success && !Kind.empty())
5700 return Error(SRegLoc, "vector register without type specifier expected");
5701 }
5702
5703 if (ParseRes != MatchOperand_Success) {
5704 StringRef Kind;
5705 RegisterKind = RegKind::SVEDataVector;
5706 ParseRes =
5707 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5708
5709 if (ParseRes == MatchOperand_ParseFail)
5710 return true;
5711
5712 if (ParseRes == MatchOperand_Success && !Kind.empty())
5713 return Error(SRegLoc,
5714 "sve vector register without type specifier expected");
5715 }
5716
5717 if (ParseRes != MatchOperand_Success) {
5718 StringRef Kind;
5719 RegisterKind = RegKind::SVEPredicateVector;
5720 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5721
5722 if (ParseRes == MatchOperand_ParseFail)
5723 return true;
5724
5725 if (ParseRes == MatchOperand_Success && !Kind.empty())
5726 return Error(SRegLoc,
5727 "sve predicate register without type specifier expected");
5728 }
5729
5730 if (ParseRes != MatchOperand_Success)
5731 return Error(SRegLoc, "register name or alias expected");
5732
5733 // Shouldn't be anything else.
5734 if (parseToken(AsmToken::EndOfStatement,
5735 "unexpected input in .req directive"))
5736 return true;
5737
5738 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5739 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5740 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5741
5742 return false;
5743 }
5744
5745 /// parseDirectiveUneq
5746 /// ::= .unreq registername
parseDirectiveUnreq(SMLoc L)5747 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5748 MCAsmParser &Parser = getParser();
5749 if (getTok().isNot(AsmToken::Identifier))
5750 return TokError("unexpected input in .unreq directive.");
5751 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5752 Parser.Lex(); // Eat the identifier.
5753 return parseToken(AsmToken::EndOfStatement);
5754 }
5755
parseDirectiveCFINegateRAState()5756 bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
5757 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5758 return true;
5759 getStreamer().emitCFINegateRAState();
5760 return false;
5761 }
5762
5763 /// parseDirectiveCFIBKeyFrame
5764 /// ::= .cfi_b_key
parseDirectiveCFIBKeyFrame()5765 bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
5766 if (parseToken(AsmToken::EndOfStatement,
5767 "unexpected token in '.cfi_b_key_frame'"))
5768 return true;
5769 getStreamer().emitCFIBKeyFrame();
5770 return false;
5771 }
5772
5773 /// parseDirectiveVariantPCS
5774 /// ::= .variant_pcs symbolname
parseDirectiveVariantPCS(SMLoc L)5775 bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
5776 MCAsmParser &Parser = getParser();
5777
5778 const AsmToken &Tok = Parser.getTok();
5779 if (Tok.isNot(AsmToken::Identifier))
5780 return TokError("expected symbol name");
5781
5782 StringRef SymbolName = Tok.getIdentifier();
5783
5784 MCSymbol *Sym = getContext().lookupSymbol(SymbolName);
5785 if (!Sym)
5786 return TokError("unknown symbol");
5787
5788 Parser.Lex(); // Eat the symbol
5789
5790 if (parseEOL())
5791 return true;
5792 getTargetStreamer().emitDirectiveVariantPCS(Sym);
5793 return false;
5794 }
5795
5796 /// parseDirectiveSEHAllocStack
5797 /// ::= .seh_stackalloc
parseDirectiveSEHAllocStack(SMLoc L)5798 bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
5799 int64_t Size;
5800 if (parseImmExpr(Size))
5801 return true;
5802 getTargetStreamer().EmitARM64WinCFIAllocStack(Size);
5803 return false;
5804 }
5805
5806 /// parseDirectiveSEHPrologEnd
5807 /// ::= .seh_endprologue
parseDirectiveSEHPrologEnd(SMLoc L)5808 bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
5809 getTargetStreamer().EmitARM64WinCFIPrologEnd();
5810 return false;
5811 }
5812
5813 /// parseDirectiveSEHSaveR19R20X
5814 /// ::= .seh_save_r19r20_x
parseDirectiveSEHSaveR19R20X(SMLoc L)5815 bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
5816 int64_t Offset;
5817 if (parseImmExpr(Offset))
5818 return true;
5819 getTargetStreamer().EmitARM64WinCFISaveR19R20X(Offset);
5820 return false;
5821 }
5822
5823 /// parseDirectiveSEHSaveFPLR
5824 /// ::= .seh_save_fplr
parseDirectiveSEHSaveFPLR(SMLoc L)5825 bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
5826 int64_t Offset;
5827 if (parseImmExpr(Offset))
5828 return true;
5829 getTargetStreamer().EmitARM64WinCFISaveFPLR(Offset);
5830 return false;
5831 }
5832
5833 /// parseDirectiveSEHSaveFPLRX
5834 /// ::= .seh_save_fplr_x
parseDirectiveSEHSaveFPLRX(SMLoc L)5835 bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
5836 int64_t Offset;
5837 if (parseImmExpr(Offset))
5838 return true;
5839 getTargetStreamer().EmitARM64WinCFISaveFPLRX(Offset);
5840 return false;
5841 }
5842
5843 /// parseDirectiveSEHSaveReg
5844 /// ::= .seh_save_reg
parseDirectiveSEHSaveReg(SMLoc L)5845 bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
5846 unsigned Reg;
5847 int64_t Offset;
5848 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
5849 parseComma() || parseImmExpr(Offset))
5850 return true;
5851 getTargetStreamer().EmitARM64WinCFISaveReg(Reg, Offset);
5852 return false;
5853 }
5854
5855 /// parseDirectiveSEHSaveRegX
5856 /// ::= .seh_save_reg_x
parseDirectiveSEHSaveRegX(SMLoc L)5857 bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
5858 unsigned Reg;
5859 int64_t Offset;
5860 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
5861 parseComma() || parseImmExpr(Offset))
5862 return true;
5863 getTargetStreamer().EmitARM64WinCFISaveRegX(Reg, Offset);
5864 return false;
5865 }
5866
5867 /// parseDirectiveSEHSaveRegP
5868 /// ::= .seh_save_regp
parseDirectiveSEHSaveRegP(SMLoc L)5869 bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
5870 unsigned Reg;
5871 int64_t Offset;
5872 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
5873 parseComma() || parseImmExpr(Offset))
5874 return true;
5875 getTargetStreamer().EmitARM64WinCFISaveRegP(Reg, Offset);
5876 return false;
5877 }
5878
5879 /// parseDirectiveSEHSaveRegPX
5880 /// ::= .seh_save_regp_x
parseDirectiveSEHSaveRegPX(SMLoc L)5881 bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
5882 unsigned Reg;
5883 int64_t Offset;
5884 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
5885 parseComma() || parseImmExpr(Offset))
5886 return true;
5887 getTargetStreamer().EmitARM64WinCFISaveRegPX(Reg, Offset);
5888 return false;
5889 }
5890
5891 /// parseDirectiveSEHSaveLRPair
5892 /// ::= .seh_save_lrpair
bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Re-anchor L at the first operand so the diagnostic below points at the
  // register, not at the directive name.
  L = getLoc();
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  // The first saved register must be an even distance from x19. NOTE(review):
  // this assumes parseRegisterInRange yields the register *number* (e.g. 19
  // for x19), consistent with the "- 19" arithmetic — confirm against its
  // definition.
  if (check(((Reg - 19) % 2 != 0), L,
            "expected register with even offset from x19"))
    return true;
  getTargetStreamer().EmitARM64WinCFISaveLRPair(Reg, Offset);
  return false;
}
5906
5907 /// parseDirectiveSEHSaveFReg
5908 /// ::= .seh_save_freg
parseDirectiveSEHSaveFReg(SMLoc L)5909 bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
5910 unsigned Reg;
5911 int64_t Offset;
5912 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
5913 parseComma() || parseImmExpr(Offset))
5914 return true;
5915 getTargetStreamer().EmitARM64WinCFISaveFReg(Reg, Offset);
5916 return false;
5917 }
5918
5919 /// parseDirectiveSEHSaveFRegX
5920 /// ::= .seh_save_freg_x
parseDirectiveSEHSaveFRegX(SMLoc L)5921 bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
5922 unsigned Reg;
5923 int64_t Offset;
5924 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
5925 parseComma() || parseImmExpr(Offset))
5926 return true;
5927 getTargetStreamer().EmitARM64WinCFISaveFRegX(Reg, Offset);
5928 return false;
5929 }
5930
5931 /// parseDirectiveSEHSaveFRegP
5932 /// ::= .seh_save_fregp
parseDirectiveSEHSaveFRegP(SMLoc L)5933 bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
5934 unsigned Reg;
5935 int64_t Offset;
5936 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
5937 parseComma() || parseImmExpr(Offset))
5938 return true;
5939 getTargetStreamer().EmitARM64WinCFISaveFRegP(Reg, Offset);
5940 return false;
5941 }
5942
5943 /// parseDirectiveSEHSaveFRegPX
5944 /// ::= .seh_save_fregp_x
parseDirectiveSEHSaveFRegPX(SMLoc L)5945 bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
5946 unsigned Reg;
5947 int64_t Offset;
5948 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
5949 parseComma() || parseImmExpr(Offset))
5950 return true;
5951 getTargetStreamer().EmitARM64WinCFISaveFRegPX(Reg, Offset);
5952 return false;
5953 }
5954
5955 /// parseDirectiveSEHSetFP
5956 /// ::= .seh_set_fp
parseDirectiveSEHSetFP(SMLoc L)5957 bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
5958 getTargetStreamer().EmitARM64WinCFISetFP();
5959 return false;
5960 }
5961
5962 /// parseDirectiveSEHAddFP
5963 /// ::= .seh_add_fp
parseDirectiveSEHAddFP(SMLoc L)5964 bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
5965 int64_t Size;
5966 if (parseImmExpr(Size))
5967 return true;
5968 getTargetStreamer().EmitARM64WinCFIAddFP(Size);
5969 return false;
5970 }
5971
5972 /// parseDirectiveSEHNop
5973 /// ::= .seh_nop
parseDirectiveSEHNop(SMLoc L)5974 bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
5975 getTargetStreamer().EmitARM64WinCFINop();
5976 return false;
5977 }
5978
5979 /// parseDirectiveSEHSaveNext
5980 /// ::= .seh_save_next
parseDirectiveSEHSaveNext(SMLoc L)5981 bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
5982 getTargetStreamer().EmitARM64WinCFISaveNext();
5983 return false;
5984 }
5985
5986 /// parseDirectiveSEHEpilogStart
5987 /// ::= .seh_startepilogue
parseDirectiveSEHEpilogStart(SMLoc L)5988 bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
5989 getTargetStreamer().EmitARM64WinCFIEpilogStart();
5990 return false;
5991 }
5992
5993 /// parseDirectiveSEHEpilogEnd
5994 /// ::= .seh_endepilogue
parseDirectiveSEHEpilogEnd(SMLoc L)5995 bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
5996 getTargetStreamer().EmitARM64WinCFIEpilogEnd();
5997 return false;
5998 }
5999
6000 /// parseDirectiveSEHTrapFrame
6001 /// ::= .seh_trap_frame
parseDirectiveSEHTrapFrame(SMLoc L)6002 bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
6003 getTargetStreamer().EmitARM64WinCFITrapFrame();
6004 return false;
6005 }
6006
6007 /// parseDirectiveSEHMachineFrame
6008 /// ::= .seh_pushframe
parseDirectiveSEHMachineFrame(SMLoc L)6009 bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
6010 getTargetStreamer().EmitARM64WinCFIMachineFrame();
6011 return false;
6012 }
6013
6014 /// parseDirectiveSEHContext
6015 /// ::= .seh_context
parseDirectiveSEHContext(SMLoc L)6016 bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
6017 getTargetStreamer().EmitARM64WinCFIContext();
6018 return false;
6019 }
6020
6021 /// parseDirectiveSEHClearUnwoundToCall
6022 /// ::= .seh_clear_unwound_to_call
parseDirectiveSEHClearUnwoundToCall(SMLoc L)6023 bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
6024 getTargetStreamer().EmitARM64WinCFIClearUnwoundToCall();
6025 return false;
6026 }
6027
// Classify a symbolic expression operand into <ELF specifier> + <Darwin
// variant> + <constant addend>. Returns true when Expr is a supported
// symbolic reference; on success the out-parameters describe any AArch64
// relocation specifier, any Darwin symbol variant, and the addend.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  // Defaults: no ELF specifier, no Darwin variant, zero addend.
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Peel off an AArch64-specific wrapper expression, remembering its kind;
  // classification continues on the wrapped sub-expression.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
  // A subtracted symbol (SymB) cannot be expressed as symbol + addend.
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
6069
6070 /// Force static initialization.
LLVMInitializeAArch64AsmParser()6071 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() {
6072 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
6073 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
6074 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
6075 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
6076 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
6077 }
6078
// Expand the TableGen-generated assembly-matcher support code. Each GET_*
// macro selects which section of AArch64GenAsmMatcher.inc is emitted here.
#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
#define GET_MATCHER_IMPLEMENTATION
#define GET_MNEMONIC_SPELL_CHECKER
#include "AArch64GenAsmMatcher.inc"
6084
6085 // Define this matcher function after the auto-generated include so we
6086 // have the match class enum definitions.
validateTargetOperandClass(MCParsedAsmOperand & AsmOp,unsigned Kind)6087 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
6088 unsigned Kind) {
6089 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
6090 // If the kind is a token for a literal immediate, check if our asm
6091 // operand matches. This is for InstAliases which have a fixed-value
6092 // immediate in the syntax.
6093 int64_t ExpectedVal;
6094 switch (Kind) {
6095 default:
6096 return Match_InvalidOperand;
6097 case MCK__HASH_0:
6098 ExpectedVal = 0;
6099 break;
6100 case MCK__HASH_1:
6101 ExpectedVal = 1;
6102 break;
6103 case MCK__HASH_12:
6104 ExpectedVal = 12;
6105 break;
6106 case MCK__HASH_16:
6107 ExpectedVal = 16;
6108 break;
6109 case MCK__HASH_2:
6110 ExpectedVal = 2;
6111 break;
6112 case MCK__HASH_24:
6113 ExpectedVal = 24;
6114 break;
6115 case MCK__HASH_3:
6116 ExpectedVal = 3;
6117 break;
6118 case MCK__HASH_32:
6119 ExpectedVal = 32;
6120 break;
6121 case MCK__HASH_4:
6122 ExpectedVal = 4;
6123 break;
6124 case MCK__HASH_48:
6125 ExpectedVal = 48;
6126 break;
6127 case MCK__HASH_6:
6128 ExpectedVal = 6;
6129 break;
6130 case MCK__HASH_64:
6131 ExpectedVal = 64;
6132 break;
6133 case MCK__HASH_8:
6134 ExpectedVal = 8;
6135 break;
6136 }
6137 if (!Op.isImm())
6138 return Match_InvalidOperand;
6139 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6140 if (!CE)
6141 return Match_InvalidOperand;
6142 if (CE->getValue() == ExpectedVal)
6143 return Match_Success;
6144 return Match_InvalidOperand;
6145 }
6146
// Parse a consecutive even/odd pair of same-size GPRs ("<even>, <odd>") and
// push the covering sequential-pair super-register as a single operand.
OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    Error(S, "expected register");
    return MatchOperand_ParseFail;
  }

  unsigned FirstReg;
  OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // The first register decides the size (W vs X) the second must match.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // The pair must start on an even hardware encoding.
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  if (FirstEncoding & 0x1) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  if (getParser().getTok().isNot(AsmToken::Comma)) {
    Error(getLoc(), "expected comma");
    return MatchOperand_ParseFail;
  }
  // Eat the comma
  getParser().Lex();

  SMLoc E = getLoc();
  unsigned SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  // The second register must immediately follow the first (odd encoding)
  // and belong to the same register class.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    Error(E,"expected second odd register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // Map the even register onto the super-register covering both halves.
  unsigned Pair = 0;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
        &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
        &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
      getLoc(), getContext()));

  return MatchOperand_Success;
}
6219
// Parse an SVE data vector register operand, optionally with an element-size
// suffix and (when ParseShiftExtend) a trailing shift/extend modifier.
//   ParseShiftExtend: also accept ", <shift/extend>" after the register.
//   ParseSuffix: an element-size suffix on the register is required.
template <bool ParseShiftExtend, bool ParseSuffix>
OperandMatchResultTy
AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  // Check for a SVE vector register specifier first.
  unsigned RegNum;
  StringRef Kind;

  OperandMatchResultTy Res =
      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);

  if (Res != MatchOperand_Success)
    return Res;

  // A required-but-missing suffix means this operand form does not apply.
  if (ParseSuffix && Kind.empty())
    return MatchOperand_NoMatch;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateVectorReg(
        RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));

    // A vector index ("[n]") may still follow; only a hard parse failure
    // propagates (NoMatch is fine — the index is optional).
    OperandMatchResultTy Res = tryParseVectorIndex(Operands);
    if (Res == MatchOperand_ParseFail)
      return MatchOperand_ParseFail;
    return MatchOperand_Success;
  }

  // Eat the comma
  getParser().Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Fold the parsed shift/extend into the vector register operand itself.
  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
      getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
6271
6272 OperandMatchResultTy
tryParseSVEPattern(OperandVector & Operands)6273 AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
6274 MCAsmParser &Parser = getParser();
6275
6276 SMLoc SS = getLoc();
6277 const AsmToken &TokE = Parser.getTok();
6278 bool IsHash = TokE.is(AsmToken::Hash);
6279
6280 if (!IsHash && TokE.isNot(AsmToken::Identifier))
6281 return MatchOperand_NoMatch;
6282
6283 int64_t Pattern;
6284 if (IsHash) {
6285 Parser.Lex(); // Eat hash
6286
6287 // Parse the immediate operand.
6288 const MCExpr *ImmVal;
6289 SS = getLoc();
6290 if (Parser.parseExpression(ImmVal))
6291 return MatchOperand_ParseFail;
6292
6293 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
6294 if (!MCE)
6295 return MatchOperand_ParseFail;
6296
6297 Pattern = MCE->getValue();
6298 } else {
6299 // Parse the pattern
6300 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
6301 if (!Pat)
6302 return MatchOperand_NoMatch;
6303
6304 Parser.Lex();
6305 Pattern = Pat->Encoding;
6306 assert(Pattern >= 0 && Pattern < 32);
6307 }
6308
6309 Operands.push_back(
6310 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
6311 SS, getLoc(), getContext()));
6312
6313 return MatchOperand_Success;
6314 }
6315
6316 OperandMatchResultTy
tryParseGPR64x8(OperandVector & Operands)6317 AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
6318 SMLoc SS = getLoc();
6319
6320 unsigned XReg;
6321 if (tryParseScalarRegister(XReg) != MatchOperand_Success)
6322 return MatchOperand_NoMatch;
6323
6324 MCContext &ctx = getContext();
6325 const MCRegisterInfo *RI = ctx.getRegisterInfo();
6326 int X8Reg = RI->getMatchingSuperReg(
6327 XReg, AArch64::x8sub_0,
6328 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
6329 if (!X8Reg) {
6330 Error(SS, "expected an even-numbered x-register in the range [x0,x22]");
6331 return MatchOperand_ParseFail;
6332 }
6333
6334 Operands.push_back(
6335 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
6336 return MatchOperand_Success;
6337 }
6338