xref: /llvm-project/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp (revision 2504693d75c6ed1047955dd6e65ce9d4c1a164c8)
1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMBaseInstrInfo.h"
10 #include "ARMFeatures.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "MCTargetDesc/ARMBaseInfo.h"
13 #include "MCTargetDesc/ARMInstPrinter.h"
14 #include "MCTargetDesc/ARMMCExpr.h"
15 #include "MCTargetDesc/ARMMCTargetDesc.h"
16 #include "TargetInfo/ARMTargetInfo.h"
17 #include "Utils/ARMBaseInfo.h"
18 #include "llvm/ADT/APFloat.h"
19 #include "llvm/ADT/APInt.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallBitVector.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringMap.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/StringSet.h"
27 #include "llvm/ADT/StringSwitch.h"
28 #include "llvm/ADT/Twine.h"
29 #include "llvm/MC/MCContext.h"
30 #include "llvm/MC/MCExpr.h"
31 #include "llvm/MC/MCInst.h"
32 #include "llvm/MC/MCInstrDesc.h"
33 #include "llvm/MC/MCInstrInfo.h"
34 #include "llvm/MC/MCParser/MCAsmLexer.h"
35 #include "llvm/MC/MCParser/MCAsmParser.h"
36 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
37 #include "llvm/MC/MCParser/MCAsmParserUtils.h"
38 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
39 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
40 #include "llvm/MC/MCRegisterInfo.h"
41 #include "llvm/MC/MCSection.h"
42 #include "llvm/MC/MCStreamer.h"
43 #include "llvm/MC/MCSubtargetInfo.h"
44 #include "llvm/MC/MCSymbol.h"
45 #include "llvm/MC/TargetRegistry.h"
46 #include "llvm/Support/ARMBuildAttributes.h"
47 #include "llvm/Support/ARMEHABI.h"
48 #include "llvm/Support/Casting.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/Compiler.h"
51 #include "llvm/Support/Debug.h"
52 #include "llvm/Support/ErrorHandling.h"
53 #include "llvm/Support/MathExtras.h"
54 #include "llvm/Support/SMLoc.h"
55 #include "llvm/Support/raw_ostream.h"
56 #include "llvm/TargetParser/SubtargetFeature.h"
57 #include "llvm/TargetParser/TargetParser.h"
58 #include "llvm/TargetParser/Triple.h"
59 #include <algorithm>
60 #include <cassert>
61 #include <cstddef>
62 #include <cstdint>
63 #include <iterator>
64 #include <limits>
65 #include <memory>
66 #include <optional>
67 #include <string>
68 #include <utility>
69 #include <vector>
70 
// Tag used by LLVM_DEBUG / -debug-only= to filter this file's debug output.
#define DEBUG_TYPE "asm-parser"

using namespace llvm;
74 
75 namespace {
class ARMOperand;

// How to treat conditional instructions that appear outside an explicit IT
// block; selected by the -arm-implicit-it command-line option below.
enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };

static cl::opt<ImplicitItModeTy> ImplicitItMode(
    "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
    cl::desc("Allow conditional instructions outside of an IT block"),
    cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
                          "Accept in both ISAs, emit implicit ITs in Thumb"),
               clEnumValN(ImplicitItModeTy::Never, "never",
                          "Warn in ARM, reject in Thumb"),
               clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
                          "Accept in ARM, reject in Thumb"),
               clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
                          "Warn in ARM, emit implicit ITs in Thumb")));

// When set, the parser emits build attributes for the selected target at
// construction time (see the ARMAsmParser constructor).
static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
                                        cl::init(false));

// Lane specification parsed after a vector register list: none, "[]" (all
// lanes), or "[n]" (a single indexed lane).
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
96 
/// Extract the state bit for one slot of an IT block from its mask.
///
/// Position==0 means we're not in an IT block at all. Position==1 selects
/// the first state bit, which is always 0 (Then). Position==2 selects the
/// second state bit, stored at bit 3 of Mask, and so on downwards; shifting
/// by (5 - Position) therefore brings the requested bit (including the
/// always-0 bit at bit 4 for the mandatory initial Then) down to bit 0.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  const unsigned Shift = 5 - Position;
  return (Mask >> Shift) & 1u;
}
106 
107 class UnwindContext {
108   using Locs = SmallVector<SMLoc, 4>;
109 
110   MCAsmParser &Parser;
111   Locs FnStartLocs;
112   Locs CantUnwindLocs;
113   Locs PersonalityLocs;
114   Locs PersonalityIndexLocs;
115   Locs HandlerDataLocs;
116   MCRegister FPReg;
117 
118 public:
119   UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
120 
121   bool hasFnStart() const { return !FnStartLocs.empty(); }
122   bool cantUnwind() const { return !CantUnwindLocs.empty(); }
123   bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
124 
125   bool hasPersonality() const {
126     return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
127   }
128 
129   void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
130   void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
131   void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
132   void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
133   void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
134 
135   void saveFPReg(MCRegister Reg) { FPReg = Reg; }
136   MCRegister getFPReg() const { return FPReg; }
137 
138   void emitFnStartLocNotes() const {
139     for (const SMLoc &Loc : FnStartLocs)
140       Parser.Note(Loc, ".fnstart was specified here");
141   }
142 
143   void emitCantUnwindLocNotes() const {
144     for (const SMLoc &Loc : CantUnwindLocs)
145       Parser.Note(Loc, ".cantunwind was specified here");
146   }
147 
148   void emitHandlerDataLocNotes() const {
149     for (const SMLoc &Loc : HandlerDataLocs)
150       Parser.Note(Loc, ".handlerdata was specified here");
151   }
152 
153   void emitPersonalityLocNotes() const {
154     for (Locs::const_iterator PI = PersonalityLocs.begin(),
155                               PE = PersonalityLocs.end(),
156                               PII = PersonalityIndexLocs.begin(),
157                               PIE = PersonalityIndexLocs.end();
158          PI != PE || PII != PIE;) {
159       if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
160         Parser.Note(*PI++, ".personality was specified here");
161       else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
162         Parser.Note(*PII++, ".personalityindex was specified here");
163       else
164         llvm_unreachable(".personality and .personalityindex cannot be "
165                          "at the same location");
166     }
167   }
168 
169   void reset() {
170     FnStartLocs = Locs();
171     CantUnwindLocs = Locs();
172     PersonalityLocs = Locs();
173     HandlerDataLocs = Locs();
174     PersonalityIndexLocs = Locs();
175     FPReg = ARM::SP;
176   }
177 };
178 
179 // Various sets of ARM instruction mnemonics which are used by the asm parser
180 class ARMMnemonicSets {
181   StringSet<> CDE;
182   StringSet<> CDEWithVPTSuffix;
183 public:
184   ARMMnemonicSets(const MCSubtargetInfo &STI);
185 
186   /// Returns true iff a given mnemonic is a CDE instruction
187   bool isCDEInstr(StringRef Mnemonic) {
188     // Quick check before searching the set
189     if (!Mnemonic.starts_with("cx") && !Mnemonic.starts_with("vcx"))
190       return false;
191     return CDE.count(Mnemonic);
192   }
193 
194   /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
195   /// (possibly with a predication suffix "e" or "t")
196   bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
197     if (!Mnemonic.starts_with("vcx"))
198       return false;
199     return CDEWithVPTSuffix.count(Mnemonic);
200   }
201 
202   /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
203   /// (possibly with a condition suffix)
204   bool isITPredicableCDEInstr(StringRef Mnemonic) {
205     if (!Mnemonic.starts_with("cx"))
206       return false;
207     return Mnemonic.starts_with("cx1a") || Mnemonic.starts_with("cx1da") ||
208            Mnemonic.starts_with("cx2a") || Mnemonic.starts_with("cx2da") ||
209            Mnemonic.starts_with("cx3a") || Mnemonic.starts_with("cx3da");
210   }
211 
212   /// Return true iff a given mnemonic is an integer CDE instruction with
213   /// dual-register destination
214   bool isCDEDualRegInstr(StringRef Mnemonic) {
215     if (!Mnemonic.starts_with("cx"))
216       return false;
217     return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
218            Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
219            Mnemonic == "cx3d" || Mnemonic == "cx3da";
220   }
221 };
222 
223 ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
224   for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
225                              "cx2", "cx2a", "cx2d", "cx2da",
226                              "cx3", "cx3a", "cx3d", "cx3da", })
227     CDE.insert(Mnemonic);
228   for (StringRef Mnemonic :
229        {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
230     CDE.insert(Mnemonic);
231     CDEWithVPTSuffix.insert(Mnemonic);
232     CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
233     CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
234   }
235 }
236 
/// ARMAsmParser - Target-specific assembly parser for ARM/Thumb. Besides the
/// MCTargetAsmParser interface it maintains the state needed to parse and
/// validate IT and VPT predication blocks, ARM EHABI unwinding directives
/// (via UnwindContext), and CDE mnemonic classification (via ARMMnemonicSets).
class ARMAsmParser : public MCTargetAsmParser {
  const MCRegisterInfo *MRI;
  UnwindContext UC;
  ARMMnemonicSets MS;

  ARMTargetStreamer &getTargetStreamer() {
    assert(getParser().getStreamer().getTargetStreamer() &&
           "do not have a target streamer");
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<ARMTargetStreamer &>(TS);
  }

  // Map of register aliases registers via the .req directive.
  StringMap<MCRegister> RegisterReqs;

  // Set by .thumb_func; the next label parsed is marked as a Thumb symbol.
  bool NextSymbolIsThumb;

  // Whether -arm-implicit-it permits synthesizing IT blocks in Thumb mode.
  bool useImplicitITThumb() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ThumbOnly;
  }

  // Whether -arm-implicit-it permits bare conditional instructions in ARM mode.
  bool useImplicitITARM() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ARMOnly;
  }

  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)
                              // Note that this does not have the same encoding
                              // as in the IT instruction, which also depends
                              // on the low bit of the condition code.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,4], with 0 being the IT
                              // instruction itself. Initialized according to
                              // count of instructions in block.  ~0U if no
                              // active IT block.

    bool IsExplicit;          // true  - The IT instruction was present in the
                              //         input, we should not modify it.
                              // false - The IT instruction was added
                              //         implicitly, we can extend it if that
                              //         would be legal.
  } ITState;

  // Instructions held back while an implicit IT block is still open; emitted
  // (preceded by the synthesized IT) by flushPendingInstructions().
  SmallVector<MCInst, 4> PendingConditionalInsts;

  void flushPendingInstructions(MCStreamer &Out) override {
    if (!inImplicitITBlock()) {
      assert(PendingConditionalInsts.size() == 0);
      return;
    }

    // Emit the IT instruction
    MCInst ITInst;
    ITInst.setOpcode(ARM::t2IT);
    ITInst.addOperand(MCOperand::createImm(ITState.Cond));
    ITInst.addOperand(MCOperand::createImm(ITState.Mask));
    Out.emitInstruction(ITInst, getSTI());

    // Emit the conditional instructions
    assert(PendingConditionalInsts.size() <= 4);
    for (const MCInst &Inst : PendingConditionalInsts) {
      Out.emitInstruction(Inst, getSTI());
    }
    PendingConditionalInsts.clear();

    // Clear the IT state
    ITState.Mask = 0;
    ITState.CurPosition = ~0U;
  }

  bool inITBlock() { return ITState.CurPosition != ~0U; }
  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }

  // True when the current slot is the final one implied by the mask.
  bool lastInITBlock() {
    return ITState.CurPosition == 4 - (unsigned)llvm::countr_zero(ITState.Mask);
  }

  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done, except for implicit IT blocks, which we leave
    // open until we find an instruction that can't be added to it.
    unsigned TZ = llvm::countr_zero(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  void rewindImplicitITPosition() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition > 1);
    ITState.CurPosition--;
    unsigned TZ = llvm::countr_zero(ITState.Mask);
    unsigned NewMask = 0;
    // Keep the condition bits above the removed slot, then re-place the
    // terminating 1 one position higher (shrinking the block by one slot).
    NewMask |= ITState.Mask & (0xC << TZ);
    NewMask |= 0x2 << TZ;
    ITState.Mask = NewMask;
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  // If we were at the first slot, this closes the IT block.
  void discardImplicitITBlock() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition == 1);
    ITState.CurPosition = ~0U;
  }

  // Get the condition code corresponding to the current IT block slot.
  ARMCC::CondCodes currentITCond() {
    unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
    return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
  }

  // Invert the condition of the current IT block slot without changing any
  // other slots in the same block.
  void invertCurrentITCondition() {
    if (ITState.CurPosition == 1) {
      // Slot 1 is the mandatory Then; flip the block's base condition instead.
      ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
    } else {
      ITState.Mask ^= 1 << (5 - ITState.CurPosition);
    }
  }

  // Returns true if the current IT block is full (all 4 slots used).
  bool isITBlockFull() {
    return inITBlock() && (ITState.Mask & 1);
  }

  // Extend the current implicit IT block to have one more slot with the given
  // condition code.
  void extendImplicitITBlock(ARMCC::CondCodes Cond) {
    assert(inImplicitITBlock());
    assert(!isITBlockFull());
    assert(Cond == ITState.Cond ||
           Cond == ARMCC::getOppositeCondition(ITState.Cond));
    unsigned TZ = llvm::countr_zero(ITState.Mask);
    unsigned NewMask = 0;
    // Keep any existing condition bits.
    NewMask |= ITState.Mask & (0xE << TZ);
    // Insert the new condition bit.
    NewMask |= (Cond != ITState.Cond) << TZ;
    // Move the trailing 1 down one bit.
    NewMask |= 1 << (TZ - 1);
    ITState.Mask = NewMask;
  }

  // Create a new implicit IT block with a dummy condition code.
  void startImplicitITBlock() {
    assert(!inITBlock());
    ITState.Cond = ARMCC::AL;
    ITState.Mask = 8;
    ITState.CurPosition = 1;
    ITState.IsExplicit = false;
  }

  // Create a new explicit IT block with the given condition and mask.
  // The mask should be in the format used in ARMOperand and
  // MCOperand, with a 1 implying 'e', regardless of the low bit of
  // the condition.
  void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
    assert(!inITBlock());
    ITState.Cond = Cond;
    ITState.Mask = Mask;
    ITState.CurPosition = 0;
    ITState.IsExplicit = true;
  }

  // State of the current VPT (MVE vector predication) block, mirroring the
  // mask/position scheme used for ITState above. ~0U means no active block.
  struct {
    unsigned Mask : 4;
    unsigned CurPosition;
  } VPTState;
  bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
  void forwardVPTPosition() {
    if (!inVPTBlock()) return;
    unsigned TZ = llvm::countr_zero(VPTState.Mask);
    if (++VPTState.CurPosition == 5 - TZ)
      VPTState.CurPosition = ~0U;
  }

  // Diagnostic helpers that forward to the underlying MCAsmParser.
  void Note(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
    return getParser().Note(L, Msg, Range);
  }

  bool Warning(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
    return getParser().Warning(L, Msg, Range);
  }

  bool Error(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
    return getParser().Error(L, Msg, Range);
  }

  bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned MnemonicOpsEndInd, unsigned ListIndex,
                           bool IsARPop = false);
  bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned MnemonicOpsEndInd, unsigned ListIndex);

  // Operand, register-list and memory-operand parsing helpers.
  MCRegister tryParseRegister(bool AllowOutofBoundReg = false);
  bool tryParseRegisterWithWriteBack(OperandVector &);
  int tryParseShiftRegister(OperandVector &);
  std::optional<ARM_AM::ShiftOpc> tryParseShiftToken();
  bool parseRegisterList(OperandVector &, bool EnforceOrder = true,
                         bool AllowRAAC = false, bool IsLazyLoadStore = false,
                         bool IsVSCCLRM = false);
  bool parseMemory(OperandVector &);
  bool parseOperand(OperandVector &, StringRef Mnemonic);
  bool parseImmExpr(int64_t &Out);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseLiteralValues(unsigned Size, SMLoc L);

  // Assembler directive handlers (.thumb, .arm, EHABI, SEH, etc.).
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveFPU(SMLoc L);
  bool parseDirectiveFnStart(SMLoc L);
  bool parseDirectiveFnEnd(SMLoc L);
  bool parseDirectiveCantUnwind(SMLoc L);
  bool parseDirectivePersonality(SMLoc L);
  bool parseDirectiveHandlerData(SMLoc L);
  bool parseDirectiveSetFP(SMLoc L);
  bool parseDirectivePad(SMLoc L);
  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
  bool parseDirectiveLtorg(SMLoc L);
  bool parseDirectiveEven(SMLoc L);
  bool parseDirectivePersonalityIndex(SMLoc L);
  bool parseDirectiveUnwindRaw(SMLoc L);
  bool parseDirectiveTLSDescSeq(SMLoc L);
  bool parseDirectiveMovSP(SMLoc L);
  bool parseDirectiveObjectArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveAlign(SMLoc L);
  bool parseDirectiveThumbSet(SMLoc L);

  bool parseDirectiveSEHAllocStack(SMLoc L, bool Wide);
  bool parseDirectiveSEHSaveRegs(SMLoc L, bool Wide);
  bool parseDirectiveSEHSaveSP(SMLoc L);
  bool parseDirectiveSEHSaveFRegs(SMLoc L);
  bool parseDirectiveSEHSaveLR(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment);
  bool parseDirectiveSEHNop(SMLoc L, bool Wide);
  bool parseDirectiveSEHEpilogStart(SMLoc L, bool Condition);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHCustom(SMLoc L);

  // Default (absent) predication/flag-setting operands.
  std::unique_ptr<ARMOperand> defaultCondCodeOp();
  std::unique_ptr<ARMOperand> defaultCCOutOp();
  std::unique_ptr<ARMOperand> defaultVPTPredOp();

  bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
  StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
                          ARMCC::CondCodes &PredicationCode,
                          ARMVCC::VPTCodes &VPTPredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
                             StringRef FullInst, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode,
                             bool &CanAcceptVPTPredicationCode);
  bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);

  void tryConvertingToTwoOperandForm(StringRef Mnemonic,
                                     ARMCC::CondCodes PredicationCode,
                                     bool CarrySetting, OperandVector &Operands,
                                     unsigned MnemonicOpsEndInd);

  bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands,
                                unsigned MnemonicOpsEndInd);

  // Subtarget feature predicates.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return getSTI().hasFeature(ARM::ModeThumb);
  }

  bool isThumbOne() const {
    return isThumb() && !getSTI().hasFeature(ARM::FeatureThumb2);
  }

  bool isThumbTwo() const {
    return isThumb() && getSTI().hasFeature(ARM::FeatureThumb2);
  }

  bool hasThumb() const {
    return getSTI().hasFeature(ARM::HasV4TOps);
  }

  bool hasThumb2() const {
    return getSTI().hasFeature(ARM::FeatureThumb2);
  }

  bool hasV6Ops() const {
    return getSTI().hasFeature(ARM::HasV6Ops);
  }

  bool hasV6T2Ops() const {
    return getSTI().hasFeature(ARM::HasV6T2Ops);
  }

  bool hasV6MOps() const {
    return getSTI().hasFeature(ARM::HasV6MOps);
  }

  bool hasV7Ops() const {
    return getSTI().hasFeature(ARM::HasV7Ops);
  }

  bool hasV8Ops() const {
    return getSTI().hasFeature(ARM::HasV8Ops);
  }

  bool hasV8MBaseline() const {
    return getSTI().hasFeature(ARM::HasV8MBaselineOps);
  }

  bool hasV8MMainline() const {
    return getSTI().hasFeature(ARM::HasV8MMainlineOps);
  }
  bool hasV8_1MMainline() const {
    return getSTI().hasFeature(ARM::HasV8_1MMainlineOps);
  }
  bool hasMVEFloat() const {
    return getSTI().hasFeature(ARM::HasMVEFloatOps);
  }
  bool hasCDE() const {
    return getSTI().hasFeature(ARM::HasCDEOps);
  }
  bool has8MSecExt() const {
    return getSTI().hasFeature(ARM::Feature8MSecExt);
  }

  bool hasARM() const {
    return !getSTI().hasFeature(ARM::FeatureNoARM);
  }

  bool hasDSP() const {
    return getSTI().hasFeature(ARM::FeatureDSP);
  }

  bool hasD32() const {
    return getSTI().hasFeature(ARM::FeatureD32);
  }

  bool hasV8_1aOps() const {
    return getSTI().hasFeature(ARM::HasV8_1aOps);
  }

  bool hasRAS() const {
    return getSTI().hasFeature(ARM::FeatureRAS);
  }

  // Toggle between ARM and Thumb mode, recomputing available features.
  void SwitchMode() {
    MCSubtargetInfo &STI = copySTI();
    auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }

  void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);

  bool isMClass() const {
    return getSTI().hasFeature(ARM::FeatureMClass);
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers, referenced from the generated matcher tables.
  ParseStatus parseITCondCode(OperandVector &);
  ParseStatus parseCoprocNumOperand(OperandVector &);
  ParseStatus parseCoprocRegOperand(OperandVector &);
  ParseStatus parseCoprocOptionOperand(OperandVector &);
  ParseStatus parseMemBarrierOptOperand(OperandVector &);
  ParseStatus parseTraceSyncBarrierOptOperand(OperandVector &);
  ParseStatus parseInstSyncBarrierOptOperand(OperandVector &);
  ParseStatus parseProcIFlagsOperand(OperandVector &);
  ParseStatus parseMSRMaskOperand(OperandVector &);
  ParseStatus parseBankedRegOperand(OperandVector &);
  ParseStatus parsePKHImm(OperandVector &O, ARM_AM::ShiftOpc, int Low,
                          int High);
  ParseStatus parsePKHLSLImm(OperandVector &O) {
    return parsePKHImm(O, ARM_AM::lsl, 0, 31);
  }
  ParseStatus parsePKHASRImm(OperandVector &O) {
    return parsePKHImm(O, ARM_AM::asr, 1, 32);
  }
  ParseStatus parseSetEndImm(OperandVector &);
  ParseStatus parseShifterImm(OperandVector &);
  ParseStatus parseRotImm(OperandVector &);
  ParseStatus parseModImm(OperandVector &);
  ParseStatus parseBitfield(OperandVector &);
  ParseStatus parsePostIdxReg(OperandVector &);
  ParseStatus parseAM3Offset(OperandVector &);
  ParseStatus parseFPImm(OperandVector &);
  ParseStatus parseVectorList(OperandVector &);
  ParseStatus parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
                              SMLoc &EndLoc);

  // Asm Match Converter Methods
  void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
  void cvtThumbBranches(MCInst &Inst, const OperandVector &);
  void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);

  bool validateInstruction(MCInst &Inst, const OperandVector &Ops,
                           unsigned MnemonicOpsEndInd);
  bool processInstruction(MCInst &Inst, const OperandVector &Ops,
                          unsigned MnemonicOpsEndInd, MCStreamer &Out);
  bool shouldOmitVectorPredicateOperand(StringRef Mnemonic,
                                        OperandVector &Operands,
                                        unsigned MnemonicOpsEndInd);
  bool isITBlockTerminator(MCInst &Inst) const;

  void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands,
                         unsigned MnemonicOpsEndInd);
  bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands, bool Load,
                        bool ARMMode, bool Writeback,
                        unsigned MnemonicOpsEndInd);

public:
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2,
    Match_RequiresV8,
    Match_RequiresFlagSetting,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "ARMGenAsmMatcher.inc"

  };

  ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
               const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
    MCAsmParserExtension::Initialize(Parser);

    // Cache the MCRegisterInfo.
    MRI = getContext().getRegisterInfo();

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Add build attributes based on the selected target.
    if (AddBuildAttributes)
      getTargetStreamer().emitTargetAttributes(STI);

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;

    VPTState.CurPosition = ~0U;

    NextSymbolIsThumb = false;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
  ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
                               SMLoc &EndLoc) override;
  bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseDirective(AsmToken DirectiveID) override;

  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned
  checkEarlyTargetMatchPredicate(MCInst &Inst,
                                 const OperandVector &Operands) override;

  bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
                            SmallVectorImpl<NearMissInfo> &NearMisses,
                            bool MatchingInlineAsm, bool &EmitInITBlock,
                            MCStreamer &Out);

  // A near-miss diagnostic ready for reporting: where, and what to say.
  struct NearMissMessage {
    SMLoc Loc;
    SmallString<128> Message;
  };

  const char *getCustomOperandDiag(ARMMatchResultTy MatchError);

  void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
                        SmallVectorImpl<NearMissMessage> &NearMissesOut,
                        SMLoc IDLoc, OperandVector &Operands);
  void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
                        OperandVector &Operands);

  MCSymbolRefExpr::VariantKind
  getVariantKindForName(StringRef Name) const override;

  void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override;

  void onLabelParsed(MCSymbol *Symbol) override;

  const MCInstrDesc &getInstrDesc(unsigned int Opcode) const {
    return MII.get(Opcode);
  }

  bool hasMVE() const { return getSTI().hasFeature(ARM::HasMVEIntegerOps); }

  // Return the low-subreg of a given Q register.
  MCRegister getDRegFromQReg(MCRegister QReg) const {
    return MRI->getSubReg(QReg, ARM::dsub_0);
  }

  const MCRegisterInfo *getMRI() const { return MRI; }
};
768 
/// ARMOperand - Instances of this class represent a parsed ARM machine
/// operand.
class ARMOperand : public MCParsedAsmOperand {
  // Discriminator for the anonymous union below: identifies which flavor of
  // operand this instance holds and therefore which union member is active.
  enum KindTy {
    k_CondCode,
    k_VPTPred,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_MemBarrierOpt,
    k_InstSyncBarrierOpt,
    k_TraceSyncBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_BankedReg,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_RegisterListWithAPSR,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_FPSRegisterListWithVPR,
    k_FPDRegisterListWithVPR,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_ModifiedImmediate,
    k_ConstantPoolImmediate,
    k_BitfieldDescriptor,
    k_Token,
  } Kind;

  // Source locations of the operand; AlignmentLoc is only meaningful for
  // k_Memory operands that carry an alignment specifier.
  SMLoc StartLoc, EndLoc, AlignmentLoc;
  // Register storage for the various register-list kinds.
  SmallVector<MCRegister, 8> Registers;

  // Back-pointer to the parser that created this operand.
  ARMAsmParser *Parser;

  // --- Payload structs, one per operand kind, stored in the union below. ---

  struct CCOp {
    ARMCC::CondCodes Val;
  };

  struct VCCOp {
    ARMVCC::VPTCodes Val;
  };

  struct CopOp {
    unsigned Val;
  };

  struct CoprocOptionOp {
    unsigned Val;
  };

  struct ITMaskOp {
    unsigned Mask:4;
  };

  struct MBOptOp {
    ARM_MB::MemBOpt Val;
  };

  struct ISBOptOp {
    ARM_ISB::InstSyncBOpt Val;
  };

  struct TSBOptOp {
    ARM_TSB::TraceSyncBOpt Val;
  };

  struct IFlagsOp {
    ARM_PROC::IFlags Val;
  };

  struct MMaskOp {
    unsigned Val;
  };

  struct BankedRegOp {
    unsigned Val;
  };

  // Raw token text; points into the source buffer, not owned.
  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    MCRegister RegNum;
  };

  // A vector register list is a sequential list of 1 to 4 registers.
  struct VectorListOp {
    MCRegister RegNum;
    unsigned Count;
    unsigned LaneIndex;
    bool isDoubleSpaced;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  /// Combined record for all forms of ARM address expressions.
  struct MemoryOp {
    MCRegister BaseRegNum;
    // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
    // was specified.
    const MCExpr *OffsetImm;  // Offset immediate value
    MCRegister OffsetRegNum;  // Offset register num, when OffsetImm == NULL
    ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
    unsigned ShiftImm;        // shift for OffsetReg.
    unsigned Alignment;       // 0 = no alignment specified
    // n = alignment in bytes (2, 4, 8, 16, or 32)
    unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
  };

  struct PostIdxRegOp {
    MCRegister RegNum;
    bool isAdd;
    ARM_AM::ShiftOpc ShiftTy;
    unsigned ShiftImm;
  };

  struct ShifterImmOp {
    bool isASR;
    unsigned Imm;
  };

  struct RegShiftedRegOp {
    ARM_AM::ShiftOpc ShiftTy;
    MCRegister SrcReg;
    MCRegister ShiftReg;
    unsigned ShiftImm;
  };

  struct RegShiftedImmOp {
    ARM_AM::ShiftOpc ShiftTy;
    MCRegister SrcReg;
    unsigned ShiftImm;
  };

  struct RotImmOp {
    unsigned Imm;
  };

  struct ModImmOp {
    unsigned Bits;
    unsigned Rot;
  };

  struct BitfieldOp {
    unsigned LSB;
    unsigned Width;
  };

  // Active member is selected by Kind (see the accessors below, which assert
  // the Kind before touching the union).
  union {
    struct CCOp CC;
    struct VCCOp VCC;
    struct CopOp Cop;
    struct CoprocOptionOp CoprocOption;
    struct MBOptOp MBOpt;
    struct ISBOptOp ISBOpt;
    struct TSBOptOp TSBOpt;
    struct ITMaskOp ITMask;
    struct IFlagsOp IFlags;
    struct MMaskOp MMask;
    struct BankedRegOp BankedReg;
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct MemoryOp Memory;
    struct PostIdxRegOp PostIdxReg;
    struct ShifterImmOp ShifterImm;
    struct RegShiftedRegOp RegShiftedReg;
    struct RegShiftedImmOp RegShiftedImm;
    struct RotImmOp RotImm;
    struct ModImmOp ModImm;
    struct BitfieldOp Bitfield;
  };
963 
public:
  // Operands are created via the static Create* factories (defined later in
  // this file); the constructor only records the kind and owning parser.
  ARMOperand(KindTy K, ARMAsmParser &Parser) : Kind(K), Parser(&Parser) {}

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getAlignmentLoc - Get the location of the Alignment token of this operand.
  SMLoc getAlignmentLoc() const {
    assert(Kind == k_Memory && "Invalid access!");
    return AlignmentLoc;
  }

  // --- Typed accessors; each asserts that the matching union member is
  // --- active before reading it.

  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  ARMVCC::VPTCodes getVPTPred() const {
    assert(isVPTPred() && "Invalid access!");
    return VCC.Val;
  }

  // Shared by coprocessor-number and coprocessor-register operands.
  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  MCRegister getReg() const override {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<MCRegister> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
            Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
            Kind == k_FPSRegisterListWithVPR ||
            Kind == k_FPDRegisterListWithVPR) &&
           "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getConstantPoolImm() const {
    assert(isConstantPoolImm() && "Invalid access!");
    return Imm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
    assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
    return ISBOpt.Val;
  }

  ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
    assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
    return TSBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  unsigned getBankedReg() const {
    assert(Kind == k_BankedReg && "Invalid access!");
    return BankedReg.Val;
  }

  // --- Kind predicates used by the auto-generated matcher. ---
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isVPTPred() const { return Kind == k_VPTPred; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  // Note: an IT condition-code operand is represented as k_CondCode, not
  // k_ITCondMask (which holds the mask).
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const override {
    return Kind == k_Immediate;
  }
1073 
1074   bool isARMBranchTarget() const {
1075     if (!isImm()) return false;
1076 
1077     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1078       return CE->getValue() % 4 == 0;
1079     return true;
1080   }
1081 
1082 
1083   bool isThumbBranchTarget() const {
1084     if (!isImm()) return false;
1085 
1086     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1087       return CE->getValue() % 2 == 0;
1088     return true;
1089   }
1090 
1091   // checks whether this operand is an unsigned offset which fits is a field
1092   // of specified width and scaled by a specific number of bits
1093   template<unsigned width, unsigned scale>
1094   bool isUnsignedOffset() const {
1095     if (!isImm()) return false;
1096     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1097     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1098       int64_t Val = CE->getValue();
1099       int64_t Align = 1LL << scale;
1100       int64_t Max = Align * ((1LL << width) - 1);
1101       return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
1102     }
1103     return false;
1104   }
1105 
1106   // checks whether this operand is an signed offset which fits is a field
1107   // of specified width and scaled by a specific number of bits
1108   template<unsigned width, unsigned scale>
1109   bool isSignedOffset() const {
1110     if (!isImm()) return false;
1111     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1112     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1113       int64_t Val = CE->getValue();
1114       int64_t Align = 1LL << scale;
1115       int64_t Max = Align * ((1LL << (width-1)) - 1);
1116       int64_t Min = -Align * (1LL << (width-1));
1117       return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
1118     }
1119     return false;
1120   }
1121 
1122   // checks whether this operand is an offset suitable for the LE /
1123   // LETP instructions in Arm v8.1M
1124   bool isLEOffset() const {
1125     if (!isImm()) return false;
1126     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1127     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1128       int64_t Val = CE->getValue();
1129       return Val < 0 && Val >= -4094 && (Val & 1) == 0;
1130     }
1131     return false;
1132   }
1133 
1134   // checks whether this operand is a memory operand computed as an offset
1135   // applied to PC. the offset may have 8 bits of magnitude and is represented
1136   // with two bits of shift. textually it may be either [pc, #imm], #imm or
1137   // relocable expression...
1138   bool isThumbMemPC() const {
1139     int64_t Val = 0;
1140     if (isImm()) {
1141       if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1142       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
1143       if (!CE) return false;
1144       Val = CE->getValue();
1145     }
1146     else if (isGPRMem()) {
1147       if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
1148       if(Memory.BaseRegNum != ARM::PC) return false;
1149       if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
1150         Val = CE->getValue();
1151       else
1152         return false;
1153     }
1154     else return false;
1155     return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
1156   }
1157 
1158   bool isFPImm() const {
1159     if (!isImm()) return false;
1160     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1161     if (!CE || !isUInt<32>(CE->getValue()))
1162       return false;
1163     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1164     return Val != -1;
1165   }
1166 
1167   template<int64_t N, int64_t M>
1168   bool isImmediate() const {
1169     if (!isImm()) return false;
1170     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1171     if (!CE) return false;
1172     int64_t Value = CE->getValue();
1173     return Value >= N && Value <= M;
1174   }
1175 
1176   template<int64_t N, int64_t M>
1177   bool isImmediateS4() const {
1178     if (!isImm()) return false;
1179     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1180     if (!CE) return false;
1181     int64_t Value = CE->getValue();
1182     return ((Value & 3) == 0) && Value >= N && Value <= M;
1183   }
1184   template<int64_t N, int64_t M>
1185   bool isImmediateS2() const {
1186     if (!isImm()) return false;
1187     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1188     if (!CE) return false;
1189     int64_t Value = CE->getValue();
1190     return ((Value & 1) == 0) && Value >= N && Value <= M;
1191   }
  // --- Named immediate-range predicates; the template arguments are the
  // --- inclusive bounds accepted by the corresponding instruction encodings.
  bool isFBits16() const {
    return isImmediate<0, 17>();
  }
  bool isFBits32() const {
    return isImmediate<1, 33>();
  }
  bool isImm8s4() const {
    return isImmediateS4<-1020, 1020>();
  }
  bool isImm7s4() const {
    return isImmediateS4<-508, 508>();
  }
  bool isImm7Shift0() const {
    return isImmediate<-127, 127>();
  }
  bool isImm7Shift1() const {
    return isImmediateS2<-255, 255>();
  }
  bool isImm7Shift2() const {
    return isImmediateS4<-511, 511>();
  }
  bool isImm7() const {
    return isImmediate<-127, 127>();
  }
  bool isImm0_1020s4() const {
    return isImmediateS4<0, 1020>();
  }
  bool isImm0_508s4() const {
    return isImmediateS4<0, 508>();
  }
  // Negated form: accepts constants whose negation is a multiple of 4 in
  // (0, 508].
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    // explicitly exclude zero. we want that to use the normal 0_508 version.
    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
  }

  // Negated form: accepts constants whose 32-bit negation is in (0, 4096).
  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    // isImm0_4095Neg is used with 32-bit immediates only.
    // 32-bit immediates are zero extended to 64-bit when parsed,
    // thus simple -CE->getValue() results in a big negative number,
    // not a small positive number as intended
    if ((CE->getValue() >> 32) > 0) return false;
    uint32_t Value = -static_cast<uint32_t>(CE->getValue());
    return Value > 0 && Value < 4096;
  }

  bool isImm0_7() const {
    return isImmediate<0, 7>();
  }

  bool isImm1_16() const {
    return isImmediate<1, 16>();
  }

  bool isImm1_32() const {
    return isImmediate<1, 32>();
  }

  bool isImm8_255() const {
    return isImmediate<8, 255>();
  }
1259 
1260   bool isImm0_255Expr() const {
1261     if (!isImm())
1262       return false;
1263     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1264     // If it's not a constant expression, it'll generate a fixup and be
1265     // handled later.
1266     if (!CE)
1267       return true;
1268     int64_t Value = CE->getValue();
1269     return isUInt<8>(Value);
1270   }
1271 
1272   bool isImm256_65535Expr() const {
1273     if (!isImm()) return false;
1274     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1275     // If it's not a constant expression, it'll generate a fixup and be
1276     // handled later.
1277     if (!CE) return true;
1278     int64_t Value = CE->getValue();
1279     return Value >= 256 && Value < 65536;
1280   }
1281 
1282   bool isImm0_65535Expr() const {
1283     if (!isImm()) return false;
1284     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1285     // If it's not a constant expression, it'll generate a fixup and be
1286     // handled later.
1287     if (!CE) return true;
1288     int64_t Value = CE->getValue();
1289     return Value >= 0 && Value < 65536;
1290   }
1291 
  bool isImm24bit() const {
    return isImmediate<0, 0xffffff + 1>();
  }

  // Thumb shift-right amount: 1..32 (33 exclusive upper bound via template).
  bool isImmThumbSR() const {
    return isImmediate<1, 33>();
  }

  // PKH instruction LSL shift amount.
  bool isPKHLSLImm() const {
    return isImmediate<0, 32>();
  }

  // PKH instruction ASR shift amount.
  bool isPKHASRImm() const {
    return isImmediate<0, 33>();
  }
1307 
1308   bool isAdrLabel() const {
1309     // If we have an immediate that's not a constant, treat it as a label
1310     // reference needing a fixup.
1311     if (isImm() && !isa<MCConstantExpr>(getImm()))
1312       return true;
1313 
1314     // If it is a constant, it must fit into a modified immediate encoding.
1315     if (!isImm()) return false;
1316     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1317     if (!CE) return false;
1318     int64_t Value = CE->getValue();
1319     return (ARM_AM::getSOImmVal(Value) != -1 ||
1320             ARM_AM::getSOImmVal(-Value) != -1);
1321   }
1322 
1323   bool isT2SOImm() const {
1324     // If we have an immediate that's not a constant, treat it as an expression
1325     // needing a fixup.
1326     if (isImm() && !isa<MCConstantExpr>(getImm())) {
1327       // We want to avoid matching :upper16: and :lower16: as we want these
1328       // expressions to match in isImm0_65535Expr()
1329       const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1330       return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
1331                              ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
1332     }
1333     if (!isImm()) return false;
1334     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1335     if (!CE) return false;
1336     int64_t Value = CE->getValue();
1337     return ARM_AM::getT2SOImmVal(Value) != -1;
1338   }
1339 
1340   bool isT2SOImmNot() const {
1341     if (!isImm()) return false;
1342     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1343     if (!CE) return false;
1344     int64_t Value = CE->getValue();
1345     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1346       ARM_AM::getT2SOImmVal(~Value) != -1;
1347   }
1348 
1349   bool isT2SOImmNeg() const {
1350     if (!isImm()) return false;
1351     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1352     if (!CE) return false;
1353     int64_t Value = CE->getValue();
1354     // Only use this when not representable as a plain so_imm.
1355     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1356       ARM_AM::getT2SOImmVal(-Value) != -1;
1357   }
1358 
1359   bool isSetEndImm() const {
1360     if (!isImm()) return false;
1361     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1362     if (!CE) return false;
1363     int64_t Value = CE->getValue();
1364     return Value == 1 || Value == 0;
1365   }
1366 
  bool isReg() const override { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  // A plain register list is also acceptable where an APSR-bearing list is
  // expected.
  bool isRegListWithAPSR() const {
    return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
  }
  bool isDReg() const {
    return isReg() &&
           ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg.RegNum);
  }
  bool isQReg() const {
    return isReg() &&
           ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg.RegNum);
  }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
  bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
  bool isToken() const override { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
  bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
  bool isMem() const override {
      return isGPRMem() || isMVEMem();
  }
  // A memory operand whose base is a GPR or MVE Q register and whose offset
  // register, if any, is an MVE Q register.
  bool isMVEMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;
    return true;
  }
1404   bool isGPRMem() const {
1405     if (Kind != k_Memory)
1406       return false;
1407     if (Memory.BaseRegNum &&
1408         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
1409       return false;
1410     if (Memory.OffsetRegNum &&
1411         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
1412       return false;
1413     return true;
1414   }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  // Register shifted by register: both registers must be GPRs.
  bool isRegShiftedReg() const {
    return Kind == k_ShiftedRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.SrcReg) &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.ShiftReg);
  }
  // Register shifted by immediate: the source register must be a GPR.
  bool isRegShiftedImm() const {
    return Kind == k_ShiftedImmediate &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedImm.SrcReg);
  }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
1429 
1430   template<unsigned Min, unsigned Max>
1431   bool isPowerTwoInRange() const {
1432     if (!isImm()) return false;
1433     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1434     if (!CE) return false;
1435     int64_t Value = CE->getValue();
1436     return Value > 0 && llvm::popcount((uint64_t)Value) == 1 && Value >= Min &&
1437            Value <= Max;
1438   }
  bool isModImm() const { return Kind == k_ModifiedImmediate; }

  // Usable when the bitwise NOT of the value is encodable as a modified
  // immediate (e.g. MVN instead of MOV).
  bool isModImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }
1448 
1449   bool isModImmNeg() const {
1450     if (!isImm()) return false;
1451     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1452     if (!CE) return false;
1453     int64_t Value = CE->getValue();
1454     return ARM_AM::getSOImmVal(Value) == -1 &&
1455       ARM_AM::getSOImmVal(-Value) != -1;
1456   }
1457 
1458   bool isThumbModImmNeg1_7() const {
1459     if (!isImm()) return false;
1460     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1461     if (!CE) return false;
1462     int32_t Value = -(int32_t)CE->getValue();
1463     return 0 < Value && Value < 8;
1464   }
1465 
1466   bool isThumbModImmNeg8_255() const {
1467     if (!isImm()) return false;
1468     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1469     if (!CE) return false;
1470     int32_t Value = -(int32_t)CE->getValue();
1471     return 7 < Value && Value < 256;
1472   }
1473 
  bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  // Post-indexed GPR, possibly with a shift applied.
  bool isPostIdxRegShifted() const {
    return Kind == k_PostIndexRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
  }
  // Post-indexed GPR with no shift.
  bool isPostIdxReg() const {
    return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
1483   bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1484     if (!isGPRMem())
1485       return false;
1486     // No offset of any kind.
1487     return !Memory.OffsetRegNum && Memory.OffsetImm == nullptr &&
1488            (alignOK || Memory.Alignment == Alignment);
1489   }
1490   bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
1491     if (!isGPRMem())
1492       return false;
1493 
1494     if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1495             Memory.BaseRegNum))
1496       return false;
1497 
1498     // No offset of any kind.
1499     return !Memory.OffsetRegNum && Memory.OffsetImm == nullptr &&
1500            (alignOK || Memory.Alignment == Alignment);
1501   }
1502   bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
1503     if (!isGPRMem())
1504       return false;
1505 
1506     if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
1507             Memory.BaseRegNum))
1508       return false;
1509 
1510     // No offset of any kind.
1511     return !Memory.OffsetRegNum && Memory.OffsetImm == nullptr &&
1512            (alignOK || Memory.Alignment == Alignment);
1513   }
1514   bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
1515     if (!isGPRMem())
1516       return false;
1517 
1518     if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
1519             Memory.BaseRegNum))
1520       return false;
1521 
1522     // No offset of any kind.
1523     return !Memory.OffsetRegNum && Memory.OffsetImm == nullptr &&
1524            (alignOK || Memory.Alignment == Alignment);
1525   }
1526   bool isMemPCRelImm12() const {
1527     if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1528       return false;
1529     // Base register must be PC.
1530     if (Memory.BaseRegNum != ARM::PC)
1531       return false;
1532     // Immediate offset in range [-4095, 4095].
1533     if (!Memory.OffsetImm) return true;
1534     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1535       int64_t Val = CE->getValue();
1536       return (Val > -4096 && Val < 4096) ||
1537              (Val == std::numeric_limits<int32_t>::min());
1538     }
1539     return false;
1540   }
1541 
  // --- Alignment-qualified memory predicates for NEON loads/stores. Each
  // --- accepts the exact alignment(s) the encoding supports, plus
  // --- "no alignment specified" (0).
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }

  bool isAlignedMemoryNone() const {
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemoryNone() const {
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory16() const {
    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory16() const {
    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory32() const {
    if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory32() const {
    if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory64() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64or128() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory64or128() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64or128or256() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
      return true;
    return isMemNoOffset(false, 0);
  }
1615 
1616   bool isAddrMode2() const {
1617     if (!isGPRMem() || Memory.Alignment != 0) return false;
1618     // Check for register offset.
1619     if (Memory.OffsetRegNum) return true;
1620     // Immediate offset in range [-4095, 4095].
1621     if (!Memory.OffsetImm) return true;
1622     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1623       int64_t Val = CE->getValue();
1624       return Val > -4096 && Val < 4096;
1625     }
1626     return false;
1627   }
1628 
1629   bool isAM2OffsetImm() const {
1630     if (!isImm()) return false;
1631     // Immediate offset in range [-4095, 4095].
1632     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1633     if (!CE) return false;
1634     int64_t Val = CE->getValue();
1635     return (Val == std::numeric_limits<int32_t>::min()) ||
1636            (Val > -4096 && Val < 4096);
1637   }
1638 
1639   bool isAddrMode3() const {
1640     // If we have an immediate that's not a constant, treat it as a label
1641     // reference needing a fixup. If it is a constant, it's something else
1642     // and we reject it.
1643     if (isImm() && !isa<MCConstantExpr>(getImm()))
1644       return true;
1645     if (!isGPRMem() || Memory.Alignment != 0) return false;
1646     // No shifts are legal for AM3.
1647     if (Memory.ShiftType != ARM_AM::no_shift) return false;
1648     // Check for register offset.
1649     if (Memory.OffsetRegNum) return true;
1650     // Immediate offset in range [-255, 255].
1651     if (!Memory.OffsetImm) return true;
1652     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1653       int64_t Val = CE->getValue();
1654       // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
1655       // we have to check for this too.
1656       return (Val > -256 && Val < 256) ||
1657              Val == std::numeric_limits<int32_t>::min();
1658     }
1659     return false;
1660   }
1661 
1662   bool isAM3Offset() const {
1663     if (isPostIdxReg())
1664       return true;
1665     if (!isImm())
1666       return false;
1667     // Immediate offset in range [-255, 255].
1668     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1669     if (!CE) return false;
1670     int64_t Val = CE->getValue();
1671     // Special case, #-0 is std::numeric_limits<int32_t>::min().
1672     return (Val > -256 && Val < 256) ||
1673            Val == std::numeric_limits<int32_t>::min();
1674   }
1675 
1676   bool isAddrMode5() const {
1677     // If we have an immediate that's not a constant, treat it as a label
1678     // reference needing a fixup. If it is a constant, it's something else
1679     // and we reject it.
1680     if (isImm() && !isa<MCConstantExpr>(getImm()))
1681       return true;
1682     if (!isGPRMem() || Memory.Alignment != 0) return false;
1683     // Check for register offset.
1684     if (Memory.OffsetRegNum) return false;
1685     // Immediate offset in range [-1020, 1020] and a multiple of 4.
1686     if (!Memory.OffsetImm) return true;
1687     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1688       int64_t Val = CE->getValue();
1689       return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1690              Val == std::numeric_limits<int32_t>::min();
1691     }
1692     return false;
1693   }
1694 
1695   bool isAddrMode5FP16() const {
1696     // If we have an immediate that's not a constant, treat it as a label
1697     // reference needing a fixup. If it is a constant, it's something else
1698     // and we reject it.
1699     if (isImm() && !isa<MCConstantExpr>(getImm()))
1700       return true;
1701     if (!isGPRMem() || Memory.Alignment != 0) return false;
1702     // Check for register offset.
1703     if (Memory.OffsetRegNum) return false;
1704     // Immediate offset in range [-510, 510] and a multiple of 2.
1705     if (!Memory.OffsetImm) return true;
1706     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1707       int64_t Val = CE->getValue();
1708       return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1709              Val == std::numeric_limits<int32_t>::min();
1710     }
1711     return false;
1712   }
1713 
1714   bool isMemTBB() const {
1715     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1716         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1717       return false;
1718     return true;
1719   }
1720 
1721   bool isMemTBH() const {
1722     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1723         Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1724         Memory.Alignment != 0 )
1725       return false;
1726     return true;
1727   }
1728 
1729   bool isMemRegOffset() const {
1730     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1731       return false;
1732     return true;
1733   }
1734 
1735   bool isT2MemRegOffset() const {
1736     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1737         Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1738       return false;
1739     // Only lsl #{0, 1, 2, 3} allowed.
1740     if (Memory.ShiftType == ARM_AM::no_shift)
1741       return true;
1742     if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1743       return false;
1744     return true;
1745   }
1746 
1747   bool isMemThumbRR() const {
1748     // Thumb reg+reg addressing is simple. Just two registers, a base and
1749     // an offset. No shifts, negations or any other complicating factors.
1750     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1751         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1752       return false;
1753     return isARMLowRegister(Memory.BaseRegNum) &&
1754       (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1755   }
1756 
1757   bool isMemThumbRIs4() const {
1758     if (!isGPRMem() || Memory.OffsetRegNum ||
1759         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1760       return false;
1761     // Immediate offset, multiple of 4 in range [0, 124].
1762     if (!Memory.OffsetImm) return true;
1763     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1764       int64_t Val = CE->getValue();
1765       return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1766     }
1767     return false;
1768   }
1769 
  bool isMemThumbRIs2() const {
    if (!isGPRMem() || Memory.OffsetRegNum ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62]. (The old comment
    // said "multiple of 4", which contradicted the `% 2` check below.)
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 62 && (Val % 2) == 0;
    }
    return false;
  }
1782 
1783   bool isMemThumbRIs1() const {
1784     if (!isGPRMem() || Memory.OffsetRegNum ||
1785         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1786       return false;
1787     // Immediate offset in range [0, 31].
1788     if (!Memory.OffsetImm) return true;
1789     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1790       int64_t Val = CE->getValue();
1791       return Val >= 0 && Val <= 31;
1792     }
1793     return false;
1794   }
1795 
1796   bool isMemThumbSPI() const {
1797     if (!isGPRMem() || Memory.OffsetRegNum || Memory.BaseRegNum != ARM::SP ||
1798         Memory.Alignment != 0)
1799       return false;
1800     // Immediate offset, multiple of 4 in range [0, 1020].
1801     if (!Memory.OffsetImm) return true;
1802     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1803       int64_t Val = CE->getValue();
1804       return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1805     }
1806     return false;
1807   }
1808 
1809   bool isMemImm8s4Offset() const {
1810     // If we have an immediate that's not a constant, treat it as a label
1811     // reference needing a fixup. If it is a constant, it's something else
1812     // and we reject it.
1813     if (isImm() && !isa<MCConstantExpr>(getImm()))
1814       return true;
1815     if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1816       return false;
1817     // Immediate offset a multiple of 4 in range [-1020, 1020].
1818     if (!Memory.OffsetImm) return true;
1819     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1820       int64_t Val = CE->getValue();
1821       // Special case, #-0 is std::numeric_limits<int32_t>::min().
1822       return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1823              Val == std::numeric_limits<int32_t>::min();
1824     }
1825     return false;
1826   }
1827 
1828   bool isMemImm7s4Offset() const {
1829     // If we have an immediate that's not a constant, treat it as a label
1830     // reference needing a fixup. If it is a constant, it's something else
1831     // and we reject it.
1832     if (isImm() && !isa<MCConstantExpr>(getImm()))
1833       return true;
1834     if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0 ||
1835         !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1836             Memory.BaseRegNum))
1837       return false;
1838     // Immediate offset a multiple of 4 in range [-508, 508].
1839     if (!Memory.OffsetImm) return true;
1840     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1841       int64_t Val = CE->getValue();
1842       // Special case, #-0 is INT32_MIN.
1843       return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
1844     }
1845     return false;
1846   }
1847 
1848   bool isMemImm0_1020s4Offset() const {
1849     if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1850       return false;
1851     // Immediate offset a multiple of 4 in range [0, 1020].
1852     if (!Memory.OffsetImm) return true;
1853     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1854       int64_t Val = CE->getValue();
1855       return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1856     }
1857     return false;
1858   }
1859 
1860   bool isMemImm8Offset() const {
1861     if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1862       return false;
1863     // Base reg of PC isn't allowed for these encodings.
1864     if (Memory.BaseRegNum == ARM::PC) return false;
1865     // Immediate offset in range [-255, 255].
1866     if (!Memory.OffsetImm) return true;
1867     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1868       int64_t Val = CE->getValue();
1869       return (Val == std::numeric_limits<int32_t>::min()) ||
1870              (Val > -256 && Val < 256);
1871     }
1872     return false;
1873   }
1874 
  // True for an immediate-offset memory operand whose offset is a
  // multiple of (1 << Bits), with the scaled value in [-127, 127], and
  // whose base register is in RegClassID.
  template<unsigned Bits, unsigned RegClassID>
  bool isMemImm7ShiftedOffset() const {
    if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0 ||
        !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
      return false;

    // Expect an immediate offset equal to an element of the range
    // [-127, 127], shifted left by Bits.

    // A missing offset counts as zero.
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();

      // INT32_MIN is a special-case value (indicating the encoding with
      // zero offset and the subtract bit set)
      if (Val == INT32_MIN)
        return true;

      unsigned Divisor = 1U << Bits;

      // Check that the low bits are zero
      if (Val % Divisor != 0)
        return false;

      // Check that the remaining offset is within range.
      Val /= Divisor;
      return (Val >= -127 && Val <= 127);
    }
    // Non-constant offset expressions are not accepted here.
    return false;
  }
1905 
  // True for an MVE [Rn, Qm] operand: a GPR (not PC) base plus an MVE
  // Q-register offset, optionally scaled by exactly 'uxtw #shift'.
  template <int shift> bool isMemRegRQOffset() const {
    if (!isMVEMem() || Memory.OffsetImm != nullptr || Memory.Alignment != 0)
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;

    // With shift == 0 no shift may be written at all.
    if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
      return false;

    // Otherwise the written shift must be exactly 'uxtw #shift'.
    if (shift > 0 &&
        (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
      return false;

    return true;
  }
1926 
1927   template <int shift> bool isMemRegQOffset() const {
1928     if (!isMVEMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1929       return false;
1930 
1931     if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1932             Memory.BaseRegNum))
1933       return false;
1934 
1935     if (!Memory.OffsetImm)
1936       return true;
1937     static_assert(shift < 56,
1938                   "Such that we dont shift by a value higher than 62");
1939     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1940       int64_t Val = CE->getValue();
1941 
1942       // The value must be a multiple of (1 << shift)
1943       if ((Val & ((1U << shift) - 1)) != 0)
1944         return false;
1945 
1946       // And be in the right range, depending on the amount that it is shifted
1947       // by.  Shift 0, is equal to 7 unsigned bits, the sign bit is set
1948       // separately.
1949       int64_t Range = (1U << (7 + shift)) - 1;
1950       return (Val == INT32_MIN) || (Val > -Range && Val < Range);
1951     }
1952     return false;
1953   }
1954 
1955   bool isMemPosImm8Offset() const {
1956     if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1957       return false;
1958     // Immediate offset in range [0, 255].
1959     if (!Memory.OffsetImm) return true;
1960     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1961       int64_t Val = CE->getValue();
1962       return Val >= 0 && Val < 256;
1963     }
1964     return false;
1965   }
1966 
1967   bool isMemNegImm8Offset() const {
1968     if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1969       return false;
1970     // Base reg of PC isn't allowed for these encodings.
1971     if (Memory.BaseRegNum == ARM::PC) return false;
1972     // Immediate offset in range [-255, -1].
1973     if (!Memory.OffsetImm) return false;
1974     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1975       int64_t Val = CE->getValue();
1976       return (Val == std::numeric_limits<int32_t>::min()) ||
1977              (Val > -256 && Val < 0);
1978     }
1979     return false;
1980   }
1981 
1982   bool isMemUImm12Offset() const {
1983     if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
1984       return false;
1985     // Immediate offset in range [0, 4095].
1986     if (!Memory.OffsetImm) return true;
1987     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1988       int64_t Val = CE->getValue();
1989       return (Val >= 0 && Val < 4096);
1990     }
1991     return false;
1992   }
1993 
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.

    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isGPRMem() || Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095]; a missing offset counts as
    // zero and #-0 is encoded as std::numeric_limits<int32_t>::min().
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return (Val > -4096 && Val < 4096) ||
             (Val == std::numeric_limits<int32_t>::min());
    }
    // If we have an immediate that's not a constant, treat it as a
    // symbolic expression needing a fixup.
    return true;
  }
2015 
2016   bool isConstPoolAsmImm() const {
2017     // Delay processing of Constant Pool Immediate, this will turn into
2018     // a constant. Match no other operand
2019     return (isConstantPoolImm());
2020   }
2021 
2022   bool isPostIdxImm8() const {
2023     if (!isImm()) return false;
2024     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2025     if (!CE) return false;
2026     int64_t Val = CE->getValue();
2027     return (Val > -256 && Val < 256) ||
2028            (Val == std::numeric_limits<int32_t>::min());
2029   }
2030 
2031   bool isPostIdxImm8s4() const {
2032     if (!isImm()) return false;
2033     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2034     if (!CE) return false;
2035     int64_t Val = CE->getValue();
2036     return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2037            (Val == std::numeric_limits<int32_t>::min());
2038   }
2039 
  // Simple kind discriminators for special-register masks, banked
  // registers, and processor IFlags operands.
  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isBankedReg() const { return Kind == k_BankedReg; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
2043 
  // NEON operands.

  // True for any of the three vector-list operand kinds: a plain list,
  // an all-lanes list, or a lane-indexed list.
  bool isAnyVectorList() const {
    return Kind == k_VectorList || Kind == k_VectorListAllLanes ||
           Kind == k_VectorListIndexed;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  // Single-spaced lists use consecutive D registers; double-spaced lists
  // skip every other one.
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
2059 
2060   bool isVecListOneD() const {
2061     // We convert a single D reg to a list containing a D reg
2062     if (isDReg() && !Parser->hasMVE())
2063       return true;
2064     if (!isSingleSpacedVectorList()) return false;
2065     return VectorList.Count == 1;
2066   }
2067 
2068   bool isVecListTwoMQ() const {
2069     return isSingleSpacedVectorList() && VectorList.Count == 2 &&
2070            ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2071                VectorList.RegNum);
2072   }
2073 
2074   bool isVecListDPair() const {
2075     // We convert a single Q reg to a list with the two corresponding D
2076     // registers
2077     if (isQReg() && !Parser->hasMVE())
2078       return true;
2079     if (!isSingleSpacedVectorList()) return false;
2080     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2081               .contains(VectorList.RegNum));
2082   }
2083 
2084   bool isVecListThreeD() const {
2085     if (!isSingleSpacedVectorList()) return false;
2086     return VectorList.Count == 3;
2087   }
2088 
2089   bool isVecListFourD() const {
2090     if (!isSingleSpacedVectorList()) return false;
2091     return VectorList.Count == 4;
2092   }
2093 
2094   bool isVecListDPairSpaced() const {
2095     if (Kind != k_VectorList) return false;
2096     if (isSingleSpacedVectorList()) return false;
2097     return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
2098               .contains(VectorList.RegNum));
2099   }
2100 
2101   bool isVecListThreeQ() const {
2102     if (!isDoubleSpacedVectorList()) return false;
2103     return VectorList.Count == 3;
2104   }
2105 
2106   bool isVecListFourQ() const {
2107     if (!isDoubleSpacedVectorList()) return false;
2108     return VectorList.Count == 4;
2109   }
2110 
2111   bool isVecListFourMQ() const {
2112     return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2113            ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2114                VectorList.RegNum);
2115   }
2116 
  // Spacing discriminators for all-lanes vector lists (the "vld1.8
  // {d0[]}" style of operand).
  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }
2124 
2125   bool isVecListOneDAllLanes() const {
2126     if (!isSingleSpacedVectorAllLanes()) return false;
2127     return VectorList.Count == 1;
2128   }
2129 
2130   bool isVecListDPairAllLanes() const {
2131     if (!isSingleSpacedVectorAllLanes()) return false;
2132     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2133               .contains(VectorList.RegNum));
2134   }
2135 
2136   bool isVecListDPairSpacedAllLanes() const {
2137     if (!isDoubleSpacedVectorAllLanes()) return false;
2138     return VectorList.Count == 2;
2139   }
2140 
2141   bool isVecListThreeDAllLanes() const {
2142     if (!isSingleSpacedVectorAllLanes()) return false;
2143     return VectorList.Count == 3;
2144   }
2145 
2146   bool isVecListThreeQAllLanes() const {
2147     if (!isDoubleSpacedVectorAllLanes()) return false;
2148     return VectorList.Count == 3;
2149   }
2150 
2151   bool isVecListFourDAllLanes() const {
2152     if (!isSingleSpacedVectorAllLanes()) return false;
2153     return VectorList.Count == 4;
2154   }
2155 
2156   bool isVecListFourQAllLanes() const {
2157     if (!isDoubleSpacedVectorAllLanes()) return false;
2158     return VectorList.Count == 4;
2159   }
2160 
  // Spacing discriminators for lane-indexed vector lists (the "vld1.8
  // {d0[2]}" style of operand).
  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }
2168 
2169   bool isVecListOneDByteIndexed() const {
2170     if (!isSingleSpacedVectorIndexed()) return false;
2171     return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2172   }
2173 
2174   bool isVecListOneDHWordIndexed() const {
2175     if (!isSingleSpacedVectorIndexed()) return false;
2176     return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2177   }
2178 
2179   bool isVecListOneDWordIndexed() const {
2180     if (!isSingleSpacedVectorIndexed()) return false;
2181     return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2182   }
2183 
2184   bool isVecListTwoDByteIndexed() const {
2185     if (!isSingleSpacedVectorIndexed()) return false;
2186     return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2187   }
2188 
2189   bool isVecListTwoDHWordIndexed() const {
2190     if (!isSingleSpacedVectorIndexed()) return false;
2191     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2192   }
2193 
2194   bool isVecListTwoQWordIndexed() const {
2195     if (!isDoubleSpacedVectorIndexed()) return false;
2196     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2197   }
2198 
2199   bool isVecListTwoQHWordIndexed() const {
2200     if (!isDoubleSpacedVectorIndexed()) return false;
2201     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2202   }
2203 
2204   bool isVecListTwoDWordIndexed() const {
2205     if (!isSingleSpacedVectorIndexed()) return false;
2206     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2207   }
2208 
2209   bool isVecListThreeDByteIndexed() const {
2210     if (!isSingleSpacedVectorIndexed()) return false;
2211     return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2212   }
2213 
2214   bool isVecListThreeDHWordIndexed() const {
2215     if (!isSingleSpacedVectorIndexed()) return false;
2216     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2217   }
2218 
2219   bool isVecListThreeQWordIndexed() const {
2220     if (!isDoubleSpacedVectorIndexed()) return false;
2221     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2222   }
2223 
2224   bool isVecListThreeQHWordIndexed() const {
2225     if (!isDoubleSpacedVectorIndexed()) return false;
2226     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2227   }
2228 
2229   bool isVecListThreeDWordIndexed() const {
2230     if (!isSingleSpacedVectorIndexed()) return false;
2231     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2232   }
2233 
2234   bool isVecListFourDByteIndexed() const {
2235     if (!isSingleSpacedVectorIndexed()) return false;
2236     return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2237   }
2238 
2239   bool isVecListFourDHWordIndexed() const {
2240     if (!isSingleSpacedVectorIndexed()) return false;
2241     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2242   }
2243 
2244   bool isVecListFourQWordIndexed() const {
2245     if (!isDoubleSpacedVectorIndexed()) return false;
2246     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2247   }
2248 
2249   bool isVecListFourQHWordIndexed() const {
2250     if (!isDoubleSpacedVectorIndexed()) return false;
2251     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2252   }
2253 
2254   bool isVecListFourDWordIndexed() const {
2255     if (!isSingleSpacedVectorIndexed()) return false;
2256     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2257   }
2258 
  bool isVectorIndex() const { return Kind == k_VectorIndex; }

  // True if this is a vector lane index strictly below NumLanes.
  template <unsigned NumLanes>
  bool isVectorIndexInRange() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < NumLanes;
  }

  // Lane-index predicates for 8/16/32/64-bit elements: 8/4/2/1 lanes
  // respectively.
  bool isVectorIndex8()  const { return isVectorIndexInRange<8>(); }
  bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
  bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
  bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }
2271 
2272   template<int PermittedValue, int OtherPermittedValue>
2273   bool isMVEPairVectorIndex() const {
2274     if (Kind != k_VectorIndex) return false;
2275     return VectorIndex.Val == PermittedValue ||
2276            VectorIndex.Val == OtherPermittedValue;
2277   }
2278 
2279   bool isNEONi8splat() const {
2280     if (!isImm()) return false;
2281     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2282     // Must be a constant.
2283     if (!CE) return false;
2284     int64_t Value = CE->getValue();
2285     // i8 value splatted across 8 bytes. The immediate is just the 8 byte
2286     // value.
2287     return Value >= 0 && Value < 256;
2288   }
2289 
2290   bool isNEONi16splat() const {
2291     if (isNEONByteReplicate(2))
2292       return false; // Leave that for bytes replication and forbid by default.
2293     if (!isImm())
2294       return false;
2295     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2296     // Must be a constant.
2297     if (!CE) return false;
2298     unsigned Value = CE->getValue();
2299     return ARM_AM::isNEONi16splat(Value);
2300   }
2301 
2302   bool isNEONi16splatNot() const {
2303     if (!isImm())
2304       return false;
2305     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2306     // Must be a constant.
2307     if (!CE) return false;
2308     unsigned Value = CE->getValue();
2309     return ARM_AM::isNEONi16splat(~Value & 0xffff);
2310   }
2311 
2312   bool isNEONi32splat() const {
2313     if (isNEONByteReplicate(4))
2314       return false; // Leave that for bytes replication and forbid by default.
2315     if (!isImm())
2316       return false;
2317     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2318     // Must be a constant.
2319     if (!CE) return false;
2320     unsigned Value = CE->getValue();
2321     return ARM_AM::isNEONi32splat(Value);
2322   }
2323 
2324   bool isNEONi32splatNot() const {
2325     if (!isImm())
2326       return false;
2327     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2328     // Must be a constant.
2329     if (!CE) return false;
2330     unsigned Value = CE->getValue();
2331     return ARM_AM::isNEONi32splat(~Value);
2332   }
2333 
2334   static bool isValidNEONi32vmovImm(int64_t Value) {
2335     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
2336     // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
2337     return ((Value & 0xffffffffffffff00) == 0) ||
2338            ((Value & 0xffffffffffff00ff) == 0) ||
2339            ((Value & 0xffffffffff00ffff) == 0) ||
2340            ((Value & 0xffffffff00ffffff) == 0) ||
2341            ((Value & 0xffffffffffff00ff) == 0xff) ||
2342            ((Value & 0xffffffffff00ffff) == 0xffff);
2343   }
2344 
  // True if the immediate is NumElems copies of the same Width-bit
  // element. When Inv is set, the value is bitwise inverted before the
  // check. Zero is rejected so more specific classes can match it.
  bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
    assert((Width == 8 || Width == 16 || Width == 32) &&
           "Invalid element width");
    assert(NumElems * Width <= 64 && "Invalid result width");

    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE)
      return false;
    int64_t Value = CE->getValue();
    if (!Value)
      return false; // Don't bother with zero.
    if (Inv)
      Value = ~Value;

    uint64_t Mask = (1ull << Width) - 1;
    uint64_t Elem = Value & Mask;
    // A 16-bit element must have all of its set bits in one byte.
    if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
      return false;
    // A 32-bit element must itself be a valid VMOV immediate.
    if (Width == 32 && !isValidNEONi32vmovImm(Elem))
      return false;

    // Every subsequent element must equal the first.
    for (unsigned i = 1; i < NumElems; ++i) {
      Value >>= Width;
      if ((Value & Mask) != Elem)
        return false;
    }
    return true;
  }

  // True if the immediate is one byte replicated NumBytes times.
  bool isNEONByteReplicate(unsigned NumBytes) const {
    return isNEONReplicate(8, NumBytes, false);
  }
2380 
  // Sanity-check the template arguments of the replicate predicates
  // below.
  static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
    assert((FromW == 8 || FromW == 16 || FromW == 32) &&
           "Invalid source width");
    assert((ToW == 16 || ToW == 32 || ToW == 64) &&
           "Invalid destination width");
    assert(FromW < ToW && "ToW is not less than FromW");
  }

  // True if the immediate is a FromW-bit value replicated up to ToW bits.
  template<unsigned FromW, unsigned ToW>
  bool isNEONmovReplicate() const {
    checkNeonReplicateArgs(FromW, ToW);
    // Full 64-bit splats are matched by isNEONi64splat instead.
    if (ToW == 64 && isNEONi64splat())
      return false;
    return isNEONReplicate(FromW, ToW / FromW, false);
  }

  // As isNEONmovReplicate, but the value is bitwise inverted before the
  // check.
  template<unsigned FromW, unsigned ToW>
  bool isNEONinvReplicate() const {
    checkNeonReplicateArgs(FromW, ToW);
    return isNEONReplicate(FromW, ToW / FromW, true);
  }
2402 
2403   bool isNEONi32vmov() const {
2404     if (isNEONByteReplicate(4))
2405       return false; // Let it to be classified as byte-replicate case.
2406     if (!isImm())
2407       return false;
2408     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2409     // Must be a constant.
2410     if (!CE)
2411       return false;
2412     return isValidNEONi32vmovImm(CE->getValue());
2413   }
2414 
2415   bool isNEONi32vmovNeg() const {
2416     if (!isImm()) return false;
2417     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2418     // Must be a constant.
2419     if (!CE) return false;
2420     return isValidNEONi32vmovImm(~CE->getValue());
2421   }
2422 
2423   bool isNEONi64splat() const {
2424     if (!isImm()) return false;
2425     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2426     // Must be a constant.
2427     if (!CE) return false;
2428     uint64_t Value = CE->getValue();
2429     // i64 value with each byte being either 0 or 0xff.
2430     for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2431       if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2432     return true;
2433   }
2434 
2435   template<int64_t Angle, int64_t Remainder>
2436   bool isComplexRotation() const {
2437     if (!isImm()) return false;
2438 
2439     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2440     if (!CE) return false;
2441     uint64_t Value = CE->getValue();
2442 
2443     return (Value % Angle == Remainder && Value <= 270);
2444   }
2445 
2446   bool isMVELongShift() const {
2447     if (!isImm()) return false;
2448     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2449     // Must be a constant.
2450     if (!CE) return false;
2451     uint64_t Value = CE->getValue();
2452     return Value >= 1 && Value <= 32;
2453   }
2454 
2455   bool isMveSaturateOp() const {
2456     if (!isImm()) return false;
2457     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2458     if (!CE) return false;
2459     uint64_t Value = CE->getValue();
2460     return Value == 48 || Value == 64;
2461   }
2462 
2463   bool isITCondCodeNoAL() const {
2464     if (!isITCondCode()) return false;
2465     ARMCC::CondCodes CC = getCondCode();
2466     return CC != ARMCC::AL;
2467   }
2468 
2469   bool isITCondCodeRestrictedI() const {
2470     if (!isITCondCode())
2471       return false;
2472     ARMCC::CondCodes CC = getCondCode();
2473     return CC == ARMCC::EQ || CC == ARMCC::NE;
2474   }
2475 
2476   bool isITCondCodeRestrictedS() const {
2477     if (!isITCondCode())
2478       return false;
2479     ARMCC::CondCodes CC = getCondCode();
2480     return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2481            CC == ARMCC::GE;
2482   }
2483 
2484   bool isITCondCodeRestrictedU() const {
2485     if (!isITCondCode())
2486       return false;
2487     ARMCC::CondCodes CC = getCondCode();
2488     return CC == ARMCC::HS || CC == ARMCC::HI;
2489   }
2490 
2491   bool isITCondCodeRestrictedFP() const {
2492     if (!isITCondCode())
2493       return false;
2494     ARMCC::CondCodes CC = getCondCode();
2495     return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2496            CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2497   }
2498 
  // Reinterpret this operand as a single-spaced two-register list holding
  // the D-pair register \p DPair.
  void setVecListDPair(unsigned int DPair) {
    Kind = k_VectorList;
    VectorList.RegNum = DPair;
    VectorList.Count = 2;
    VectorList.isDoubleSpaced = false;
  }

  // Reinterpret this operand as a single-spaced one-register list holding
  // \p DReg.
  void setVecListOneD(unsigned int DReg) {
    Kind = k_VectorList;
    VectorList.RegNum = DReg;
    VectorList.Count = 1;
    VectorList.isDoubleSpaced = false;
  }
2512 
2513   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2514     // Add as immediates when possible.  Null MCExpr = 0.
2515     if (!Expr)
2516       Inst.addOperand(MCOperand::createImm(0));
2517     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2518       Inst.addOperand(MCOperand::createImm(CE->getValue()));
2519     else
2520       Inst.addOperand(MCOperand::createExpr(Expr));
2521   }
2522 
  // Branch targets are emitted via addExpr(): a constant folds to a plain
  // immediate, anything else stays a symbolic expression for fixups.
  void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  // A condition code expands to two operands: the code itself plus CPSR
  // (or no register at all when the code is AL, i.e. unconditional).
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? ARM::NoRegister : ARM::CPSR;
    Inst.addOperand(MCOperand::createReg(RegNum));
  }
2539 
  // A vpred_n predicate expands to three operands: the VPT predication
  // code, the predicate register (P0, or no register when unpredicated),
  // and a placeholder register.
  void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
    unsigned RegNum = getVPTPred() == ARMVCC::None ? ARM::NoRegister : ARM::P0;
    Inst.addOperand(MCOperand::createReg(RegNum));
    Inst.addOperand(MCOperand::createReg(0));
  }

  // A vpred_r predicate is a vpred_n plus an "inactive lanes" register.
  // When predicated, that register is the output register the operand is
  // tied to (per the MCInstrDesc TIED_TO constraint).
  void addVPTPredROperands(MCInst &Inst, unsigned N) const {
    assert(N == 4 && "Invalid number of operands!");
    addVPTPredNOperands(Inst, N-1);
    MCRegister RegNum;
    if (getVPTPred() == ARMVCC::None) {
      RegNum = ARM::NoRegister;
    } else {
      // Find the output operand this inactive-lanes operand is tied to.
      unsigned NextOpIndex = Inst.getNumOperands();
      auto &MCID = Parser->getInstrDesc(Inst.getOpcode());
      int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
      assert(TiedOp >= 0 &&
             "Inactive register in vpred_r is not tied to an output!");
      RegNum = Inst.getOperand(TiedOp).getReg();
    }
    Inst.addOperand(MCOperand::createReg(RegNum));
  }
2564 
  // Coprocessor number (pN) as an immediate.
  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }

  // Coprocessor register (cN) as an immediate.
  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }

  // Coprocessor option ({#imm}) as an immediate.
  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
  }

  // IT-block mask bits as an immediate.
  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(ITMask.Mask));
  }
2584 
  // Condition code for an IT instruction, as an immediate.
  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
  }

  // Inverted condition code for an IT instruction (the "else" condition).
  void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(ARMCC::getOppositeCondition(getCondCode()))));
  }
2594 
  // Optional flags-setting output register (CPSR or NoRegister).
  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  // Plain register operand.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
2604 
  // Register shifted by register (e.g. "r0, lsl r1"): source register,
  // shift register, then the combined shift-type/amount opcode word.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non-RegShiftedReg!");
    Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::createImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }
2614 
2615   void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2616     assert(N == 2 && "Invalid number of operands!");
2617     assert(isRegShiftedImm() &&
2618            "addRegShiftedImmOperands() on non-RegShiftedImm!");
2619     Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2620     // Shift of #32 is encoded as 0 where permitted
2621     unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2622     Inst.addOperand(MCOperand::createImm(
2623       ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2624   }
2625 
  // Shifter immediate: bit 5 selects ASR (vs LSL), the low bits hold the
  // shift amount.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }
2631 
  // Register list ("{r0, r1, ...}"): one register operand per list entry.
  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<MCRegister> &RegList = getRegList();
    for (MCRegister Reg : RegList)
      Inst.addOperand(MCOperand::createReg(Reg));
  }
2638 
2639   void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2640     assert(N == 1 && "Invalid number of operands!");
2641     const SmallVectorImpl<MCRegister> &RegList = getRegList();
2642     for (MCRegister Reg : RegList)
2643       Inst.addOperand(MCOperand::createReg(Reg));
2644   }
2645 
  // The remaining register-list flavors are emitted identically to a plain
  // register list; the distinct operand kinds exist only for matching.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2661 
  // Rotate immediate (ror #8/#16/#24), stored encoded as value >> 3.
  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
  }
2667 
  // ARM modified immediate: 8-bit value with 4-bit rotation, packed as
  // bits | (rot << 7). Plain immediates (e.g. from fixups) are forwarded
  // to addImmOperands instead.
  void addModImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // Support for fixups (MCFixup)
    if (isImm())
      return addImmOperands(Inst, N);

    Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
  }
2677 
  // Modified immediate written as the bitwise NOT of the encodable value
  // (e.g. for MVN aliases): encode ~imm.
  void addModImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
    Inst.addOperand(MCOperand::createImm(Enc));
  }

  // Modified immediate written as the negation of the encodable value:
  // encode -imm.
  void addModImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
    Inst.addOperand(MCOperand::createImm(Enc));
  }
2691 
  // Thumb immediate written negated, in-range [-255,-8]: emit -imm.
  void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint32_t Val = -CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val));
  }

  // Thumb immediate written negated, in-range [-7,-1]: emit -imm.
  void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint32_t Val = -CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val));
  }
2705 
  // Bitfield (BFC/BFI-style) operand: fold lsb and width into the inverted
  // 32-bit mask the instruction encodes.
  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::createImm(Mask));
  }
2716 
  // Generic immediate: constant-folded or expression, via addExpr.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
2721 
  // Fixed-point fraction bits for 16-bit conversions: encoded as 16 - fbits.
  void addFBits16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
  }

  // Fixed-point fraction bits for 32-bit conversions: encoded as 32 - fbits.
  void addFBits32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
  }
2733 
  // VFP floating-point immediate: convert the 32-bit pattern to the 8-bit
  // VMOV-encodable form.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2740 
  // 8-bit immediate, multiple of 4 (LDRD/STRD): stored unscaled for now.
  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  // 7-bit immediate, multiple of 4 (VSTR/VLDR_VSYSR): stored unscaled.
  void addImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }
2756 
  // The imm7 variants below all store the constant as written; the shift-N
  // distinction is only a matching-time range/alignment constraint.
  void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }
2780 
  // Immediate in [0,1020], multiple of 4: stored scaled (value / 4).
  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }

  // Negated immediate in [0,508], multiple of 4: stored as -(value / 4).
  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
  }

  // Immediate in [0,508], multiple of 4: stored scaled (value / 4).
  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }
2804 
  // Immediate in [1,16]: encoded (and stored) as value - 1.
  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }

  // Immediate in [1,32]: encoded (and stored) as value - 1.
  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }
2820 
2821   void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2822     assert(N == 1 && "Invalid number of operands!");
2823     // The constant encodes as the immediate, except for 32, which encodes as
2824     // zero.
2825     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2826     unsigned Imm = CE->getValue();
2827     Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2828   }
2829 
2830   void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2831     assert(N == 1 && "Invalid number of operands!");
2832     // An ASR value of 32 encodes as 0, so that's how we want to add it to
2833     // the instruction as well.
2834     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2835     int Val = CE->getValue();
2836     Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2837   }
2838 
  // Thumb2 modified immediate written as the bitwise NOT of the encodable
  // value: emit ~imm.
  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
  }

  // Thumb2 modified immediate written negated: emit -imm.
  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }
2854 
  // imm0_4095 written negated in the source: emit -imm.
  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually an imm0_4095, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }
2862 
2863   void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2864     if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2865       Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2866       return;
2867     }
2868     const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2869     Inst.addOperand(MCOperand::createExpr(SR));
2870   }
2871 
  // Thumb PC-relative memory operand: either a bare immediate/label, or a
  // parsed memory operand whose offset must be constant.
  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    if (isImm()) {
      // Constant offsets go in directly; otherwise it must be a symbol
      // reference that a fixup will resolve.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
      if (CE) {
        Inst.addOperand(MCOperand::createImm(CE->getValue()));
        return;
      }
      const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
      Inst.addOperand(MCOperand::createExpr(SR));
      return;
    }

    assert(isGPRMem()  && "Unknown value type!");
    assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
2892 
  // DMB/DSB barrier option, as an immediate.
  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
  }

  // ISB barrier option, as an immediate.
  void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
  }

  // TSB barrier option, as an immediate.
  void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
  }
2907 
  // The MemNoOffset* variants all emit just the base register; the distinct
  // operand kinds differ only in matching-time constraints.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }
2927 
  // PC-relative 12-bit memory offset: constant folded to an immediate,
  // otherwise emitted as an expression.
  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
2935 
2936   void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2937     assert(N == 1 && "Invalid number of operands!");
2938     assert(isImm() && "Not an immediate!");
2939 
2940     // If we have an immediate that's not a constant, treat it as a label
2941     // reference needing a fixup.
2942     if (!isa<MCConstantExpr>(getImm())) {
2943       Inst.addOperand(MCOperand::createExpr(getImm()));
2944       return;
2945     }
2946 
2947     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2948     int Val = CE->getValue();
2949     Inst.addOperand(MCOperand::createImm(Val));
2950   }
2951 
  // Aligned memory operand (NEON loads/stores): base register plus the
  // alignment in bytes.
  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Memory.Alignment));
  }
2957 
  // The aligned-memory flavors below (per alignment constraint, with and
  // without element duplication) all emit identically; the distinct operand
  // kinds exist only for matching.
  void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
3001 
  // Addressing mode 2 (base + reg-or-imm offset with optional shift):
  // base register, offset register (0 if none), then the packed AM2 opcode
  // word holding add/sub, immediate, and shift information.
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    if (!Memory.OffsetRegNum) {
      if (!Memory.OffsetImm)
        Inst.addOperand(MCOperand::createImm(0));
      else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
        int32_t Val = CE->getValue();
        ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
        // Special case for #-0: INT32_MIN marks "-0", which encodes as a
        // subtraction of zero.
        if (Val == std::numeric_limits<int32_t>::min())
          Val = 0;
        if (Val < 0)
          Val = -Val;
        Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
        Inst.addOperand(MCOperand::createImm(Val));
      } else
        Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      int32_t Val =
          ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                            Memory.ShiftImm, Memory.ShiftType);
      Inst.addOperand(MCOperand::createImm(Val));
    }
  }
3030 
  // Post-indexed AM2 immediate offset: a zero register placeholder plus the
  // packed AM2 opcode word (add/sub + magnitude, no shift).
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::createReg(0));
    Inst.addOperand(MCOperand::createImm(Val));
  }
3044 
  // Addressing mode 3 (base + reg or 8-bit imm offset): base register,
  // offset register (0 if none), then the packed AM3 opcode word.
  // A bare immediate is treated as a label reference needing a fixup.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createReg(0));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    if (!Memory.OffsetRegNum) {
      if (!Memory.OffsetImm)
        Inst.addOperand(MCOperand::createImm(0));
      else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
        int32_t Val = CE->getValue();
        ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
        // Special case for #-0: INT32_MIN marks "-0", which encodes as a
        // subtraction of zero.
        if (Val == std::numeric_limits<int32_t>::min())
          Val = 0;
        if (Val < 0)
          Val = -Val;
        Val = ARM_AM::getAM3Opc(AddSub, Val);
        Inst.addOperand(MCOperand::createImm(Val));
      } else
        Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      int32_t Val =
          ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
      Inst.addOperand(MCOperand::createImm(Val));
    }
  }
3082 
3083   void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
3084     assert(N == 2 && "Invalid number of operands!");
3085     if (Kind == k_PostIndexRegister) {
3086       int32_t Val =
3087         ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
3088       Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3089       Inst.addOperand(MCOperand::createImm(Val));
3090       return;
3091     }
3092 
3093     // Constant offset.
3094     const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
3095     int32_t Val = CE->getValue();
3096     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3097     // Special case for #-0
3098     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3099     if (Val < 0) Val = -Val;
3100     Val = ARM_AM::getAM3Opc(AddSub, Val);
3101     Inst.addOperand(MCOperand::createReg(0));
3102     Inst.addOperand(MCOperand::createImm(Val));
3103   }
3104 
  // Addressing mode 5 (VFP load/store, base + imm8 scaled by 4): base
  // register plus the packed AM5 opcode word. A bare immediate is a label
  // reference needing a fixup.
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    if (!Memory.OffsetImm)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      // The lower two bits are always zero and as such are not encoded.
      int32_t Val = CE->getValue() / 4;
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == std::numeric_limits<int32_t>::min())
        Val = 0;
      if (Val < 0)
        Val = -Val;
      Val = ARM_AM::getAM5Opc(AddSub, Val);
      Inst.addOperand(MCOperand::createImm(Val));
    } else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
3133 
  // Addressing mode 5 for FP16 (base + imm8 scaled by 2): base register
  // plus the packed AM5FP16 opcode word. A bare immediate is a label
  // reference needing a fixup.
  void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    // The lower bit is always zero and as such is not encoded.
    if (!Memory.OffsetImm)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int32_t Val = CE->getValue() / 2;
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == std::numeric_limits<int32_t>::min())
        Val = 0;
      if (Val < 0)
        Val = -Val;
      Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
      Inst.addOperand(MCOperand::createImm(Val));
    } else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
3162 
  // Base register + imm8 (multiple of 4) offset; a bare immediate is a
  // label reference needing a fixup.
  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }

  // Base register + imm7 (multiple of 4) offset; same label handling.
  void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }
3192 
  // Base register + offset in [0,1020], multiple of 4: offset stored
  // scaled (value / 4).
  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    if (!Memory.OffsetImm)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      // The lower two bits are always zero and as such are not encoded.
      Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
3204 
  // Base register + unscaled immediate offset.
  void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }

  // Base register + Q-register offset (MVE gather/scatter style).
  void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }
3216 
  // Base register + unsigned 12-bit immediate offset; a bare immediate is
  // a label reference.
  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }

  // Base register + signed 12-bit immediate offset; same label handling.
  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }
3244 
  // Immediate destined for a constant pool (ldr rX, =imm syntax).
  void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // This is container for the immediate that we will create the constant
    // pool from
    addExpr(Inst, getConstantPoolImm());
  }
3251 
  // TBB table branch operand: base and index registers.
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }

  // TBH table branch operand: base and index registers (index is lsl #1
  // in the instruction syntax).
  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }
3263 
  // ARM-mode register-offset memory operand: base, offset register, and
  // the packed AM2 word carrying add/sub plus shift type/amount.
  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    unsigned Val =
      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                        Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
3273 
  // Thumb2 register-offset memory operand: base, offset register, and the
  // left-shift amount as a plain immediate.
  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
  }

  // Thumb1 register+register memory operand: base and offset registers.
  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }
3286 
  // Thumb reg + word-scaled immediate: a constant offset is emitted
  // divided by 4; a missing offset becomes 0; a non-constant offset is
  // passed through as an expression for later resolution.
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    if (!Memory.OffsetImm)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      // The lower two bits are always zero and as such are not encoded.
      Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }

  // Thumb reg + halfword-scaled immediate: constant offsets are emitted
  // divided by 2.
  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    if (!Memory.OffsetImm)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      // The low bit is always zero and as such is not encoded.
      Inst.addOperand(MCOperand::createImm(CE->getValue() / 2));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }

  // Thumb reg + byte immediate: unscaled, emitted via addExpr (which
  // handles both constant and symbolic offsets).
  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }

  // SP-relative, word-scaled immediate: same scaling as the RIs4 form.
  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    if (!Memory.OffsetImm)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      // The lower two bits are always zero and as such are not encoded.
      Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
3327 
  // Post-indexed 8-bit immediate: the magnitude goes in the low bits with
  // the add/subtract direction folded into bit 8 (1 = add).
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN cannot be negated; map it to 0 before taking the magnitude.
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  // Same as above but for the word-scaled (imm8s4) form: the magnitude is
  // divided by 4 before the direction bit is merged in.
  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN cannot be negated; map it to 0 before taking the magnitude.
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::createImm(Imm));
  }
3350 
  // Post-indexed register: the register plus a boolean immediate giving
  // the add/subtract direction.
  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
  }

  // Post-indexed shifted register: the register plus one packed immediate.
  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::createImm(Imm));
  }
3367 
  // Emit the immediate unchanged; the power-of-two constraint is
  // presumably enforced by the matching predicate — TODO confirm.
  void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  // MSR mask value, emitted as a plain immediate.
  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
  }

  // Banked register encoding, emitted as a plain immediate.
  void addBankedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
  }

  // CPS interrupt-flags bitmask, emitted as a plain immediate.
  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
  }
3388 
  // Emit a vector-list register operand. Outside MVE, a bare D or Q
  // register is also accepted here and converted to the list form.
  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (isAnyVectorList())
      Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
    else if (isDReg() && !Parser->hasMVE()) {
      // A bare D register is emitted directly.
      Inst.addOperand(MCOperand::createReg(Reg.RegNum));
    } else if (isQReg() && !Parser->hasMVE()) {
      // A bare Q register becomes the matching D-register pair in the
      // DPair register class.
      MCRegister DPair = Parser->getDRegFromQReg(Reg.RegNum);
      DPair = Parser->getMRI()->getMatchingSuperReg(
          DPair, ARM::dsub_0, &ARMMCRegisterClasses[ARM::DPairRegClassID]);
      Inst.addOperand(MCOperand::createReg(DPair));
    } else {
      LLVM_DEBUG(dbgs() << "TYPE: " << Kind << "\n");
      llvm_unreachable(
          "attempted to add a vector list register with wrong type!");
    }
  }
3407 
  void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // When we come here, the VectorList field will identify a range
    // of q-registers by its base register and length, and it will
    // have already been error-checked to be the expected length of
    // range and contain only q-regs in the range q0-q7. So we can
    // count on the base register being in the range q0-q6 (for 2
    // regs) or q0-q4 (for 4)
    //
    // The MVE instructions taking a register range of this kind will
    // need an operand in the MQQPR or MQQQQPR class, representing the
    // entire range as a unit. So we must translate into that class,
    // by finding the index of the base register in the MQPR reg
    // class, and returning the super-register at the corresponding
    // index in the target class.

    const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
    const MCRegisterClass *RC_out =
        (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
                                : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];

    // Search is bounded by the *output* class size, so a base register
    // too high to start a valid range trips the assert below.
    unsigned I, E = RC_out->getNumRegs();
    for (I = 0; I < E; I++)
      if (RC_in->getRegister(I) == VectorList.RegNum)
        break;
    assert(I < E && "Invalid vector list start register!");

    Inst.addOperand(MCOperand::createReg(RC_out->getRegister(I)));
  }
3438 
  // Vector list with a lane index: the list register plus the lane number.
  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
    Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
  }

  // All vector-index flavors below emit the lane number as a plain
  // immediate; the element-width distinction matters only for operand
  // matching, not for the emitted value.
  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3474 
  // NEON i8 splat: value in the low byte, type bits 0xe00 merged in.
  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
  }

  // NEON i16 splat: delegate the value+type packing to the AM helper.
  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi16splat(Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }

  // As above, but encode the 16-bit bitwise complement of the value
  // (used when the instruction was converted to its MVN/NOT form).
  void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
    Inst.addOperand(MCOperand::createImm(Value));
  }

  // NEON i32 splat: delegate the value+type packing to the AM helper.
  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi32splat(Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }

  // As above, but encode the bitwise complement of the value.
  void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi32splat(~Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }
3518 
  // Shared helper for the byte-replicate forms: emit the (optionally
  // inverted) low byte with cmode 0b1110 merged into bits [11:8].
  void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
            Inst.getOpcode() == ARM::VMOVv16i8) &&
          "All instructions that wants to replicate non-zero byte "
          "always must be replaced with VMOVv8i8 or VMOVv16i8.");
    unsigned Value = CE->getValue();
    if (Inv)
      Value = ~Value;
    unsigned B = Value & 0xff;
    B |= 0xe00; // cmode = 0b1110
    Inst.addOperand(MCOperand::createImm(B));
  }

  // Inverted-byte replicate: forwards to the helper with Inv = true.
  void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, true);
  }
3538 
3539   static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3540     if (Value >= 256 && Value <= 0xffff)
3541       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3542     else if (Value > 0xffff && Value <= 0xffffff)
3543       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3544     else if (Value > 0xffffff)
3545       Value = (Value >> 24) | 0x600;
3546     return Value;
3547   }
3548 
  // NEON 32-bit VMOV immediate: encode value + type bits via the helper.
  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
    Inst.addOperand(MCOperand::createImm(Value));
  }

  // Byte-replicate without inversion: forwards to the shared helper.
  void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, false);
  }
3561 
  // Halfword-replicate VMOV/VMVN immediate: encode the low 16 bits of the
  // value (shifted-byte form when >= 256).
  void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
            Inst.getOpcode() == ARM::VMOVv8i16 ||
            Inst.getOpcode() == ARM::VMVNv4i16 ||
            Inst.getOpcode() == ARM::VMVNv8i16) &&
          "All instructions that want to replicate non-zero half-word "
          "always must be replaced with V{MOV,MVN}v{4,8}i16.");
    uint64_t Value = CE->getValue();
    unsigned Elem = Value & 0xffff;
    // For Elem >= 256 the low byte is presumably zero (enforced by the
    // operand predicate — TODO confirm); encode the high byte with the
    // shifted-byte type bits.
    if (Elem >= 256)
      Elem = (Elem >> 8) | 0x200;
    Inst.addOperand(MCOperand::createImm(Elem));
  }

  // 32-bit VMOV immediate of the complemented value (VMVN-style form).
  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
    Inst.addOperand(MCOperand::createImm(Value));
  }

  // Word-replicate VMOV/VMVN immediate: encode the low 32 bits.
  void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
            Inst.getOpcode() == ARM::VMOVv4i32 ||
            Inst.getOpcode() == ARM::VMVNv2i32 ||
            Inst.getOpcode() == ARM::VMVNv4i32) &&
          "All instructions that want to replicate non-zero word "
          "always must be replaced with V{MOV,MVN}v{2,4}i32.");
    uint64_t Value = CE->getValue();
    unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
    Inst.addOperand(MCOperand::createImm(Elem));
  }
3599 
  // NEON i64 splat: compress the 64-bit value to one bit per byte (bit i
  // of the result is bit 0 of byte i; each byte is expected to be all
  // zeros or all ones for this operand form), then merge type bits 0x1e00.
  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
  }
3611 
  // Even complex rotation: 0/90/180/270 degrees maps to 0..3.
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
  }

  // Odd complex rotation: 90 maps to 0, 270 maps to 1.
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
  }

  // MVE saturate position: 48 encodes as 1, 64 as 0.
  void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
    Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0));
  }
3631 
3632   void print(raw_ostream &OS) const override;
3633 
3634   static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S,
3635                                                   ARMAsmParser &Parser) {
3636     auto Op = std::make_unique<ARMOperand>(k_ITCondMask, Parser);
3637     Op->ITMask.Mask = Mask;
3638     Op->StartLoc = S;
3639     Op->EndLoc = S;
3640     return Op;
3641   }
3642 
3643   static std::unique_ptr<ARMOperand>
3644   CreateCondCode(ARMCC::CondCodes CC, SMLoc S, ARMAsmParser &Parser) {
3645     auto Op = std::make_unique<ARMOperand>(k_CondCode, Parser);
3646     Op->CC.Val = CC;
3647     Op->StartLoc = S;
3648     Op->EndLoc = S;
3649     return Op;
3650   }
3651 
3652   static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC, SMLoc S,
3653                                                    ARMAsmParser &Parser) {
3654     auto Op = std::make_unique<ARMOperand>(k_VPTPred, Parser);
3655     Op->VCC.Val = CC;
3656     Op->StartLoc = S;
3657     Op->EndLoc = S;
3658     return Op;
3659   }
3660 
3661   static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S,
3662                                                      ARMAsmParser &Parser) {
3663     auto Op = std::make_unique<ARMOperand>(k_CoprocNum, Parser);
3664     Op->Cop.Val = CopVal;
3665     Op->StartLoc = S;
3666     Op->EndLoc = S;
3667     return Op;
3668   }
3669 
3670   static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S,
3671                                                      ARMAsmParser &Parser) {
3672     auto Op = std::make_unique<ARMOperand>(k_CoprocReg, Parser);
3673     Op->Cop.Val = CopVal;
3674     Op->StartLoc = S;
3675     Op->EndLoc = S;
3676     return Op;
3677   }
3678 
3679   static std::unique_ptr<ARMOperand>
3680   CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3681     auto Op = std::make_unique<ARMOperand>(k_CoprocOption, Parser);
3682     Op->Cop.Val = Val;
3683     Op->StartLoc = S;
3684     Op->EndLoc = E;
3685     return Op;
3686   }
3687 
3688   static std::unique_ptr<ARMOperand> CreateCCOut(MCRegister Reg, SMLoc S,
3689                                                  ARMAsmParser &Parser) {
3690     auto Op = std::make_unique<ARMOperand>(k_CCOut, Parser);
3691     Op->Reg.RegNum = Reg;
3692     Op->StartLoc = S;
3693     Op->EndLoc = S;
3694     return Op;
3695   }
3696 
3697   static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S,
3698                                                  ARMAsmParser &Parser) {
3699     auto Op = std::make_unique<ARMOperand>(k_Token, Parser);
3700     Op->Tok.Data = Str.data();
3701     Op->Tok.Length = Str.size();
3702     Op->StartLoc = S;
3703     Op->EndLoc = S;
3704     return Op;
3705   }
3706 
3707   static std::unique_ptr<ARMOperand> CreateReg(MCRegister Reg, SMLoc S, SMLoc E,
3708                                                ARMAsmParser &Parser) {
3709     auto Op = std::make_unique<ARMOperand>(k_Register, Parser);
3710     Op->Reg.RegNum = Reg;
3711     Op->StartLoc = S;
3712     Op->EndLoc = E;
3713     return Op;
3714   }
3715 
3716   static std::unique_ptr<ARMOperand>
3717   CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, MCRegister SrcReg,
3718                         MCRegister ShiftReg, unsigned ShiftImm, SMLoc S,
3719                         SMLoc E, ARMAsmParser &Parser) {
3720     auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister, Parser);
3721     Op->RegShiftedReg.ShiftTy = ShTy;
3722     Op->RegShiftedReg.SrcReg = SrcReg;
3723     Op->RegShiftedReg.ShiftReg = ShiftReg;
3724     Op->RegShiftedReg.ShiftImm = ShiftImm;
3725     Op->StartLoc = S;
3726     Op->EndLoc = E;
3727     return Op;
3728   }
3729 
3730   static std::unique_ptr<ARMOperand>
3731   CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, MCRegister SrcReg,
3732                          unsigned ShiftImm, SMLoc S, SMLoc E,
3733                          ARMAsmParser &Parser) {
3734     auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate, Parser);
3735     Op->RegShiftedImm.ShiftTy = ShTy;
3736     Op->RegShiftedImm.SrcReg = SrcReg;
3737     Op->RegShiftedImm.ShiftImm = ShiftImm;
3738     Op->StartLoc = S;
3739     Op->EndLoc = E;
3740     return Op;
3741   }
3742 
3743   static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3744                                                       SMLoc S, SMLoc E,
3745                                                       ARMAsmParser &Parser) {
3746     auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate, Parser);
3747     Op->ShifterImm.isASR = isASR;
3748     Op->ShifterImm.Imm = Imm;
3749     Op->StartLoc = S;
3750     Op->EndLoc = E;
3751     return Op;
3752   }
3753 
3754   static std::unique_ptr<ARMOperand>
3755   CreateRotImm(unsigned Imm, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3756     auto Op = std::make_unique<ARMOperand>(k_RotateImmediate, Parser);
3757     Op->RotImm.Imm = Imm;
3758     Op->StartLoc = S;
3759     Op->EndLoc = E;
3760     return Op;
3761   }
3762 
3763   static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3764                                                   SMLoc S, SMLoc E,
3765                                                   ARMAsmParser &Parser) {
3766     auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate, Parser);
3767     Op->ModImm.Bits = Bits;
3768     Op->ModImm.Rot = Rot;
3769     Op->StartLoc = S;
3770     Op->EndLoc = E;
3771     return Op;
3772   }
3773 
3774   static std::unique_ptr<ARMOperand>
3775   CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E,
3776                         ARMAsmParser &Parser) {
3777     auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate, Parser);
3778     Op->Imm.Val = Val;
3779     Op->StartLoc = S;
3780     Op->EndLoc = E;
3781     return Op;
3782   }
3783 
3784   static std::unique_ptr<ARMOperand> CreateBitfield(unsigned LSB,
3785                                                     unsigned Width, SMLoc S,
3786                                                     SMLoc E,
3787                                                     ARMAsmParser &Parser) {
3788     auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor, Parser);
3789     Op->Bitfield.LSB = LSB;
3790     Op->Bitfield.Width = Width;
3791     Op->StartLoc = S;
3792     Op->EndLoc = E;
3793     return Op;
3794   }
3795 
  // Build a register-list operand, classifying the list by the register
  // class of its first register and by a trailing VPR/APSR, which select
  // the "...WithVPR"/"...WithAPSR" list kinds.
  static std::unique_ptr<ARMOperand>
  CreateRegList(SmallVectorImpl<std::pair<unsigned, MCRegister>> &Regs,
                SMLoc StartLoc, SMLoc EndLoc, ARMAsmParser &Parser) {
    assert(Regs.size() > 0 && "RegList contains no registers?");
    KindTy Kind = k_RegisterList;

    // A list starting with a D register is a floating-point D list; a
    // trailing VPR switches it to the WithVPR variant.
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
            Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPDRegisterListWithVPR;
      else
        Kind = k_DPRRegisterList;
    } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
                   Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPSRegisterListWithVPR;
      else
        Kind = k_SPRRegisterList;
    } else if (Regs.front().second == ARM::VPR) {
      assert(Regs.size() == 1 &&
             "Register list starting with VPR expected to only contain VPR");
      Kind = k_FPSRegisterListWithVPR;
    }

    if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
      Kind = k_RegisterListWithAPSR;

    assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");

    // Only the registers are stored; the encoding keys (pair.first) were
    // needed just for the sortedness check above.
    auto Op = std::make_unique<ARMOperand>(Kind, Parser);
    for (const auto &P : Regs)
      Op->Registers.push_back(P.second);

    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }
3833 
3834   static std::unique_ptr<ARMOperand>
3835   CreateVectorList(MCRegister Reg, unsigned Count, bool isDoubleSpaced, SMLoc S,
3836                    SMLoc E, ARMAsmParser &Parser) {
3837     auto Op = std::make_unique<ARMOperand>(k_VectorList, Parser);
3838     Op->VectorList.RegNum = Reg;
3839     Op->VectorList.Count = Count;
3840     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3841     Op->StartLoc = S;
3842     Op->EndLoc = E;
3843     return Op;
3844   }
3845 
3846   static std::unique_ptr<ARMOperand>
3847   CreateVectorListAllLanes(MCRegister Reg, unsigned Count, bool isDoubleSpaced,
3848                            SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3849     auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes, Parser);
3850     Op->VectorList.RegNum = Reg;
3851     Op->VectorList.Count = Count;
3852     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3853     Op->StartLoc = S;
3854     Op->EndLoc = E;
3855     return Op;
3856   }
3857 
3858   static std::unique_ptr<ARMOperand>
3859   CreateVectorListIndexed(MCRegister Reg, unsigned Count, unsigned Index,
3860                           bool isDoubleSpaced, SMLoc S, SMLoc E,
3861                           ARMAsmParser &Parser) {
3862     auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed, Parser);
3863     Op->VectorList.RegNum = Reg;
3864     Op->VectorList.Count = Count;
3865     Op->VectorList.LaneIndex = Index;
3866     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3867     Op->StartLoc = S;
3868     Op->EndLoc = E;
3869     return Op;
3870   }
3871 
3872   static std::unique_ptr<ARMOperand> CreateVectorIndex(unsigned Idx, SMLoc S,
3873                                                        SMLoc E, MCContext &Ctx,
3874                                                        ARMAsmParser &Parser) {
3875     auto Op = std::make_unique<ARMOperand>(k_VectorIndex, Parser);
3876     Op->VectorIndex.Val = Idx;
3877     Op->StartLoc = S;
3878     Op->EndLoc = E;
3879     return Op;
3880   }
3881 
3882   static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3883                                                SMLoc E, ARMAsmParser &Parser) {
3884     auto Op = std::make_unique<ARMOperand>(k_Immediate, Parser);
3885     Op->Imm.Val = Val;
3886     Op->StartLoc = S;
3887     Op->EndLoc = E;
3888     return Op;
3889   }
3890 
3891   static std::unique_ptr<ARMOperand>
3892   CreateMem(MCRegister BaseReg, const MCExpr *OffsetImm, MCRegister OffsetReg,
3893             ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
3894             bool isNegative, SMLoc S, SMLoc E, ARMAsmParser &Parser,
3895             SMLoc AlignmentLoc = SMLoc()) {
3896     auto Op = std::make_unique<ARMOperand>(k_Memory, Parser);
3897     Op->Memory.BaseRegNum = BaseReg;
3898     Op->Memory.OffsetImm = OffsetImm;
3899     Op->Memory.OffsetRegNum = OffsetReg;
3900     Op->Memory.ShiftType = ShiftType;
3901     Op->Memory.ShiftImm = ShiftImm;
3902     Op->Memory.Alignment = Alignment;
3903     Op->Memory.isNegative = isNegative;
3904     Op->StartLoc = S;
3905     Op->EndLoc = E;
3906     Op->AlignmentLoc = AlignmentLoc;
3907     return Op;
3908   }
3909 
3910   static std::unique_ptr<ARMOperand>
3911   CreatePostIdxReg(MCRegister Reg, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3912                    unsigned ShiftImm, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3913     auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister, Parser);
3914     Op->PostIdxReg.RegNum = Reg;
3915     Op->PostIdxReg.isAdd = isAdd;
3916     Op->PostIdxReg.ShiftTy = ShiftTy;
3917     Op->PostIdxReg.ShiftImm = ShiftImm;
3918     Op->StartLoc = S;
3919     Op->EndLoc = E;
3920     return Op;
3921   }
3922 
3923   static std::unique_ptr<ARMOperand>
3924   CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S, ARMAsmParser &Parser) {
3925     auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt, Parser);
3926     Op->MBOpt.Val = Opt;
3927     Op->StartLoc = S;
3928     Op->EndLoc = S;
3929     return Op;
3930   }
3931 
3932   static std::unique_ptr<ARMOperand>
3933   CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S,
3934                            ARMAsmParser &Parser) {
3935     auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt, Parser);
3936     Op->ISBOpt.Val = Opt;
3937     Op->StartLoc = S;
3938     Op->EndLoc = S;
3939     return Op;
3940   }
3941 
3942   static std::unique_ptr<ARMOperand>
3943   CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S,
3944                             ARMAsmParser &Parser) {
3945     auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt, Parser);
3946     Op->TSBOpt.Val = Opt;
3947     Op->StartLoc = S;
3948     Op->EndLoc = S;
3949     return Op;
3950   }
3951 
3952   static std::unique_ptr<ARMOperand>
3953   CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S, ARMAsmParser &Parser) {
3954     auto Op = std::make_unique<ARMOperand>(k_ProcIFlags, Parser);
3955     Op->IFlags.Val = IFlags;
3956     Op->StartLoc = S;
3957     Op->EndLoc = S;
3958     return Op;
3959   }
3960 
3961   static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S,
3962                                                    ARMAsmParser &Parser) {
3963     auto Op = std::make_unique<ARMOperand>(k_MSRMask, Parser);
3964     Op->MMask.Val = MMask;
3965     Op->StartLoc = S;
3966     Op->EndLoc = S;
3967     return Op;
3968   }
3969 
3970   static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S,
3971                                                      ARMAsmParser &Parser) {
3972     auto Op = std::make_unique<ARMOperand>(k_BankedReg, Parser);
3973     Op->BankedReg.Val = Reg;
3974     Op->StartLoc = S;
3975     Op->EndLoc = S;
3976     return Op;
3977   }
3978 };
3979 
3980 } // end anonymous namespace.
3981 
3982 void ARMOperand::print(raw_ostream &OS) const {
3983   auto RegName = [](MCRegister Reg) {
3984     if (Reg)
3985       return ARMInstPrinter::getRegisterName(Reg);
3986     else
3987       return "noreg";
3988   };
3989 
3990   switch (Kind) {
3991   case k_CondCode:
3992     OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3993     break;
3994   case k_VPTPred:
3995     OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3996     break;
3997   case k_CCOut:
3998     OS << "<ccout " << RegName(getReg()) << ">";
3999     break;
4000   case k_ITCondMask: {
4001     static const char *const MaskStr[] = {
4002       "(invalid)", "(tttt)", "(ttt)", "(ttte)",
4003       "(tt)",      "(ttet)", "(tte)", "(ttee)",
4004       "(t)",       "(tett)", "(tet)", "(tete)",
4005       "(te)",      "(teet)", "(tee)", "(teee)",
4006     };
4007     assert((ITMask.Mask & 0xf) == ITMask.Mask);
4008     OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
4009     break;
4010   }
4011   case k_CoprocNum:
4012     OS << "<coprocessor number: " << getCoproc() << ">";
4013     break;
4014   case k_CoprocReg:
4015     OS << "<coprocessor register: " << getCoproc() << ">";
4016     break;
4017   case k_CoprocOption:
4018     OS << "<coprocessor option: " << CoprocOption.Val << ">";
4019     break;
4020   case k_MSRMask:
4021     OS << "<mask: " << getMSRMask() << ">";
4022     break;
4023   case k_BankedReg:
4024     OS << "<banked reg: " << getBankedReg() << ">";
4025     break;
4026   case k_Immediate:
4027     OS << *getImm();
4028     break;
4029   case k_MemBarrierOpt:
4030     OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
4031     break;
4032   case k_InstSyncBarrierOpt:
4033     OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
4034     break;
4035   case k_TraceSyncBarrierOpt:
4036     OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
4037     break;
4038   case k_Memory:
4039     OS << "<memory";
4040     if (Memory.BaseRegNum)
4041       OS << " base:" << RegName(Memory.BaseRegNum);
4042     if (Memory.OffsetImm)
4043       OS << " offset-imm:" << *Memory.OffsetImm;
4044     if (Memory.OffsetRegNum)
4045       OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
4046          << RegName(Memory.OffsetRegNum);
4047     if (Memory.ShiftType != ARM_AM::no_shift) {
4048       OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
4049       OS << " shift-imm:" << Memory.ShiftImm;
4050     }
4051     if (Memory.Alignment)
4052       OS << " alignment:" << Memory.Alignment;
4053     OS << ">";
4054     break;
4055   case k_PostIndexRegister:
4056     OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
4057        << RegName(PostIdxReg.RegNum);
4058     if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
4059       OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
4060          << PostIdxReg.ShiftImm;
4061     OS << ">";
4062     break;
4063   case k_ProcIFlags: {
4064     OS << "<ARM_PROC::";
4065     unsigned IFlags = getProcIFlags();
4066     for (int i=2; i >= 0; --i)
4067       if (IFlags & (1 << i))
4068         OS << ARM_PROC::IFlagsToString(1 << i);
4069     OS << ">";
4070     break;
4071   }
4072   case k_Register:
4073     OS << "<register " << RegName(getReg()) << ">";
4074     break;
4075   case k_ShifterImmediate:
4076     OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
4077        << " #" << ShifterImm.Imm << ">";
4078     break;
4079   case k_ShiftedRegister:
4080     OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
4081        << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
4082        << RegName(RegShiftedReg.ShiftReg) << ">";
4083     break;
4084   case k_ShiftedImmediate:
4085     OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
4086        << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
4087        << RegShiftedImm.ShiftImm << ">";
4088     break;
4089   case k_RotateImmediate:
4090     OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
4091     break;
4092   case k_ModifiedImmediate:
4093     OS << "<mod_imm #" << ModImm.Bits << ", #"
4094        <<  ModImm.Rot << ")>";
4095     break;
4096   case k_ConstantPoolImmediate:
4097     OS << "<constant_pool_imm #" << *getConstantPoolImm();
4098     break;
4099   case k_BitfieldDescriptor:
4100     OS << "<bitfield " << "lsb: " << Bitfield.LSB
4101        << ", width: " << Bitfield.Width << ">";
4102     break;
4103   case k_RegisterList:
4104   case k_RegisterListWithAPSR:
4105   case k_DPRRegisterList:
4106   case k_SPRRegisterList:
4107   case k_FPSRegisterListWithVPR:
4108   case k_FPDRegisterListWithVPR: {
4109     OS << "<register_list ";
4110 
4111     const SmallVectorImpl<MCRegister> &RegList = getRegList();
4112     for (auto I = RegList.begin(), E = RegList.end(); I != E;) {
4113       OS << RegName(*I);
4114       if (++I < E) OS << ", ";
4115     }
4116 
4117     OS << ">";
4118     break;
4119   }
4120   case k_VectorList:
4121     OS << "<vector_list " << VectorList.Count << " * "
4122        << RegName(VectorList.RegNum) << ">";
4123     break;
4124   case k_VectorListAllLanes:
4125     OS << "<vector_list(all lanes) " << VectorList.Count << " * "
4126        << RegName(VectorList.RegNum) << ">";
4127     break;
4128   case k_VectorListIndexed:
4129     OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
4130        << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
4131     break;
4132   case k_Token:
4133     OS << "'" << getToken() << "'";
4134     break;
4135   case k_VectorIndex:
4136     OS << "<vectorindex " << getVectorIndex() << ">";
4137     break;
4138   }
4139 }
4140 
4141 /// @name Auto-generated Match Functions
4142 /// {
4143 
4144 static MCRegister MatchRegisterName(StringRef Name);
4145 
4146 /// }
4147 
4148 static bool isDataTypeToken(StringRef Tok) {
4149   static const DenseSet<StringRef> DataTypes{
4150       ".8",  ".16",  ".32",  ".64",  ".i8", ".i16", ".i32", ".i64",
4151       ".u8", ".u16", ".u32", ".u64", ".s8", ".s16", ".s32", ".s64",
4152       ".p8", ".p16", ".f32", ".f64", ".f",  ".d"};
4153   return DataTypes.contains(Tok);
4154 }
4155 
4156 static unsigned getMnemonicOpsEndInd(const OperandVector &Operands) {
4157   unsigned MnemonicOpsEndInd = 1;
4158   // Special case for CPS which has a Mnemonic side token for possibly storing
4159   // ie/id variant
4160   if (Operands[0]->isToken() &&
4161       static_cast<ARMOperand &>(*Operands[0]).getToken() == "cps") {
4162     if (Operands.size() > 1 && Operands[1]->isImm() &&
4163         static_cast<ARMOperand &>(*Operands[1]).getImm()->getKind() ==
4164             llvm::MCExpr::Constant &&
4165         (dyn_cast<MCConstantExpr>(
4166              static_cast<ARMOperand &>(*Operands[1]).getImm())
4167                  ->getValue() == ARM_PROC::IE ||
4168          dyn_cast<MCConstantExpr>(
4169              static_cast<ARMOperand &>(*Operands[1]).getImm())
4170                  ->getValue() == ARM_PROC::ID))
4171       ++MnemonicOpsEndInd;
4172   }
4173 
4174   // In some circumstances the condition code moves to the right
4175   bool RHSCondCode = false;
4176   while (MnemonicOpsEndInd < Operands.size()) {
4177     auto Op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
4178     // Special case for it instructions which have a condition code on the RHS
4179     if (Op.isITMask()) {
4180       RHSCondCode = true;
4181       MnemonicOpsEndInd++;
4182     } else if (Op.isToken() &&
4183                (
4184                    // There are several special cases not covered by
4185                    // isDataTypeToken
4186                    Op.getToken() == ".w" || Op.getToken() == ".bf16" ||
4187                    Op.getToken() == ".p64" || Op.getToken() == ".f16" ||
4188                    isDataTypeToken(Op.getToken()))) {
4189       // In the mnemonic operators the cond code must always precede the data
4190       // type. So we can now safely assume any subsequent cond code is on the
4191       // RHS. As is the case for VCMP and VPT.
4192       RHSCondCode = true;
4193       MnemonicOpsEndInd++;
4194     }
4195     // Skip all mnemonic operator types
4196     else if (Op.isCCOut() || (Op.isCondCode() && !RHSCondCode) ||
4197              Op.isVPTPred() || (Op.isToken() && Op.getToken() == ".w"))
4198       MnemonicOpsEndInd++;
4199     else
4200       break;
4201   }
4202   return MnemonicOpsEndInd;
4203 }
4204 
4205 bool ARMAsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
4206                                  SMLoc &EndLoc) {
4207   const AsmToken &Tok = getParser().getTok();
4208   StartLoc = Tok.getLoc();
4209   EndLoc = Tok.getEndLoc();
4210   Reg = tryParseRegister();
4211 
4212   return !Reg;
4213 }
4214 
4215 ParseStatus ARMAsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
4216                                            SMLoc &EndLoc) {
4217   if (parseRegister(Reg, StartLoc, EndLoc))
4218     return ParseStatus::NoMatch;
4219   return ParseStatus::Success;
4220 }
4221 
4222 /// Try to parse a register name.  The token must be an Identifier when called,
4223 /// and if it is a register name the token is eaten and the register is
4224 /// returned.  Otherwise return an invalid MCRegister.
4225 MCRegister ARMAsmParser::tryParseRegister(bool AllowOutOfBoundReg) {
4226   MCAsmParser &Parser = getParser();
4227   const AsmToken &Tok = Parser.getTok();
4228   if (Tok.isNot(AsmToken::Identifier))
4229     return MCRegister();
4230 
4231   std::string lowerCase = Tok.getString().lower();
4232   MCRegister Reg = MatchRegisterName(lowerCase);
4233   if (!Reg) {
4234     Reg = StringSwitch<MCRegister>(lowerCase)
4235               .Case("r13", ARM::SP)
4236               .Case("r14", ARM::LR)
4237               .Case("r15", ARM::PC)
4238               .Case("ip", ARM::R12)
4239               // Additional register name aliases for 'gas' compatibility.
4240               .Case("a1", ARM::R0)
4241               .Case("a2", ARM::R1)
4242               .Case("a3", ARM::R2)
4243               .Case("a4", ARM::R3)
4244               .Case("v1", ARM::R4)
4245               .Case("v2", ARM::R5)
4246               .Case("v3", ARM::R6)
4247               .Case("v4", ARM::R7)
4248               .Case("v5", ARM::R8)
4249               .Case("v6", ARM::R9)
4250               .Case("v7", ARM::R10)
4251               .Case("v8", ARM::R11)
4252               .Case("sb", ARM::R9)
4253               .Case("sl", ARM::R10)
4254               .Case("fp", ARM::R11)
4255               .Default(MCRegister());
4256   }
4257   if (!Reg) {
4258     // Check for aliases registered via .req. Canonicalize to lower case.
4259     // That's more consistent since register names are case insensitive, and
4260     // it's how the original entry was passed in from MC/MCParser/AsmParser.
4261     auto Entry = RegisterReqs.find(lowerCase);
4262     // If no match, return failure.
4263     if (Entry == RegisterReqs.end())
4264       return MCRegister();
4265     Parser.Lex(); // Eat identifier token.
4266     return Entry->getValue();
4267   }
4268 
4269   // Some FPUs only have 16 D registers, so D16-D31 are invalid
4270   if (!AllowOutOfBoundReg && !hasD32() && Reg >= ARM::D16 && Reg <= ARM::D31)
4271     return MCRegister();
4272 
4273   Parser.Lex(); // Eat identifier token.
4274 
4275   return Reg;
4276 }
4277 
4278 std::optional<ARM_AM::ShiftOpc> ARMAsmParser::tryParseShiftToken() {
4279   MCAsmParser &Parser = getParser();
4280   const AsmToken &Tok = Parser.getTok();
4281   if (Tok.isNot(AsmToken::Identifier))
4282     return std::nullopt;
4283 
4284   std::string lowerCase = Tok.getString().lower();
4285   return StringSwitch<std::optional<ARM_AM::ShiftOpc>>(lowerCase)
4286       .Case("asl", ARM_AM::lsl)
4287       .Case("lsl", ARM_AM::lsl)
4288       .Case("lsr", ARM_AM::lsr)
4289       .Case("asr", ARM_AM::asr)
4290       .Case("ror", ARM_AM::ror)
4291       .Case("rrx", ARM_AM::rrx)
4292       .Default(std::nullopt);
4293 }
4294 
4295 // Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
4296 // If a recoverable error occurs, return 1. If an irrecoverable error
4297 // occurs, return -1. An irrecoverable error is one where tokens have been
4298 // consumed in the process of trying to parse the shifter (i.e., when it is
4299 // indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();

  // Not a shift mnemonic at all: recoverable (1), no tokens consumed.
  auto ShiftTyOpt = tryParseShiftToken();
  if (ShiftTyOpt == std::nullopt)
    return 1;
  auto ShiftTy = ShiftTyOpt.value();

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  std::unique_ptr<ARMOperand> PrevOp(
      (ARMOperand *)Operands.pop_back_val().release());
  if (!PrevOp->isReg())
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  MCRegister SrcReg = PrevOp->getReg();

  SMLoc EndLoc;
  int64_t Imm = 0;
  MCRegister ShiftReg;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = nullptr;
      // From this point tokens have been consumed, so all parse failures are
      // irrecoverable (-1) rather than recoverable (1).
      if (getParser().parseExpression(ShiftExpr, EndLoc)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      // Register-specified shift amount, e.g. "lsl r3".
      SMLoc L = Parser.getTok().getLoc();
      EndLoc = Parser.getTok().getEndLoc();
      ShiftReg = tryParseRegister();
      if (!ShiftReg) {
        Error(L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error(Parser.getTok().getLoc(),
            "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Emit a register-shifted-register operand, or a shifted-immediate operand
  // otherwise (RRX deliberately falls into the immediate form with Imm == 0).
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(
        ShiftTy, SrcReg, ShiftReg, Imm, S, EndLoc, *this));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                                          S, EndLoc, *this));

  return 0;
}
4383 
4384 /// Try to parse a register name.  The token must be an Identifier when called.
4385 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
4386 /// if there is a "writeback". 'true' if it's not a register.
4387 ///
4388 /// TODO this is likely to change to allow different register types and or to
4389 /// parse for a specific register type.
bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc RegStartLoc = Parser.getTok().getLoc();
  SMLoc RegEndLoc = Parser.getTok().getEndLoc();
  // Not a register: fail without consuming any tokens.
  MCRegister Reg = tryParseRegister();
  if (!Reg)
    return true;

  Operands.push_back(ARMOperand::CreateReg(Reg, RegStartLoc, RegEndLoc, *this));

  // A trailing '!' marks writeback; it is emitted as a separate token operand
  // so the instruction matcher can see it.
  const AsmToken &ExclaimTok = Parser.getTok();
  if (ExclaimTok.is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
                                               ExclaimTok.getLoc(), *this));
    Parser.Lex(); // Eat exclaim token
    return false;
  }

  // Also check for an index operand. This is only legal for vector registers,
  // but that'll get caught OK in operand matching, so we don't need to
  // explicitly filter everything else out here.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = Parser.getTok().getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return true;
    // The lane index must fold to a compile-time constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for vector index");

    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");

    SMLoc E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), SIdx, E,
                                                     getContext(), *this));
  }

  return false;
}
4434 
4435 /// MatchCoprocessorOperandName - Try to parse an coprocessor related
4436 /// instruction with a symbolic operand name.
4437 /// We accept "crN" syntax for GAS compatibility.
4438 /// <operand-name> ::= <prefix><number>
4439 /// If CoprocOp is 'c', then:
4440 ///   <prefix> ::= c | cr
4441 /// If CoprocOp is 'p', then :
4442 ///   <prefix> ::= p
4443 /// <number> ::= integer in range [0, 15]
4444 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4445   // Use the same layout as the tablegen'erated register name matcher. Ugly,
4446   // but efficient.
4447   if (Name.size() < 2 || Name[0] != CoprocOp)
4448     return -1;
4449   Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
4450 
4451   switch (Name.size()) {
4452   default: return -1;
4453   case 1:
4454     switch (Name[0]) {
4455     default:  return -1;
4456     case '0': return 0;
4457     case '1': return 1;
4458     case '2': return 2;
4459     case '3': return 3;
4460     case '4': return 4;
4461     case '5': return 5;
4462     case '6': return 6;
4463     case '7': return 7;
4464     case '8': return 8;
4465     case '9': return 9;
4466     }
4467   case 2:
4468     if (Name[0] != '1')
4469       return -1;
4470     switch (Name[1]) {
4471     default:  return -1;
4472     // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4473     // However, old cores (v5/v6) did use them in that way.
4474     case '0': return 10;
4475     case '1': return 11;
4476     case '2': return 12;
4477     case '3': return 13;
4478     case '4': return 14;
4479     case '5': return 15;
4480     }
4481   }
4482 }
4483 
4484 /// parseITCondCode - Try to parse a condition code for an IT instruction.
4485 ParseStatus ARMAsmParser::parseITCondCode(OperandVector &Operands) {
4486   MCAsmParser &Parser = getParser();
4487   SMLoc S = Parser.getTok().getLoc();
4488   const AsmToken &Tok = Parser.getTok();
4489   if (!Tok.is(AsmToken::Identifier))
4490     return ParseStatus::NoMatch;
4491   unsigned CC = ARMCondCodeFromString(Tok.getString());
4492   if (CC == ~0U)
4493     return ParseStatus::NoMatch;
4494   Parser.Lex(); // Eat the token.
4495 
4496   Operands.push_back(
4497       ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S, *this));
4498 
4499   return ParseStatus::Success;
4500 }
4501 
4502 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
4503 /// token must be an Identifier when called, and if it is a coprocessor
4504 /// number, the token is eaten and the operand is added to the operand list.
4505 ParseStatus ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
4506   MCAsmParser &Parser = getParser();
4507   SMLoc S = Parser.getTok().getLoc();
4508   const AsmToken &Tok = Parser.getTok();
4509   if (Tok.isNot(AsmToken::Identifier))
4510     return ParseStatus::NoMatch;
4511 
4512   int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
4513   if (Num == -1)
4514     return ParseStatus::NoMatch;
4515   if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
4516     return ParseStatus::NoMatch;
4517 
4518   Parser.Lex(); // Eat identifier token.
4519   Operands.push_back(ARMOperand::CreateCoprocNum(Num, S, *this));
4520   return ParseStatus::Success;
4521 }
4522 
4523 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
4524 /// token must be an Identifier when called, and if it is a coprocessor
4525 /// number, the token is eaten and the operand is added to the operand list.
4526 ParseStatus ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
4527   MCAsmParser &Parser = getParser();
4528   SMLoc S = Parser.getTok().getLoc();
4529   const AsmToken &Tok = Parser.getTok();
4530   if (Tok.isNot(AsmToken::Identifier))
4531     return ParseStatus::NoMatch;
4532 
4533   int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
4534   if (Reg == -1)
4535     return ParseStatus::NoMatch;
4536 
4537   Parser.Lex(); // Eat identifier token.
4538   Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S, *this));
4539   return ParseStatus::Success;
4540 }
4541 
4542 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
4543 /// coproc_option : '{' imm0_255 '}'
4544 ParseStatus ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
4545   MCAsmParser &Parser = getParser();
4546   SMLoc S = Parser.getTok().getLoc();
4547 
4548   // If this isn't a '{', this isn't a coprocessor immediate operand.
4549   if (Parser.getTok().isNot(AsmToken::LCurly))
4550     return ParseStatus::NoMatch;
4551   Parser.Lex(); // Eat the '{'
4552 
4553   const MCExpr *Expr;
4554   SMLoc Loc = Parser.getTok().getLoc();
4555   if (getParser().parseExpression(Expr))
4556     return Error(Loc, "illegal expression");
4557   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4558   if (!CE || CE->getValue() < 0 || CE->getValue() > 255)
4559     return Error(Loc,
4560                  "coprocessor option must be an immediate in range [0, 255]");
4561   int Val = CE->getValue();
4562 
4563   // Check for and consume the closing '}'
4564   if (Parser.getTok().isNot(AsmToken::RCurly))
4565     return ParseStatus::Failure;
4566   SMLoc E = Parser.getTok().getEndLoc();
4567   Parser.Lex(); // Eat the '}'
4568 
4569   Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E, *this));
4570   return ParseStatus::Success;
4571 }
4572 
4573 // For register list parsing, we need to map from raw GPR register numbering
4574 // to the enumeration values. The enumeration values aren't sorted by
4575 // register number due to our using "sp", "lr" and "pc" as canonical names.
4576 static MCRegister getNextRegister(MCRegister Reg) {
4577   // If this is a GPR, we need to do it manually, otherwise we can rely
4578   // on the sort ordering of the enumeration since the other reg-classes
4579   // are sane.
4580   if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4581     return Reg + 1;
4582   switch (Reg.id()) {
4583   default: llvm_unreachable("Invalid GPR number!");
4584   case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
4585   case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
4586   case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
4587   case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
4588   case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
4589   case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
4590   case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
4591   case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
4592   }
4593 }
4594 
4595 // Insert an <Encoding, Register> pair in an ordered vector. Return true on
4596 // success, or false, if duplicate encoding found.
static bool
insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, MCRegister>> &Regs,
                   unsigned Enc, MCRegister Reg) {
  // Append the new entry, then bubble it backwards to its sorted position
  // (one insertion-sort step; Regs is kept ordered by encoding value).
  Regs.emplace_back(Enc, Reg);
  for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
    // Equal encoding already present: drop the entry we just appended
    // (J.base() is the forward iterator to *I, the new entry's current slot)
    // and report the duplicate.
    if (J->first == Enc) {
      Regs.erase(J.base());
      return false;
    }
    // First strictly-smaller encoding found: the new entry is in place.
    if (J->first < Enc)
      break;
    // Otherwise keep carrying the new entry toward the front.
    std::swap(*I, *J);
  }
  return true;
}
4612 
4613 /// Parse a register list.
4614 bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
4615                                      bool AllowRAAC, bool IsLazyLoadStore,
4616                                      bool IsVSCCLRM) {
4617   MCAsmParser &Parser = getParser();
4618   if (Parser.getTok().isNot(AsmToken::LCurly))
4619     return TokError("Token is not a Left Curly Brace");
4620   SMLoc S = Parser.getTok().getLoc();
4621   Parser.Lex(); // Eat '{' token.
4622   SMLoc RegLoc = Parser.getTok().getLoc();
4623 
4624   // Check the first register in the list to see what register class
4625   // this is a list of.
4626   bool AllowOutOfBoundReg = IsLazyLoadStore || IsVSCCLRM;
4627   MCRegister Reg = tryParseRegister(AllowOutOfBoundReg);
4628   if (!Reg)
4629     return Error(RegLoc, "register expected");
4630   if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4631     return Error(RegLoc, "pseudo-register not allowed");
4632   // The reglist instructions have at most 32 registers, so reserve
4633   // space for that many.
4634   int EReg = 0;
4635   SmallVector<std::pair<unsigned, MCRegister>, 32> Registers;
4636 
4637   // Single-precision VSCCLRM can have double-precision registers in the
4638   // register list. When VSCCLRMAdjustEncoding is true then we've switched from
4639   // single-precision to double-precision and we pretend that these registers
4640   // are encoded as S32 onwards, which we can do by adding 16 to the encoding
4641   // value.
4642   bool VSCCLRMAdjustEncoding = false;
4643 
4644   // Allow Q regs and just interpret them as the two D sub-registers.
4645   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4646     Reg = getDRegFromQReg(Reg);
4647     EReg = MRI->getEncodingValue(Reg);
4648     Registers.emplace_back(EReg, Reg);
4649     Reg = Reg + 1;
4650   }
4651   const MCRegisterClass *RC;
4652   if (Reg == ARM::RA_AUTH_CODE ||
4653       ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4654     RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4655   else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4656     RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4657   else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4658     RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4659   else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4660     RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4661   else if (Reg == ARM::VPR)
4662     RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4663   else
4664     return Error(RegLoc, "invalid register in register list");
4665 
4666   // Store the register.
4667   EReg = MRI->getEncodingValue(Reg);
4668   Registers.emplace_back(EReg, Reg);
4669 
4670   // This starts immediately after the first register token in the list,
4671   // so we can see either a comma or a minus (range separator) as a legal
4672   // next token.
4673   while (Parser.getTok().is(AsmToken::Comma) ||
4674          Parser.getTok().is(AsmToken::Minus)) {
4675     if (Parser.getTok().is(AsmToken::Minus)) {
4676       if (Reg == ARM::RA_AUTH_CODE)
4677         return Error(RegLoc, "pseudo-register not allowed");
4678       Parser.Lex(); // Eat the minus.
4679       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4680       MCRegister EndReg = tryParseRegister(AllowOutOfBoundReg);
4681       if (!EndReg)
4682         return Error(AfterMinusLoc, "register expected");
4683       if (EndReg == ARM::RA_AUTH_CODE)
4684         return Error(AfterMinusLoc, "pseudo-register not allowed");
4685       // Allow Q regs and just interpret them as the two D sub-registers.
4686       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4687         EndReg = getDRegFromQReg(EndReg) + 1;
4688       // If the register is the same as the start reg, there's nothing
4689       // more to do.
4690       if (Reg == EndReg)
4691         continue;
4692       // The register must be in the same register class as the first.
4693       if (!RC->contains(Reg))
4694         return Error(AfterMinusLoc, "invalid register in register list");
4695       // Ranges must go from low to high.
4696       if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4697         return Error(AfterMinusLoc, "bad range in register list");
4698 
4699       // Add all the registers in the range to the register list.
4700       while (Reg != EndReg) {
4701         Reg = getNextRegister(Reg);
4702         EReg = MRI->getEncodingValue(Reg);
4703         if (VSCCLRMAdjustEncoding)
4704           EReg += 16;
4705         if (!insertNoDuplicates(Registers, EReg, Reg)) {
4706           Warning(AfterMinusLoc, StringRef("duplicated register (") +
4707                                      ARMInstPrinter::getRegisterName(Reg) +
4708                                      ") in register list");
4709         }
4710       }
4711       continue;
4712     }
4713     Parser.Lex(); // Eat the comma.
4714     RegLoc = Parser.getTok().getLoc();
4715     MCRegister OldReg = Reg;
4716     int EOldReg = EReg;
4717     const AsmToken RegTok = Parser.getTok();
4718     Reg = tryParseRegister(AllowOutOfBoundReg);
4719     if (!Reg)
4720       return Error(RegLoc, "register expected");
4721     if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4722       return Error(RegLoc, "pseudo-register not allowed");
4723     // Allow Q regs and just interpret them as the two D sub-registers.
4724     bool isQReg = false;
4725     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4726       Reg = getDRegFromQReg(Reg);
4727       isQReg = true;
4728     }
4729     if (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg) &&
4730         RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4731         ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
4732       // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
4733       // subset of GPRRegClassId except it contains APSR as well.
4734       RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4735     }
4736     if (Reg == ARM::VPR &&
4737         (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4738          RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4739          RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4740       RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4741       EReg = MRI->getEncodingValue(Reg);
4742       if (!insertNoDuplicates(Registers, EReg, Reg)) {
4743         Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4744                             ") in register list");
4745       }
4746       continue;
4747     }
4748     // VSCCLRM can switch from single-precision to double-precision only when
4749     // S31 is followed by D16.
4750     if (IsVSCCLRM && OldReg == ARM::S31 && Reg == ARM::D16) {
4751       VSCCLRMAdjustEncoding = true;
4752       RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4753     }
4754     // The register must be in the same register class as the first.
4755     if ((Reg == ARM::RA_AUTH_CODE &&
4756          RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4757         (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg)))
4758       return Error(RegLoc, "invalid register in register list");
4759     // In most cases, the list must be monotonically increasing. An
4760     // exception is CLRM, which is order-independent anyway, so
4761     // there's no potential for confusion if you write clrm {r2,r1}
4762     // instead of clrm {r1,r2}.
4763     EReg = MRI->getEncodingValue(Reg);
4764     if (VSCCLRMAdjustEncoding)
4765       EReg += 16;
4766     if (EnforceOrder && EReg < EOldReg) {
4767       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4768         Warning(RegLoc, "register list not in ascending order");
4769       else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4770         return Error(RegLoc, "register list not in ascending order");
4771     }
4772     // VFP register lists must also be contiguous.
4773     if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4774         RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4775         EReg != EOldReg + 1)
4776       return Error(RegLoc, "non-contiguous register range");
4777 
4778     if (!insertNoDuplicates(Registers, EReg, Reg)) {
4779       Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4780                           ") in register list");
4781     }
4782     if (isQReg) {
4783       Reg = Reg + 1;
4784       EReg = MRI->getEncodingValue(Reg);
4785       Registers.emplace_back(EReg, Reg);
4786     }
4787   }
4788 
4789   if (Parser.getTok().isNot(AsmToken::RCurly))
4790     return Error(Parser.getTok().getLoc(), "'}' expected");
4791   SMLoc E = Parser.getTok().getEndLoc();
4792   Parser.Lex(); // Eat '}' token.
4793 
4794   // Push the register list operand.
4795   Operands.push_back(ARMOperand::CreateRegList(Registers, S, E, *this));
4796 
4797   // The ARM system instruction variants for LDM/STM have a '^' token here.
4798   if (Parser.getTok().is(AsmToken::Caret)) {
4799     Operands.push_back(
4800         ARMOperand::CreateToken("^", Parser.getTok().getLoc(), *this));
4801     Parser.Lex(); // Eat '^' token.
4802   }
4803 
4804   return false;
4805 }
4806 
4807 // Helper function to parse the lane index for vector lists.
4808 ParseStatus ARMAsmParser::parseVectorLane(VectorLaneTy &LaneKind,
4809                                           unsigned &Index, SMLoc &EndLoc) {
4810   MCAsmParser &Parser = getParser();
4811   Index = 0; // Always return a defined index value.
4812   if (Parser.getTok().is(AsmToken::LBrac)) {
4813     Parser.Lex(); // Eat the '['.
4814     if (Parser.getTok().is(AsmToken::RBrac)) {
4815       // "Dn[]" is the 'all lanes' syntax.
4816       LaneKind = AllLanes;
4817       EndLoc = Parser.getTok().getEndLoc();
4818       Parser.Lex(); // Eat the ']'.
4819       return ParseStatus::Success;
4820     }
4821 
4822     // There's an optional '#' token here. Normally there wouldn't be, but
4823     // inline assemble puts one in, and it's friendly to accept that.
4824     if (Parser.getTok().is(AsmToken::Hash))
4825       Parser.Lex(); // Eat '#' or '$'.
4826 
4827     const MCExpr *LaneIndex;
4828     SMLoc Loc = Parser.getTok().getLoc();
4829     if (getParser().parseExpression(LaneIndex))
4830       return Error(Loc, "illegal expression");
4831     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4832     if (!CE)
4833       return Error(Loc, "lane index must be empty or an integer");
4834     if (Parser.getTok().isNot(AsmToken::RBrac))
4835       return Error(Parser.getTok().getLoc(), "']' expected");
4836     EndLoc = Parser.getTok().getEndLoc();
4837     Parser.Lex(); // Eat the ']'.
4838     int64_t Val = CE->getValue();
4839 
4840     // FIXME: Make this range check context sensitive for .8, .16, .32.
4841     if (Val < 0 || Val > 7)
4842       return Error(Parser.getTok().getLoc(), "lane index out of range");
4843     Index = Val;
4844     LaneKind = IndexedLane;
4845     return ParseStatus::Success;
4846   }
4847   LaneKind = NoLanes;
4848   return ParseStatus::Success;
4849 }
4850 
// Parse a vector register list operand, e.g. "{d0, d1, d2}", "{q0, q1}",
// "{d0[1], d1[1]}", or (as a gas-compatible extension) a bare D or Q
// register with an optional lane suffix. Handles both NEON (D/Q registers,
// single or double spaced) and MVE (Q0-Q7 only) lists.
ParseStatus ARMAsmParser::parseVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
  // respectively.
  // If there is no lane supplied, just parse as a register and
  // use the custom matcher to convert to list if necessary
  if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
    SMLoc E = Parser.getTok().getEndLoc();
    MCRegister Reg = tryParseRegister();
    if (!Reg)
      return ParseStatus::NoMatch;
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      // Bare D register: one-element list (or plain register if no lanes).
      ParseStatus Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (!Res.isSuccess())
        return Res;
      switch (LaneKind) {
      case NoLanes:
        Operands.push_back(ARMOperand::CreateReg(Reg, S, E, *this));
        break;
      case AllLanes:
        Operands.push_back(
            ARMOperand::CreateVectorListAllLanes(Reg, 1, false, S, E, *this));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(
            Reg, 1, LaneIndex, false, S, E, *this));
        break;
      }
      return ParseStatus::Success;
    }
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      // Bare Q register: treat as a two-element list of its D sub-registers.
      Reg = getDRegFromQReg(Reg);
      ParseStatus Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (!Res.isSuccess())
        return Res;
      switch (LaneKind) {
      case NoLanes:
        Operands.push_back(ARMOperand::CreateReg(Reg, S, E, *this));
        break;
      case AllLanes:
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(
            ARMOperand::CreateVectorListAllLanes(Reg, 2, false, S, E, *this));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(
            Reg, 2, LaneIndex, false, S, E, *this));
        break;
      }
      return ParseStatus::Success;
    }
    Operands.push_back(ARMOperand::CreateReg(Reg, S, E, *this));
    return ParseStatus::Success;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return ParseStatus::NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  MCRegister Reg = tryParseRegister();
  if (!Reg)
    return Error(RegLoc, "register expected");
  unsigned Count = 1;
  int Spacing = 0; // 0 = unknown yet, 1 = single spaced, 2 = double spaced.
  MCRegister FirstReg = Reg;

  // MVE lists can only contain Q registers (checked against MQPR, i.e.
  // Q0-Q7, as the diagnostic states).
  if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
    return Error(Parser.getTok().getLoc(),
                 "vector register in range Q0-Q7 expected");
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    Reg = Reg + 1;
    ++Count;
  }

  SMLoc E;
  if (!parseVectorLane(LaneKind, LaneIndex, E).isSuccess())
    return ParseStatus::Failure;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Register range, e.g. "d0-d3".
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2)
        return Error(Parser.getTok().getLoc(),
                     "sequential registers in double spaced list");
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      MCRegister EndReg = tryParseRegister();
      if (!EndReg)
        return Error(AfterMinusLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if ((hasMVE() &&
           !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
          (!hasMVE() &&
           !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)))
        return Error(AfterMinusLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (Reg > EndReg)
        return Error(AfterMinusLoc, "bad range in register list");
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
        return ParseStatus::Failure;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
        return Error(AfterMinusLoc, "mismatched lane index in register list");

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    MCRegister OldReg = Reg;
    Reg = tryParseRegister();
    if (!Reg)
      return Error(RegLoc, "register expected");

    if (hasMVE()) {
      if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
        return Error(RegLoc, "vector register in range Q0-Q7 expected");
      Spacing = 1;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2)
        return Error(
            RegLoc,
            "invalid register in double-spaced list (must be 'D' register')");
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1)
        return Error(RegLoc, "non-contiguous register range");
      Reg = Reg + 1;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc LaneLoc = Parser.getTok().getLoc();
      if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
        return ParseStatus::Failure;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
        return Error(LaneLoc, "mismatched lane index in register list");
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already. A gap of two (e.g. "d0, d2") selects a
    // double-spaced list.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing)
      return Error(RegLoc, "non-contiguous register range");
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
      return ParseStatus::Failure;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
      return Error(EndLoc, "mismatched lane index in register list");
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(Parser.getTok().getLoc(), "'}' expected");
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  switch (LaneKind) {
  case NoLanes:
  case AllLanes: {
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2 && !hasMVE()) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }
    auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
                   ARMOperand::CreateVectorListAllLanes);
    Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E, *this));
    break;
  }
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(
        FirstReg, Count, LaneIndex, (Spacing == 2), S, E, *this));
    break;
  }
  return ParseStatus::Success;
}
5072 
/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
///
/// Accepts either a (case-insensitive) option name, or a raw immediate in
/// [0,15] (optionally prefixed by '#' or '$') which maps onto the
/// RESERVED_0-based encodings. Note the aliases: "sh"/"shst" map to the
/// inner-shareable encodings, "un"/"unst" to the non-shareable ones.
ParseStatus ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  unsigned Opt;

  if (Tok.is(AsmToken::Identifier)) {
    StringRef OptStr = Tok.getString();

    // Case-insensitive match against the barrier-option name table.
    Opt = StringSwitch<unsigned>(OptStr.lower())
              .Case("sy", ARM_MB::SY)
              .Case("st", ARM_MB::ST)
              .Case("ld", ARM_MB::LD)
              .Case("sh", ARM_MB::ISH)
              .Case("ish", ARM_MB::ISH)
              .Case("shst", ARM_MB::ISHST)
              .Case("ishst", ARM_MB::ISHST)
              .Case("ishld", ARM_MB::ISHLD)
              .Case("nsh", ARM_MB::NSH)
              .Case("un", ARM_MB::NSH)
              .Case("nshst", ARM_MB::NSHST)
              .Case("nshld", ARM_MB::NSHLD)
              .Case("unst", ARM_MB::NSHST)
              .Case("osh", ARM_MB::OSH)
              .Case("oshst", ARM_MB::OSHST)
              .Case("oshld", ARM_MB::OSHLD)
              .Default(~0U);

    // ishld, oshld, nshld and ld are only available from ARMv8.
    if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
                        Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
      Opt = ~0U;

    // Unrecognized (or unavailable) name: no match, so other operand
    // parsers may still try this token.
    if (Opt == ~0U)
      return ParseStatus::NoMatch;

    Parser.Lex(); // Eat identifier token.
  } else if (Tok.is(AsmToken::Hash) ||
             Tok.is(AsmToken::Dollar) ||
             Tok.is(AsmToken::Integer)) {
    if (Parser.getTok().isNot(AsmToken::Integer))
      Parser.Lex(); // Eat '#' or '$'.
    SMLoc Loc = Parser.getTok().getLoc();

    const MCExpr *MemBarrierID;
    if (getParser().parseExpression(MemBarrierID))
      return Error(Loc, "illegal expression");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
    if (!CE)
      return Error(Loc, "constant expression expected");

    // Only the low 4 bits are encodable.
    int Val = CE->getValue();
    if (Val & ~0xf)
      return Error(Loc, "immediate value out of range");

    Opt = ARM_MB::RESERVED_0 + Val;
  } else
    return Error(Parser.getTok().getLoc(),
                 "expected an immediate or barrier type");

  Operands.push_back(
      ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S, *this));
  return ParseStatus::Success;
}
5139 
5140 ParseStatus
5141 ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
5142   MCAsmParser &Parser = getParser();
5143   SMLoc S = Parser.getTok().getLoc();
5144   const AsmToken &Tok = Parser.getTok();
5145 
5146   if (Tok.isNot(AsmToken::Identifier))
5147     return ParseStatus::NoMatch;
5148 
5149   if (!Tok.getString().equals_insensitive("csync"))
5150     return ParseStatus::NoMatch;
5151 
5152   Parser.Lex(); // Eat identifier token.
5153 
5154   Operands.push_back(
5155       ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S, *this));
5156   return ParseStatus::Success;
5157 }
5158 
5159 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
5160 ParseStatus
5161 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
5162   MCAsmParser &Parser = getParser();
5163   SMLoc S = Parser.getTok().getLoc();
5164   const AsmToken &Tok = Parser.getTok();
5165   unsigned Opt;
5166 
5167   if (Tok.is(AsmToken::Identifier)) {
5168     StringRef OptStr = Tok.getString();
5169 
5170     if (OptStr.equals_insensitive("sy"))
5171       Opt = ARM_ISB::SY;
5172     else
5173       return ParseStatus::NoMatch;
5174 
5175     Parser.Lex(); // Eat identifier token.
5176   } else if (Tok.is(AsmToken::Hash) ||
5177              Tok.is(AsmToken::Dollar) ||
5178              Tok.is(AsmToken::Integer)) {
5179     if (Parser.getTok().isNot(AsmToken::Integer))
5180       Parser.Lex(); // Eat '#' or '$'.
5181     SMLoc Loc = Parser.getTok().getLoc();
5182 
5183     const MCExpr *ISBarrierID;
5184     if (getParser().parseExpression(ISBarrierID))
5185       return Error(Loc, "illegal expression");
5186 
5187     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
5188     if (!CE)
5189       return Error(Loc, "constant expression expected");
5190 
5191     int Val = CE->getValue();
5192     if (Val & ~0xf)
5193       return Error(Loc, "immediate value out of range");
5194 
5195     Opt = ARM_ISB::RESERVED_0 + Val;
5196   } else
5197     return Error(Parser.getTok().getLoc(),
5198                  "expected an immediate or barrier type");
5199 
5200   Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
5201       (ARM_ISB::InstSyncBOpt)Opt, S, *this));
5202   return ParseStatus::Success;
5203 }
5204 
5205 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
5206 ParseStatus ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
5207   MCAsmParser &Parser = getParser();
5208   SMLoc S = Parser.getTok().getLoc();
5209   const AsmToken &Tok = Parser.getTok();
5210   if (!Tok.is(AsmToken::Identifier))
5211     return ParseStatus::NoMatch;
5212   StringRef IFlagsStr = Tok.getString();
5213 
5214   // An iflags string of "none" is interpreted to mean that none of the AIF
5215   // bits are set.  Not a terribly useful instruction, but a valid encoding.
5216   unsigned IFlags = 0;
5217   if (IFlagsStr != "none") {
5218         for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
5219       unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
5220         .Case("a", ARM_PROC::A)
5221         .Case("i", ARM_PROC::I)
5222         .Case("f", ARM_PROC::F)
5223         .Default(~0U);
5224 
5225       // If some specific iflag is already set, it means that some letter is
5226       // present more than once, this is not acceptable.
5227       if (Flag == ~0U || (IFlags & Flag))
5228         return ParseStatus::NoMatch;
5229 
5230       IFlags |= Flag;
5231     }
5232   }
5233 
5234   Parser.Lex(); // Eat identifier token.
5235   Operands.push_back(
5236       ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S, *this));
5237   return ParseStatus::Success;
5238 }
5239 
/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
///
/// Three forms are handled:
///   - a bare integer in [0,255], used directly as the SYSm value;
///   - on M-class, a system-register name looked up in the sysreg table;
///   - on A/R-class, "apsr"/"cpsr"/"spsr" with an optional "_<flags>"
///     suffix, encoded as a 4-bit field mask plus bit 4 for SPSR.
ParseStatus ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
  // Don't parse two MSR registers in a row
  if (static_cast<ARMOperand &>(*Operands.back()).isMSRMask() ||
      static_cast<ARMOperand &>(*Operands.back()).isBankedReg())
    return ParseStatus::NoMatch;
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();

  // Raw integer form: accept any value that fits in 8 bits.
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    if (Val > 255 || Val < 0) {
      return ParseStatus::NoMatch;
    }
    unsigned SYSmvalue = Val & 0xFF;
    Parser.Lex();
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S, *this));
    return ParseStatus::Success;
  }

  if (!Tok.is(AsmToken::Identifier))
    return ParseStatus::NoMatch;
  StringRef Mask = Tok.getString();

  // M-class: look the (case-insensitive) name up in the tablegen'd system
  // register table, and require the current subtarget to support it.
  if (isMClass()) {
    auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
    if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
      return ParseStatus::NoMatch;

    unsigned SYSmvalue = TheReg->Encoding & 0xFFF;

    Parser.Lex(); // Eat identifier token.
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S, *this));
    return ParseStatus::Success;
  }

  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
  size_t Start = 0, Next = Mask.find('_');
  StringRef Flags = "";
  std::string SpecReg = Mask.slice(Start, Next).lower();
  if (Next != StringRef::npos)
    Flags = Mask.substr(Next + 1);

  // FlagsVal contains the complete mask:
  // 3-0: Mask
  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  unsigned FlagsVal = 0;

  if (SpecReg == "apsr") {
    // APSR only allows the fixed flag-group suffixes below (the flag part
    // here is case-sensitive, unlike the register name).
    FlagsVal = StringSwitch<unsigned>(Flags)
    .Case("nzcvq",  0x8) // same as CPSR_f
    .Case("g",      0x4) // same as CPSR_s
    .Case("nzcvqg", 0xc) // same as CPSR_fs
    .Default(~0U);

    if (FlagsVal == ~0U) {
      if (!Flags.empty())
        return ParseStatus::NoMatch;
      else
        FlagsVal = 8; // No flag
    }
  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
    if (Flags == "all" || Flags == "")
      Flags = "fc";
    // Each letter sets one bit of the 4-bit field mask.
    for (int i = 0, e = Flags.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
      .Case("c", 1)
      .Case("x", 2)
      .Case("s", 4)
      .Case("f", 8)
      .Default(~0U);

      // If some specific flag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (Flag == ~0U || (FlagsVal & Flag))
        return ParseStatus::NoMatch;
      FlagsVal |= Flag;
    }
  } else // No match for special register.
    return ParseStatus::NoMatch;

  // Special register without flags is NOT equivalent to "fc" flags.
  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
  // two lines would enable gas compatibility at the expense of breaking
  // round-tripping.
  //
  // if (!FlagsVal)
  //  FlagsVal = 0x9;

  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  if (SpecReg == "spsr")
    FlagsVal |= 16;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S, *this));
  return ParseStatus::Success;
}
5339 
5340 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
5341 /// use in the MRS/MSR instructions added to support virtualization.
5342 ParseStatus ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
5343   // Don't parse two Banked registers in a row
5344   if (static_cast<ARMOperand &>(*Operands.back()).isBankedReg() ||
5345       static_cast<ARMOperand &>(*Operands.back()).isMSRMask())
5346     return ParseStatus::NoMatch;
5347   MCAsmParser &Parser = getParser();
5348   SMLoc S = Parser.getTok().getLoc();
5349   const AsmToken &Tok = Parser.getTok();
5350   if (!Tok.is(AsmToken::Identifier))
5351     return ParseStatus::NoMatch;
5352   StringRef RegName = Tok.getString();
5353 
5354   auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
5355   if (!TheReg)
5356     return ParseStatus::NoMatch;
5357   unsigned Encoding = TheReg->Encoding;
5358 
5359   Parser.Lex(); // Eat identifier token.
5360   Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S, *this));
5361   return ParseStatus::Success;
5362 }
5363 
// FIXME: Unify the different methods for handling shift operators
// and use TableGen matching mechanisms to do the validation rather than
// separate parsing paths.
//
// Parse a PKH-style shifted immediate: a shift keyword that must equal
// 'Op' (e.g. lsl/asr), followed by "#<imm>" with <imm> constrained to the
// inclusive range [Low, High].
ParseStatus ARMAsmParser::parsePKHImm(OperandVector &Operands,
                                      ARM_AM::ShiftOpc Op, int Low, int High) {
  MCAsmParser &Parser = getParser();
  auto ShiftCodeOpt = tryParseShiftToken();

  if (!ShiftCodeOpt.has_value())
    return ParseStatus::NoMatch;
  auto ShiftCode = ShiftCodeOpt.value();

  // The wrong shift code has been provided. Can error here as has matched the
  // correct operand in this case.
  if (ShiftCode != Op)
    return Error(Parser.getTok().getLoc(),
                 ARM_AM::getShiftOpcStr(Op) + " operand expected.");

  Parser.Lex(); // Eat shift type token.

  // There must be a '#' and a shift amount.
  // NOTE(review): this returns NoMatch after the shift token has already
  // been consumed; other parse paths usually Error once tokens are eaten —
  // confirm whether any caller relies on this NoMatch.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return ParseStatus::NoMatch;
  Parser.Lex(); // Eat hash token.

  const MCExpr *ShiftAmount;
  SMLoc Loc = Parser.getTok().getLoc();
  SMLoc EndLoc;
  if (getParser().parseExpression(ShiftAmount, EndLoc))
    return Error(Loc, "illegal expression");
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
  if (!CE)
    return Error(Loc, "constant expression expected");
  // Range check against the caller-supplied inclusive bounds.
  int Val = CE->getValue();
  if (Val < Low || Val > High)
    return Error(Loc, "immediate value out of range");

  Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc, *this));

  return ParseStatus::Success;
}
5406 
5407 ParseStatus ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
5408   MCAsmParser &Parser = getParser();
5409   const AsmToken &Tok = Parser.getTok();
5410   SMLoc S = Tok.getLoc();
5411   if (Tok.isNot(AsmToken::Identifier))
5412     return Error(S, "'be' or 'le' operand expected");
5413   int Val = StringSwitch<int>(Tok.getString().lower())
5414     .Case("be", 1)
5415     .Case("le", 0)
5416     .Default(-1);
5417   Parser.Lex(); // Eat the token.
5418 
5419   if (Val == -1)
5420     return Error(S, "'be' or 'le' operand expected");
5421   Operands.push_back(ARMOperand::CreateImm(
5422       MCConstantExpr::create(Val, getContext()), S, Tok.getEndLoc(), *this));
5423   return ParseStatus::Success;
5424 }
5425 
5426 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
5427 /// instructions. Legal values are:
5428 ///     lsl #n  'n' in [0,31]
5429 ///     asr #n  'n' in [1,32]
5430 ///             n == 32 encoded as n == 0.
5431 ParseStatus ARMAsmParser::parseShifterImm(OperandVector &Operands) {
5432   MCAsmParser &Parser = getParser();
5433   const AsmToken &Tok = Parser.getTok();
5434   SMLoc S = Tok.getLoc();
5435   if (Tok.isNot(AsmToken::Identifier))
5436     return ParseStatus::NoMatch;
5437   StringRef ShiftName = Tok.getString();
5438   bool isASR;
5439   if (ShiftName == "lsl" || ShiftName == "LSL")
5440     isASR = false;
5441   else if (ShiftName == "asr" || ShiftName == "ASR")
5442     isASR = true;
5443   else
5444     return ParseStatus::NoMatch;
5445   Parser.Lex(); // Eat the operator.
5446 
5447   // A '#' and a shift amount.
5448   if (Parser.getTok().isNot(AsmToken::Hash) &&
5449       Parser.getTok().isNot(AsmToken::Dollar))
5450     return Error(Parser.getTok().getLoc(), "'#' expected");
5451   Parser.Lex(); // Eat hash token.
5452   SMLoc ExLoc = Parser.getTok().getLoc();
5453 
5454   const MCExpr *ShiftAmount;
5455   SMLoc EndLoc;
5456   if (getParser().parseExpression(ShiftAmount, EndLoc))
5457     return Error(ExLoc, "malformed shift expression");
5458   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5459   if (!CE)
5460     return Error(ExLoc, "shift amount must be an immediate");
5461 
5462   int64_t Val = CE->getValue();
5463   if (isASR) {
5464     // Shift amount must be in [1,32]
5465     if (Val < 1 || Val > 32)
5466       return Error(ExLoc, "'asr' shift amount must be in range [1,32]");
5467     // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
5468     if (isThumb() && Val == 32)
5469       return Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
5470     if (Val == 32) Val = 0;
5471   } else {
5472     // Shift amount must be in [1,32]
5473     if (Val < 0 || Val > 31)
5474       return Error(ExLoc, "'lsr' shift amount must be in range [0,31]");
5475   }
5476 
5477   Operands.push_back(
5478       ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc, *this));
5479 
5480   return ParseStatus::Success;
5481 }
5482 
5483 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
5484 /// of instructions. Legal values are:
5485 ///     ror #n  'n' in {0, 8, 16, 24}
5486 ParseStatus ARMAsmParser::parseRotImm(OperandVector &Operands) {
5487   MCAsmParser &Parser = getParser();
5488   const AsmToken &Tok = Parser.getTok();
5489   SMLoc S = Tok.getLoc();
5490   if (Tok.isNot(AsmToken::Identifier))
5491     return ParseStatus::NoMatch;
5492   StringRef ShiftName = Tok.getString();
5493   if (ShiftName != "ror" && ShiftName != "ROR")
5494     return ParseStatus::NoMatch;
5495   Parser.Lex(); // Eat the operator.
5496 
5497   // A '#' and a rotate amount.
5498   if (Parser.getTok().isNot(AsmToken::Hash) &&
5499       Parser.getTok().isNot(AsmToken::Dollar))
5500     return Error(Parser.getTok().getLoc(), "'#' expected");
5501   Parser.Lex(); // Eat hash token.
5502   SMLoc ExLoc = Parser.getTok().getLoc();
5503 
5504   const MCExpr *ShiftAmount;
5505   SMLoc EndLoc;
5506   if (getParser().parseExpression(ShiftAmount, EndLoc))
5507     return Error(ExLoc, "malformed rotate expression");
5508   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5509   if (!CE)
5510     return Error(ExLoc, "rotate amount must be an immediate");
5511 
5512   int64_t Val = CE->getValue();
5513   // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
5514   // normally, zero is represented in asm by omitting the rotate operand
5515   // entirely.
5516   if (Val != 8 && Val != 16 && Val != 24 && Val != 0)
5517     return Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
5518 
5519   Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc, *this));
5520 
5521   return ParseStatus::Success;
5522 }
5523 
5524 ParseStatus ARMAsmParser::parseModImm(OperandVector &Operands) {
5525   MCAsmParser &Parser = getParser();
5526   MCAsmLexer &Lexer = getLexer();
5527   int64_t Imm1, Imm2;
5528 
5529   SMLoc S = Parser.getTok().getLoc();
5530 
5531   // 1) A mod_imm operand can appear in the place of a register name:
5532   //   add r0, #mod_imm
5533   //   add r0, r0, #mod_imm
5534   // to correctly handle the latter, we bail out as soon as we see an
5535   // identifier.
5536   //
5537   // 2) Similarly, we do not want to parse into complex operands:
5538   //   mov r0, #mod_imm
5539   //   mov r0, :lower16:(_foo)
5540   if (Parser.getTok().is(AsmToken::Identifier) ||
5541       Parser.getTok().is(AsmToken::Colon))
5542     return ParseStatus::NoMatch;
5543 
5544   // Hash (dollar) is optional as per the ARMARM
5545   if (Parser.getTok().is(AsmToken::Hash) ||
5546       Parser.getTok().is(AsmToken::Dollar)) {
5547     // Avoid parsing into complex operands (#:)
5548     if (Lexer.peekTok().is(AsmToken::Colon))
5549       return ParseStatus::NoMatch;
5550 
5551     // Eat the hash (dollar)
5552     Parser.Lex();
5553   }
5554 
5555   SMLoc Sx1, Ex1;
5556   Sx1 = Parser.getTok().getLoc();
5557   const MCExpr *Imm1Exp;
5558   if (getParser().parseExpression(Imm1Exp, Ex1))
5559     return Error(Sx1, "malformed expression");
5560 
5561   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
5562 
5563   if (CE) {
5564     // Immediate must fit within 32-bits
5565     Imm1 = CE->getValue();
5566     int Enc = ARM_AM::getSOImmVal(Imm1);
5567     if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
5568       // We have a match!
5569       Operands.push_back(ARMOperand::CreateModImm(
5570           (Enc & 0xFF), (Enc & 0xF00) >> 7, Sx1, Ex1, *this));
5571       return ParseStatus::Success;
5572     }
5573 
5574     // We have parsed an immediate which is not for us, fallback to a plain
5575     // immediate. This can happen for instruction aliases. For an example,
5576     // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
5577     // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
5578     // instruction with a mod_imm operand. The alias is defined such that the
5579     // parser method is shared, that's why we have to do this here.
5580     if (Parser.getTok().is(AsmToken::EndOfStatement)) {
5581       Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1, *this));
5582       return ParseStatus::Success;
5583     }
5584   } else {
5585     // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
5586     // MCFixup). Fallback to a plain immediate.
5587     Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1, *this));
5588     return ParseStatus::Success;
5589   }
5590 
5591   // From this point onward, we expect the input to be a (#bits, #rot) pair
5592   if (Parser.getTok().isNot(AsmToken::Comma))
5593     return Error(Sx1,
5594                  "expected modified immediate operand: #[0, 255], #even[0-30]");
5595 
5596   if (Imm1 & ~0xFF)
5597     return Error(Sx1, "immediate operand must a number in the range [0, 255]");
5598 
5599   // Eat the comma
5600   Parser.Lex();
5601 
5602   // Repeat for #rot
5603   SMLoc Sx2, Ex2;
5604   Sx2 = Parser.getTok().getLoc();
5605 
5606   // Eat the optional hash (dollar)
5607   if (Parser.getTok().is(AsmToken::Hash) ||
5608       Parser.getTok().is(AsmToken::Dollar))
5609     Parser.Lex();
5610 
5611   const MCExpr *Imm2Exp;
5612   if (getParser().parseExpression(Imm2Exp, Ex2))
5613     return Error(Sx2, "malformed expression");
5614 
5615   CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5616 
5617   if (CE) {
5618     Imm2 = CE->getValue();
5619     if (!(Imm2 & ~0x1E)) {
5620       // We have a match!
5621       Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2, *this));
5622       return ParseStatus::Success;
5623     }
5624     return Error(Sx2,
5625                  "immediate operand must an even number in the range [0, 30]");
5626   } else {
5627     return Error(Sx2, "constant expression expected");
5628   }
5629 }
5630 
/// Parse a bitfield descriptor, "#<lsb>, #<width>", as used by BFC/BFI/
/// SBFX/UBFX. Produces a single Bitfield operand covering both values.
ParseStatus ARMAsmParser::parseBitfield(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return ParseStatus::NoMatch;
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().parseExpression(LSBExpr))
    return Error(E, "malformed immediate expression");
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE)
    return Error(E, "'lsb' operand must be an immediate");

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31)
    return Error(E, "'lsb' operand must be in the range [0,31]");
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma))
    return Error(Parser.getTok().getLoc(), "too few operands");
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return Error(Parser.getTok().getLoc(), "'#' expected");
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  SMLoc EndLoc;
  if (getParser().parseExpression(WidthExpr, EndLoc))
    return Error(E, "malformed immediate expression");
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE)
    return Error(E, "'width' operand must be an immediate");

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB)
    return Error(E, "'width' operand must be in the range [1,32-lsb]");

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc, *this));

  return ParseStatus::Success;
}
5680 
5681 ParseStatus ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
5682   // Check for a post-index addressing register operand. Specifically:
5683   // postidx_reg := '+' register {, shift}
5684   //              | '-' register {, shift}
5685   //              | register {, shift}
5686 
5687   // This method must return ParseStatus::NoMatch without consuming any tokens
5688   // in the case where there is no match, as other alternatives take other
5689   // parse methods.
5690   MCAsmParser &Parser = getParser();
5691   AsmToken Tok = Parser.getTok();
5692   SMLoc S = Tok.getLoc();
5693   bool haveEaten = false;
5694   bool isAdd = true;
5695   if (Tok.is(AsmToken::Plus)) {
5696     Parser.Lex(); // Eat the '+' token.
5697     haveEaten = true;
5698   } else if (Tok.is(AsmToken::Minus)) {
5699     Parser.Lex(); // Eat the '-' token.
5700     isAdd = false;
5701     haveEaten = true;
5702   }
5703 
5704   SMLoc E = Parser.getTok().getEndLoc();
5705   MCRegister Reg = tryParseRegister();
5706   if (!Reg) {
5707     if (!haveEaten)
5708       return ParseStatus::NoMatch;
5709     return Error(Parser.getTok().getLoc(), "register expected");
5710   }
5711 
5712   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
5713   unsigned ShiftImm = 0;
5714   if (Parser.getTok().is(AsmToken::Comma)) {
5715     Parser.Lex(); // Eat the ','.
5716     if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5717       return ParseStatus::Failure;
5718 
5719     // FIXME: Only approximates end...may include intervening whitespace.
5720     E = Parser.getTok().getLoc();
5721   }
5722 
5723   Operands.push_back(
5724       ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, ShiftImm, S, E, *this));
5725 
5726   return ParseStatus::Success;
5727 }
5728 
ParseStatus ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  // am3offset := '+' register
  //              | '-' register
  //              | register
  //              | # imm
  //              | # + imm
  //              | # - imm

  // This method must return ParseStatus::NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  MCAsmParser &Parser = getParser();
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar)) {
    Parser.Lex(); // Eat '#' or '$'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    SMLoc E;
    if (getParser().parseExpression(Offset, E))
      return ParseStatus::Failure;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error(S, "constant expression expected");
    // Negative zero is encoded as the flag value
    // std::numeric_limits<int32_t>::min().
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = std::numeric_limits<int32_t>::min();

    Operands.push_back(ARMOperand::CreateImm(
        MCConstantExpr::create(Val, getContext()), S, E, *this));

    return ParseStatus::Success;
  }

  // Otherwise expect an optionally-signed register.
  bool haveEaten = false;
  bool isAdd = true;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }

  // Refresh Tok so the location/extent below refer to the register token.
  Tok = Parser.getTok();
  MCRegister Reg = tryParseRegister();
  if (!Reg) {
    // Only an error if we already consumed a sign token; otherwise back out
    // and let another parse method try.
    if (!haveEaten)
      return ParseStatus::NoMatch;
    return Error(Tok.getLoc(), "register expected");
  }

  Operands.push_back(ARMOperand::CreatePostIdxReg(
      Reg, isAdd, ARM_AM::no_shift, 0, S, Tok.getEndLoc(), *this));

  return ParseStatus::Success;
}
5795 
5796 // Finds the index of the first CondCode operator, if there is none returns 0
5797 unsigned findCondCodeInd(const OperandVector &Operands,
5798                          unsigned MnemonicOpsEndInd) {
5799   for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
5800     auto Op = static_cast<ARMOperand &>(*Operands[I]);
5801     if (Op.isCondCode())
5802       return I;
5803   }
5804   return 0;
5805 }
5806 
5807 unsigned findCCOutInd(const OperandVector &Operands,
5808                       unsigned MnemonicOpsEndInd) {
5809   for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
5810     auto Op = static_cast<ARMOperand &>(*Operands[I]);
5811     if (Op.isCCOut())
5812       return I;
5813   }
5814   return 0;
5815 }
5816 
/// Convert parsed operands to MCInst.  Needed here because this instruction
/// only has two register operands, but multiplication is commutative so
/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
                                    const OperandVector &Operands) {
  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
  unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);
  unsigned CondOutI = findCCOutInd(Operands, MnemonicOpsEndInd);

  // 2 operand form: Rd and Rm index the same parsed operand.
  unsigned RegRd = MnemonicOpsEndInd;
  unsigned RegRn = MnemonicOpsEndInd + 1;
  unsigned RegRm = MnemonicOpsEndInd;

  if (Operands.size() == MnemonicOpsEndInd + 3) {
    // If we have a three-operand form, make sure to set Rn to be the operand
    // that isn't the same as Rd.
    if (((ARMOperand &)*Operands[RegRd]).getReg() ==
        ((ARMOperand &)*Operands[MnemonicOpsEndInd + 1]).getReg()) {
      RegRn = MnemonicOpsEndInd + 2;
      RegRm = MnemonicOpsEndInd + 1;
    } else {
      RegRn = MnemonicOpsEndInd + 1;
      RegRm = MnemonicOpsEndInd + 2;
    }
  }

  // Rd
  ((ARMOperand &)*Operands[RegRd]).addRegOperands(Inst, 1);
  // CCOut: use the parsed 's' operand if present, otherwise synthesize a
  // zero (non-flag-setting) CCOut operand.
  if (CondOutI != 0) {
    ((ARMOperand &)*Operands[CondOutI]).addCCOutOperands(Inst, 1);
  } else {
    ARMOperand Op =
        *ARMOperand::CreateCCOut(0, Operands[0]->getEndLoc(), *this);
    Op.addCCOutOperands(Inst, 1);
  }
  // Rn
  ((ARMOperand &)*Operands[RegRn]).addRegOperands(Inst, 1);
  // Rm
  ((ARMOperand &)*Operands[RegRm]).addRegOperands(Inst, 1);

  // Cond code: use the parsed condition if present, otherwise default to AL.
  if (CondI != 0) {
    ((ARMOperand &)*Operands[CondI]).addCondCodeOperands(Inst, 2);
  } else {
    ARMOperand Op = *ARMOperand::CreateCondCode(
        llvm::ARMCC::AL, Operands[0]->getEndLoc(), *this);
    Op.addCondCodeOperands(Inst, 2);
  }
}
5868 
/// Convert parsed Thumb branch operands to MCInst, selecting between the
/// conditional/unconditional and 16-/32-bit branch encodings.
void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
                                    const OperandVector &Operands) {
  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
  unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);
  unsigned Cond =
      (CondI == 0 ? ARMCC::AL
                  : static_cast<ARMOperand &>(*Operands[CondI]).getCondCode());

  // first decide whether or not the branch should be conditional
  // by looking at it's location relative to an IT block
  if(inITBlock()) {
    // inside an IT block we cannot have any conditional branches. any
    // such instructions needs to be converted to unconditional form
    switch(Inst.getOpcode()) {
      case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
      case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
    }
  } else {
    // outside an IT block, pick the conditional or unconditional opcode
    // based on whether a real (non-AL) condition was parsed
    switch(Inst.getOpcode()) {
      case ARM::tB:
      case ARM::tBcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
        break;
      case ARM::t2B:
      case ARM::t2Bcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
        break;
    }
  }

  // now decide on encoding size based on branch target range
  switch(Inst.getOpcode()) {
    // classify tB as either t2B or t1B based on range of immediate operand
    case ARM::tB: {
      ARMOperand &op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
      // widen to the 32-bit encoding when the target exceeds the 11-bit
      // signed offset and the target supports it
      if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
        Inst.setOpcode(ARM::t2B);
      break;
    }
    // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
    case ARM::tBcc: {
      ARMOperand &op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
      if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
        Inst.setOpcode(ARM::t2Bcc);
      break;
    }
  }
  // finally emit the operands: branch target immediate, then the condition
  // code (synthesizing AL when none was parsed)
  ((ARMOperand &)*Operands[MnemonicOpsEndInd]).addImmOperands(Inst, 1);
  if (CondI != 0) {
    ((ARMOperand &)*Operands[CondI]).addCondCodeOperands(Inst, 2);
  } else {
    ARMOperand Op = *ARMOperand::CreateCondCode(
        llvm::ARMCC::AL, Operands[0]->getEndLoc(), *this);
    Op.addCondCodeOperands(Inst, 2);
  }
}
5925 
5926 void ARMAsmParser::cvtMVEVMOVQtoDReg(
5927   MCInst &Inst, const OperandVector &Operands) {
5928 
5929   unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
5930   unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);
5931 
5932   // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
5933   assert(Operands.size() == MnemonicOpsEndInd + 6);
5934 
5935   ((ARMOperand &)*Operands[MnemonicOpsEndInd]).addRegOperands(Inst, 1); // Rt
5936   ((ARMOperand &)*Operands[MnemonicOpsEndInd + 1])
5937       .addRegOperands(Inst, 1); // Rt2
5938   ((ARMOperand &)*Operands[MnemonicOpsEndInd + 2])
5939       .addRegOperands(Inst, 1); // Qd
5940   ((ARMOperand &)*Operands[MnemonicOpsEndInd + 3])
5941       .addMVEPairVectorIndexOperands(Inst, 1); // idx
5942   // skip second copy of Qd in Operands[6]
5943   ((ARMOperand &)*Operands[MnemonicOpsEndInd + 5])
5944       .addMVEPairVectorIndexOperands(Inst, 1); // idx2
5945   if (CondI != 0) {
5946     ((ARMOperand &)*Operands[CondI])
5947         .addCondCodeOperands(Inst, 2); // condition code
5948   } else {
5949     ARMOperand Op =
5950         *ARMOperand::CreateCondCode(ARMCC::AL, Operands[0]->getEndLoc(), *this);
5951     Op.addCondCodeOperands(Inst, 2);
5952   }
5953 }
5954 
5955 /// Parse an ARM memory expression, return false if successful else return true
5956 /// or an error.  The first token must be a '[' when called.
5957 bool ARMAsmParser::parseMemory(OperandVector &Operands) {
5958   MCAsmParser &Parser = getParser();
5959   SMLoc S, E;
5960   if (Parser.getTok().isNot(AsmToken::LBrac))
5961     return TokError("Token is not a Left Bracket");
5962   S = Parser.getTok().getLoc();
5963   Parser.Lex(); // Eat left bracket token.
5964 
5965   const AsmToken &BaseRegTok = Parser.getTok();
5966   MCRegister BaseReg = tryParseRegister();
5967   if (!BaseReg)
5968     return Error(BaseRegTok.getLoc(), "register expected");
5969 
5970   // The next token must either be a comma, a colon or a closing bracket.
5971   const AsmToken &Tok = Parser.getTok();
5972   if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
5973       !Tok.is(AsmToken::RBrac))
5974     return Error(Tok.getLoc(), "malformed memory operand");
5975 
5976   if (Tok.is(AsmToken::RBrac)) {
5977     E = Tok.getEndLoc();
5978     Parser.Lex(); // Eat right bracket token.
5979 
5980     Operands.push_back(ARMOperand::CreateMem(
5981         BaseReg, nullptr, 0, ARM_AM::no_shift, 0, 0, false, S, E, *this));
5982 
5983     // If there's a pre-indexing writeback marker, '!', just add it as a token
5984     // operand. It's rather odd, but syntactically valid.
5985     if (Parser.getTok().is(AsmToken::Exclaim)) {
5986       Operands.push_back(
5987           ARMOperand::CreateToken("!", Parser.getTok().getLoc(), *this));
5988       Parser.Lex(); // Eat the '!'.
5989     }
5990 
5991     return false;
5992   }
5993 
5994   assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
5995          "Lost colon or comma in memory operand?!");
5996   if (Tok.is(AsmToken::Comma)) {
5997     Parser.Lex(); // Eat the comma.
5998   }
5999 
6000   // If we have a ':', it's an alignment specifier.
6001   if (Parser.getTok().is(AsmToken::Colon)) {
6002     Parser.Lex(); // Eat the ':'.
6003     E = Parser.getTok().getLoc();
6004     SMLoc AlignmentLoc = Tok.getLoc();
6005 
6006     const MCExpr *Expr;
6007     if (getParser().parseExpression(Expr))
6008      return true;
6009 
6010     // The expression has to be a constant. Memory references with relocations
6011     // don't come through here, as they use the <label> forms of the relevant
6012     // instructions.
6013     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
6014     if (!CE)
6015       return Error (E, "constant expression expected");
6016 
6017     unsigned Align = 0;
6018     switch (CE->getValue()) {
6019     default:
6020       return Error(E,
6021                    "alignment specifier must be 16, 32, 64, 128, or 256 bits");
6022     case 16:  Align = 2; break;
6023     case 32:  Align = 4; break;
6024     case 64:  Align = 8; break;
6025     case 128: Align = 16; break;
6026     case 256: Align = 32; break;
6027     }
6028 
6029     // Now we should have the closing ']'
6030     if (Parser.getTok().isNot(AsmToken::RBrac))
6031       return Error(Parser.getTok().getLoc(), "']' expected");
6032     E = Parser.getTok().getEndLoc();
6033     Parser.Lex(); // Eat right bracket token.
6034 
6035     // Don't worry about range checking the value here. That's handled by
6036     // the is*() predicates.
6037     Operands.push_back(ARMOperand::CreateMem(BaseReg, nullptr, 0,
6038                                              ARM_AM::no_shift, 0, Align, false,
6039                                              S, E, *this, AlignmentLoc));
6040 
6041     // If there's a pre-indexing writeback marker, '!', just add it as a token
6042     // operand.
6043     if (Parser.getTok().is(AsmToken::Exclaim)) {
6044       Operands.push_back(
6045           ARMOperand::CreateToken("!", Parser.getTok().getLoc(), *this));
6046       Parser.Lex(); // Eat the '!'.
6047     }
6048 
6049     return false;
6050   }
6051 
6052   // If we have a '#' or '$', it's an immediate offset, else assume it's a
6053   // register offset. Be friendly and also accept a plain integer or expression
6054   // (without a leading hash) for gas compatibility.
6055   if (Parser.getTok().is(AsmToken::Hash) ||
6056       Parser.getTok().is(AsmToken::Dollar) ||
6057       Parser.getTok().is(AsmToken::LParen) ||
6058       Parser.getTok().is(AsmToken::Integer)) {
6059     if (Parser.getTok().is(AsmToken::Hash) ||
6060         Parser.getTok().is(AsmToken::Dollar))
6061       Parser.Lex(); // Eat '#' or '$'
6062     E = Parser.getTok().getLoc();
6063 
6064     bool isNegative = getParser().getTok().is(AsmToken::Minus);
6065     const MCExpr *Offset, *AdjustedOffset;
6066     if (getParser().parseExpression(Offset))
6067      return true;
6068 
6069     if (const auto *CE = dyn_cast<MCConstantExpr>(Offset)) {
6070       // If the constant was #-0, represent it as
6071       // std::numeric_limits<int32_t>::min().
6072       int32_t Val = CE->getValue();
6073       if (isNegative && Val == 0)
6074         CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
6075                                     getContext());
6076       // Don't worry about range checking the value here. That's handled by
6077       // the is*() predicates.
6078       AdjustedOffset = CE;
6079     } else
6080       AdjustedOffset = Offset;
6081     Operands.push_back(ARMOperand::CreateMem(BaseReg, AdjustedOffset, 0,
6082                                              ARM_AM::no_shift, 0, 0, false, S,
6083                                              E, *this));
6084 
6085     // Now we should have the closing ']'
6086     if (Parser.getTok().isNot(AsmToken::RBrac))
6087       return Error(Parser.getTok().getLoc(), "']' expected");
6088     E = Parser.getTok().getEndLoc();
6089     Parser.Lex(); // Eat right bracket token.
6090 
6091     // If there's a pre-indexing writeback marker, '!', just add it as a token
6092     // operand.
6093     if (Parser.getTok().is(AsmToken::Exclaim)) {
6094       Operands.push_back(
6095           ARMOperand::CreateToken("!", Parser.getTok().getLoc(), *this));
6096       Parser.Lex(); // Eat the '!'.
6097     }
6098 
6099     return false;
6100   }
6101 
6102   // The register offset is optionally preceded by a '+' or '-'
6103   bool isNegative = false;
6104   if (Parser.getTok().is(AsmToken::Minus)) {
6105     isNegative = true;
6106     Parser.Lex(); // Eat the '-'.
6107   } else if (Parser.getTok().is(AsmToken::Plus)) {
6108     // Nothing to do.
6109     Parser.Lex(); // Eat the '+'.
6110   }
6111 
6112   E = Parser.getTok().getLoc();
6113   MCRegister OffsetReg = tryParseRegister();
6114   if (!OffsetReg)
6115     return Error(E, "register expected");
6116 
6117   // If there's a shift operator, handle it.
6118   ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
6119   unsigned ShiftImm = 0;
6120   if (Parser.getTok().is(AsmToken::Comma)) {
6121     Parser.Lex(); // Eat the ','.
6122     if (parseMemRegOffsetShift(ShiftType, ShiftImm))
6123       return true;
6124   }
6125 
6126   // Now we should have the closing ']'
6127   if (Parser.getTok().isNot(AsmToken::RBrac))
6128     return Error(Parser.getTok().getLoc(), "']' expected");
6129   E = Parser.getTok().getEndLoc();
6130   Parser.Lex(); // Eat right bracket token.
6131 
6132   Operands.push_back(ARMOperand::CreateMem(BaseReg, nullptr, OffsetReg,
6133                                            ShiftType, ShiftImm, 0, isNegative,
6134                                            S, E, *this));
6135 
6136   // If there's a pre-indexing writeback marker, '!', just add it as a token
6137   // operand.
6138   if (Parser.getTok().is(AsmToken::Exclaim)) {
6139     Operands.push_back(
6140         ARMOperand::CreateToken("!", Parser.getTok().getLoc(), *this));
6141     Parser.Lex(); // Eat the '!'.
6142   }
6143 
6144   return false;
6145 }
6146 
6147 /// parseMemRegOffsetShift - one of these two:
6148 ///   ( lsl | lsr | asr | ror ) , # shift_amount
6149 ///   rrx
6150 /// return true if it parses a shift otherwise it returns false.
6151 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
6152                                           unsigned &Amount) {
6153   MCAsmParser &Parser = getParser();
6154   SMLoc Loc = Parser.getTok().getLoc();
6155   const AsmToken &Tok = Parser.getTok();
6156   if (Tok.isNot(AsmToken::Identifier))
6157     return Error(Loc, "illegal shift operator");
6158   StringRef ShiftName = Tok.getString();
6159   if (ShiftName == "lsl" || ShiftName == "LSL" ||
6160       ShiftName == "asl" || ShiftName == "ASL")
6161     St = ARM_AM::lsl;
6162   else if (ShiftName == "lsr" || ShiftName == "LSR")
6163     St = ARM_AM::lsr;
6164   else if (ShiftName == "asr" || ShiftName == "ASR")
6165     St = ARM_AM::asr;
6166   else if (ShiftName == "ror" || ShiftName == "ROR")
6167     St = ARM_AM::ror;
6168   else if (ShiftName == "rrx" || ShiftName == "RRX")
6169     St = ARM_AM::rrx;
6170   else if (ShiftName == "uxtw" || ShiftName == "UXTW")
6171     St = ARM_AM::uxtw;
6172   else
6173     return Error(Loc, "illegal shift operator");
6174   Parser.Lex(); // Eat shift type token.
6175 
6176   // rrx stands alone.
6177   Amount = 0;
6178   if (St != ARM_AM::rrx) {
6179     Loc = Parser.getTok().getLoc();
6180     // A '#' and a shift amount.
6181     const AsmToken &HashTok = Parser.getTok();
6182     if (HashTok.isNot(AsmToken::Hash) &&
6183         HashTok.isNot(AsmToken::Dollar))
6184       return Error(HashTok.getLoc(), "'#' expected");
6185     Parser.Lex(); // Eat hash token.
6186 
6187     const MCExpr *Expr;
6188     if (getParser().parseExpression(Expr))
6189       return true;
6190     // Range check the immediate.
6191     // lsl, ror: 0 <= imm <= 31
6192     // lsr, asr: 0 <= imm <= 32
6193     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
6194     if (!CE)
6195       return Error(Loc, "shift amount must be an immediate");
6196     int64_t Imm = CE->getValue();
6197     if (Imm < 0 ||
6198         ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
6199         ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
6200       return Error(Loc, "immediate shift value out of range");
6201     // If <ShiftTy> #0, turn it into a no_shift.
6202     if (Imm == 0)
6203       St = ARM_AM::lsl;
6204     // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
6205     if (Imm == 32)
6206       Imm = 0;
6207     Amount = Imm;
6208   }
6209 
6210   return false;
6211 }
6212 
/// parseFPImm - A floating point immediate expression operand.
ParseStatus ARMAsmParser::parseFPImm(OperandVector &Operands) {
  LLVM_DEBUG(dbgs() << "PARSE FPImm, Ops: " << Operands.size());

  MCAsmParser &Parser = getParser();
  // Anything that can accept a floating point constant as an operand
  // needs to go through here, as the regular parseExpression is
  // integer only.
  //
  // This routine still creates a generic Immediate operand, containing
  // a bitcast of the 64-bit floating point value. The various operands
  // that accept floats can check whether the value is valid for them
  // via the standard is*() predicates.

  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return ParseStatus::NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm

  // Scan the mnemonic's suffix tokens for a float type (.f16/.f32/.f64) to
  // decide whether a floating point literal is acceptable here.
  bool isVmovf = false;
  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
  for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
    ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[I]);
    if (TyOp.isToken() &&
        (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
         TyOp.getToken() == ".f16")) {
      isVmovf = true;
      break;
    }
  }

  // The legacy fconsts/fconstd mnemonics take a raw encoded 8-bit value.
  ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
  bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
                                         Mnemonic.getToken() == "fconsts");
  if (!(isVmovf || isFconst))
    return ParseStatus::NoMatch;

  Parser.Lex(); // Eat '#' or '$'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  SMLoc Loc = Tok.getLoc();
  if (Tok.is(AsmToken::Real) && isVmovf) {
    // The literal is bitcast from an IEEE single-precision value; the is*()
    // predicates validate it for the actual instruction later.
    APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 31;
    Parser.Lex(); // Eat the token.
    Operands.push_back(
        ARMOperand::CreateImm(MCConstantExpr::create(IntVal, getContext()), S,
                              Parser.getTok().getLoc(), *this));
    return ParseStatus::Success;
  }
  // Also handle plain integers. Instructions which allow floating point
  // immediates also allow a raw encoded 8-bit value.
  if (Tok.is(AsmToken::Integer) && isFconst) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0)
      return Error(Loc, "encoded floating point value out of range");
    // Decode the 8-bit encoding into the float it represents, then store
    // that float's bit pattern as the immediate.
    float RealVal = ARM_AM::getFPImmFloat(Val);
    Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();

    Operands.push_back(
        ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S,
                              Parser.getTok().getLoc(), *this));
    return ParseStatus::Success;
  }

  return Error(Loc, "invalid floating point immediate");
}
6301 
6302 /// Parse a arm instruction operand.  For now this parses the operand regardless
6303 /// of the mnemonic.
6304 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
6305   MCAsmParser &Parser = getParser();
6306   SMLoc S, E;
6307 
6308   // Check if the current operand has a custom associated parser, if so, try to
6309   // custom parse the operand, or fallback to the general approach.
6310   ParseStatus ResTy = MatchOperandParserImpl(Operands, Mnemonic);
6311   if (ResTy.isSuccess())
6312     return false;
6313   // If there wasn't a custom match, try the generic matcher below. Otherwise,
6314   // there was a match, but an error occurred, in which case, just return that
6315   // the operand parsing failed.
6316   if (ResTy.isFailure())
6317     return true;
6318 
6319   switch (getLexer().getKind()) {
6320   default:
6321     Error(Parser.getTok().getLoc(), "unexpected token in operand");
6322     return true;
6323   case AsmToken::Identifier: {
6324     // If we've seen a branch mnemonic, the next operand must be a label.  This
6325     // is true even if the label is a register name.  So "br r1" means branch to
6326     // label "r1".
6327     bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
6328     if (!ExpectLabel) {
6329       if (!tryParseRegisterWithWriteBack(Operands))
6330         return false;
6331       int Res = tryParseShiftRegister(Operands);
6332       if (Res == 0) // success
6333         return false;
6334       else if (Res == -1) // irrecoverable error
6335         return true;
6336       // If this is VMRS, check for the apsr_nzcv operand.
6337       if (Mnemonic == "vmrs" &&
6338           Parser.getTok().getString().equals_insensitive("apsr_nzcv")) {
6339         S = Parser.getTok().getLoc();
6340         Parser.Lex();
6341         Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S, *this));
6342         return false;
6343       }
6344     }
6345 
6346     // Fall though for the Identifier case that is not a register or a
6347     // special name.
6348     [[fallthrough]];
6349   }
6350   case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
6351   case AsmToken::Integer: // things like 1f and 2b as a branch targets
6352   case AsmToken::String:  // quoted label names.
6353   case AsmToken::Dot: {   // . as a branch target
6354     // This was not a register so parse other operands that start with an
6355     // identifier (like labels) as expressions and create them as immediates.
6356     const MCExpr *IdVal;
6357     S = Parser.getTok().getLoc();
6358     if (getParser().parseExpression(IdVal))
6359       return true;
6360     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6361     Operands.push_back(ARMOperand::CreateImm(IdVal, S, E, *this));
6362     return false;
6363   }
6364   case AsmToken::LBrac:
6365     return parseMemory(Operands);
6366   case AsmToken::LCurly: {
6367     bool IsLazyLoadStore = Mnemonic == "vlldm" || Mnemonic == "vlstm";
6368     bool IsVSCCLRM = Mnemonic == "vscclrm";
6369     return parseRegisterList(Operands, !Mnemonic.starts_with("clr"), false,
6370                              IsLazyLoadStore, IsVSCCLRM);
6371   }
6372   case AsmToken::Dollar:
6373   case AsmToken::Hash: {
6374     // #42 -> immediate
6375     // $ 42 -> immediate
6376     // $foo -> symbol name
6377     // $42 -> symbol name
6378     S = Parser.getTok().getLoc();
6379 
6380     // Favor the interpretation of $-prefixed operands as symbol names.
6381     // Cases where immediates are explicitly expected are handled by their
6382     // specific ParseMethod implementations.
6383     auto AdjacentToken = getLexer().peekTok(/*ShouldSkipSpace=*/false);
6384     bool ExpectIdentifier = Parser.getTok().is(AsmToken::Dollar) &&
6385                             (AdjacentToken.is(AsmToken::Identifier) ||
6386                              AdjacentToken.is(AsmToken::Integer));
6387     if (!ExpectIdentifier) {
6388       // Token is not part of identifier. Drop leading $ or # before parsing
6389       // expression.
6390       Parser.Lex();
6391     }
6392 
6393     if (Parser.getTok().isNot(AsmToken::Colon)) {
6394       bool IsNegative = Parser.getTok().is(AsmToken::Minus);
6395       const MCExpr *ImmVal;
6396       if (getParser().parseExpression(ImmVal))
6397         return true;
6398       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
6399       if (CE) {
6400         int32_t Val = CE->getValue();
6401         if (IsNegative && Val == 0)
6402           ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
6403                                           getContext());
6404       }
6405       E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6406       Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E, *this));
6407 
6408       // There can be a trailing '!' on operands that we want as a separate
6409       // '!' Token operand. Handle that here. For example, the compatibility
6410       // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
6411       if (Parser.getTok().is(AsmToken::Exclaim)) {
6412         Operands.push_back(ARMOperand::CreateToken(
6413             Parser.getTok().getString(), Parser.getTok().getLoc(), *this));
6414         Parser.Lex(); // Eat exclaim token
6415       }
6416       return false;
6417     }
6418     // w/ a ':' after the '#', it's just like a plain ':'.
6419     [[fallthrough]];
6420   }
6421   case AsmToken::Colon: {
6422     S = Parser.getTok().getLoc();
6423     // ":lower16:", ":upper16:", ":lower0_7:", ":lower8_15:", ":upper0_7:" and
6424     // ":upper8_15:", expression prefixes
6425     // FIXME: Check it's an expression prefix,
6426     // e.g. (FOO - :lower16:BAR) isn't legal.
6427     ARMMCExpr::VariantKind RefKind;
6428     if (parsePrefix(RefKind))
6429       return true;
6430 
6431     const MCExpr *SubExprVal;
6432     if (getParser().parseExpression(SubExprVal))
6433       return true;
6434 
6435     const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
6436                                               getContext());
6437     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6438     Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E, *this));
6439     return false;
6440   }
6441   case AsmToken::Equal: {
6442     S = Parser.getTok().getLoc();
6443     if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
6444       return Error(S, "unexpected token in operand");
6445     Parser.Lex(); // Eat '='
6446     const MCExpr *SubExprVal;
6447     if (getParser().parseExpression(SubExprVal))
6448       return true;
6449     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6450 
6451     // execute-only: we assume that assembly programmers know what they are
6452     // doing and allow literal pool creation here
6453     Operands.push_back(
6454         ARMOperand::CreateConstantPoolImm(SubExprVal, S, E, *this));
6455     return false;
6456   }
6457   }
6458 }
6459 
6460 bool ARMAsmParser::parseImmExpr(int64_t &Out) {
6461   const MCExpr *Expr = nullptr;
6462   SMLoc L = getParser().getTok().getLoc();
6463   if (check(getParser().parseExpression(Expr), L, "expected expression"))
6464     return true;
6465   const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
6466   if (check(!Value, L, "expected constant expression"))
6467     return true;
6468   Out = Value->getValue();
6469   return false;
6470 }
6471 
6472 // parsePrefix - Parse ARM 16-bit relocations expression prefixes, i.e.
6473 // :lower16: and :upper16: and Thumb 8-bit relocation expression prefixes, i.e.
6474 // :upper8_15:, :upper0_7:, :lower8_15: and :lower0_7:
6475 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
6476   MCAsmParser &Parser = getParser();
6477   RefKind = ARMMCExpr::VK_ARM_None;
6478 
6479   // consume an optional '#' (GNU compatibility)
6480   if (getLexer().is(AsmToken::Hash))
6481     Parser.Lex();
6482 
6483   assert(getLexer().is(AsmToken::Colon) && "expected a :");
6484   Parser.Lex(); // Eat ':'
6485 
6486   if (getLexer().isNot(AsmToken::Identifier)) {
6487     Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
6488     return true;
6489   }
6490 
6491   enum {
6492     COFF = (1 << MCContext::IsCOFF),
6493     ELF = (1 << MCContext::IsELF),
6494     MACHO = (1 << MCContext::IsMachO),
6495     WASM = (1 << MCContext::IsWasm),
6496   };
6497   static const struct PrefixEntry {
6498     const char *Spelling;
6499     ARMMCExpr::VariantKind VariantKind;
6500     uint8_t SupportedFormats;
6501   } PrefixEntries[] = {
6502       {"upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO},
6503       {"lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO},
6504       {"upper8_15", ARMMCExpr::VK_ARM_HI_8_15, ELF},
6505       {"upper0_7", ARMMCExpr::VK_ARM_HI_0_7, ELF},
6506       {"lower8_15", ARMMCExpr::VK_ARM_LO_8_15, ELF},
6507       {"lower0_7", ARMMCExpr::VK_ARM_LO_0_7, ELF},
6508   };
6509 
6510   StringRef IDVal = Parser.getTok().getIdentifier();
6511 
6512   const auto &Prefix =
6513       llvm::find_if(PrefixEntries, [&IDVal](const PrefixEntry &PE) {
6514         return PE.Spelling == IDVal;
6515       });
6516   if (Prefix == std::end(PrefixEntries)) {
6517     Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
6518     return true;
6519   }
6520 
6521   uint8_t CurrentFormat;
6522   switch (getContext().getObjectFileType()) {
6523   case MCContext::IsMachO:
6524     CurrentFormat = MACHO;
6525     break;
6526   case MCContext::IsELF:
6527     CurrentFormat = ELF;
6528     break;
6529   case MCContext::IsCOFF:
6530     CurrentFormat = COFF;
6531     break;
6532   case MCContext::IsWasm:
6533     CurrentFormat = WASM;
6534     break;
6535   case MCContext::IsGOFF:
6536   case MCContext::IsSPIRV:
6537   case MCContext::IsXCOFF:
6538   case MCContext::IsDXContainer:
6539     llvm_unreachable("unexpected object format");
6540     break;
6541   }
6542 
6543   if (~Prefix->SupportedFormats & CurrentFormat) {
6544     Error(Parser.getTok().getLoc(),
6545           "cannot represent relocation in the current file format");
6546     return true;
6547   }
6548 
6549   RefKind = Prefix->VariantKind;
6550   Parser.Lex();
6551 
6552   if (getLexer().isNot(AsmToken::Colon)) {
6553     Error(Parser.getTok().getLoc(), "unexpected token after prefix");
6554     return true;
6555   }
6556   Parser.Lex(); // Eat the last ':'
6557 
6558   // consume an optional trailing '#' (GNU compatibility) bla
6559   parseOptionalToken(AsmToken::Hash);
6560 
6561   return false;
6562 }
6563 
/// Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
///
/// Returns the canonical (suffix-free) mnemonic; the decoded pieces are
/// reported through the out-parameters:
///   \param PredicationCode    condition-code suffix, ARMCC::AL if none
///   \param VPTPredicationCode MVE VPT suffix ('t'/'e'), ARMVCC::None if none
///   \param CarrySetting       true when a flag-setting 's' was stripped
///   \param ProcessorIMod      "cps" interrupt mode ("ie"/"id"), 0 if none
///   \param ITMask             condition mask glued onto "it"/"vpt"/"vpst"
///
/// The stripping order matters: condition code first, then 's', then the
/// "cps" imod, then either the VPT suffix or the it/vpt mask.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
                                      ARMCC::CondCodes &PredicationCode,
                                      ARMVCC::VPTCodes &VPTPredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  VPTPredicationCode = ARMVCC::None;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) || Mnemonic == "teq" ||
      Mnemonic == "vceq" || Mnemonic == "svc" || Mnemonic == "mls" ||
      Mnemonic == "smmls" || Mnemonic == "vcls" || Mnemonic == "vmls" ||
      Mnemonic == "vnmls" || Mnemonic == "vacge" || Mnemonic == "vcge" ||
      Mnemonic == "vclt" || Mnemonic == "vacgt" || Mnemonic == "vaclt" ||
      Mnemonic == "vacle" || Mnemonic == "hlt" || Mnemonic == "vcgt" ||
      Mnemonic == "vcle" || Mnemonic == "smlal" || Mnemonic == "umaal" ||
      Mnemonic == "umlal" || Mnemonic == "vabal" || Mnemonic == "vmlal" ||
      Mnemonic == "vpadal" || Mnemonic == "vqdmlal" || Mnemonic == "fmuls" ||
      Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
      Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
      Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
      Mnemonic == "vrintm" || Mnemonic == "hvc" ||
      Mnemonic.starts_with("vsel") || Mnemonic == "vins" ||
      Mnemonic == "vmovx" || Mnemonic == "bxns" || Mnemonic == "blxns" ||
      Mnemonic == "vdot" || Mnemonic == "vmmla" || Mnemonic == "vudot" ||
      Mnemonic == "vsdot" || Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
      Mnemonic == "vfmal" || Mnemonic == "vfmsl" || Mnemonic == "wls" ||
      Mnemonic == "le" || Mnemonic == "dls" || Mnemonic == "csel" ||
      Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
      Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
      Mnemonic == "cset" || Mnemonic == "csetm" || Mnemonic == "aut" ||
      Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "bti")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs" &&
      !(hasMVE() &&
        (Mnemonic == "vmine" || Mnemonic == "vshle" || Mnemonic == "vshlt" ||
         Mnemonic == "vshllt" || Mnemonic == "vrshle" || Mnemonic == "vrshlt" ||
         Mnemonic == "vmvne" || Mnemonic == "vorne" || Mnemonic == "vnege" ||
         Mnemonic == "vnegt" || Mnemonic == "vmule" || Mnemonic == "vmult" ||
         Mnemonic == "vrintne" || Mnemonic == "vcmult" ||
         Mnemonic == "vcmule" || Mnemonic == "vpsele" || Mnemonic == "vpselt" ||
         Mnemonic.starts_with("vq")))) {
    // Try to interpret the last two characters as a condition-code suffix
    // (e.g. "eq", "ne"); ~0U means "not a condition code".
    unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = static_cast<ARMCC::CondCodes>(CC);
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.ends_with("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" || Mnemonic == "mrs" ||
        Mnemonic == "smmls" || Mnemonic == "vabs" || Mnemonic == "vcls" ||
        Mnemonic == "vmls" || Mnemonic == "vmrs" || Mnemonic == "vnmls" ||
        Mnemonic == "vqabs" || Mnemonic == "vrecps" || Mnemonic == "vrsqrts" ||
        Mnemonic == "srs" || Mnemonic == "flds" || Mnemonic == "fmrs" ||
        Mnemonic == "fsqrts" || Mnemonic == "fsubs" || Mnemonic == "fsts" ||
        Mnemonic == "fcpys" || Mnemonic == "fdivs" || Mnemonic == "fmuls" ||
        Mnemonic == "fcmps" || Mnemonic == "fcmpzs" || Mnemonic == "vfms" ||
        Mnemonic == "vfnms" || Mnemonic == "fconsts" || Mnemonic == "bxns" ||
        Mnemonic == "blxns" || Mnemonic == "vfmas" || Mnemonic == "vmlas" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.starts_with("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // For VPT-predicable mnemonics (minus a list of exceptions whose trailing
  // 't' is part of the name, e.g. "vmovlt"), strip an optional 't'/'e' VPT
  // suffix and return; the it/vpt mask handling below does not apply to them.
  if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" &&
      Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" &&
      Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" &&
      Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt" && Mnemonic != "vmullt" &&
      Mnemonic != "vqmovnt" && Mnemonic != "vqmovunt" && Mnemonic != "vmovnt" &&
      Mnemonic != "vqdmullt" && Mnemonic != "vpnot" && Mnemonic != "vcvtt" &&
      Mnemonic != "vcvt") {
    unsigned VCC =
        ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size() - 1));
    if (VCC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
      VPTPredicationCode = static_cast<ARMVCC::VPTCodes>(VCC);
    }
    return Mnemonic;
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.starts_with("it")) {
    ITMask = Mnemonic.substr(2);
    Mnemonic = Mnemonic.slice(0, 2);
  }

  // Likewise "vpst"/"vpt" carry a VPT mask; check "vpst" first since "vpt"
  // is a prefix of it.
  if (Mnemonic.starts_with("vpst")) {
    ITMask = Mnemonic.substr(4);
    Mnemonic = Mnemonic.slice(0, 4);
  } else if (Mnemonic.starts_with("vpt")) {
    ITMask = Mnemonic.substr(3);
    Mnemonic = Mnemonic.slice(0, 3);
  }

  return Mnemonic;
}
6694 
/// Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
///
/// \param Mnemonic  canonical (suffix-free) mnemonic as produced by
///                  splitMnemonic
/// \param ExtraToken trailing token (e.g. a data-type suffix), forwarded to
///                  isMnemonicVPTPredicable
/// \param FullInst  the full instruction text, used for suffix-sensitive
///                  checks such as "vmull...p64"
/// \param CanAcceptCarrySet           set if an 's' (CPSR) suffix is valid
/// \param CanAcceptPredicationCode    set if a condition code is valid
/// \param CanAcceptVPTPredicationCode set if a VPT 't'/'e' suffix is valid
//
// FIXME: It would be nice to autogen this.
void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
                                         StringRef ExtraToken,
                                         StringRef FullInst,
                                         bool &CanAcceptCarrySet,
                                         bool &CanAcceptPredicationCode,
                                         bool &CanAcceptVPTPredicationCode) {
  CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);

  // Only these data-processing mnemonics (plus, in ARM mode, a few moves and
  // multiplies) have a flag-setting form.
  CanAcceptCarrySet =
      Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
      Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
      Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
      Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
      Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
      (!isThumb() &&
       (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
        Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));

  if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
      Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
      Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
      Mnemonic.starts_with("crc32") || Mnemonic.starts_with("cps") ||
      Mnemonic.starts_with("vsel") || Mnemonic == "vmaxnm" ||
      Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
      Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
      Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
      Mnemonic.starts_with("aes") || Mnemonic == "hvc" ||
      Mnemonic == "setpan" || Mnemonic.starts_with("sha1") ||
      Mnemonic.starts_with("sha256") ||
      (FullInst.starts_with("vmull") && FullInst.ends_with(".p64")) ||
      Mnemonic == "vmovx" || Mnemonic == "vins" || Mnemonic == "vudot" ||
      Mnemonic == "vsdot" || Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
      Mnemonic == "vfmal" || Mnemonic == "vfmsl" || Mnemonic == "vfmat" ||
      Mnemonic == "vfmab" || Mnemonic == "vdot" || Mnemonic == "vmmla" ||
      Mnemonic == "sb" || Mnemonic == "ssbb" || Mnemonic == "pssbb" ||
      Mnemonic == "vsmmla" || Mnemonic == "vummla" || Mnemonic == "vusmmla" ||
      Mnemonic == "vusdot" || Mnemonic == "vsudot" || Mnemonic == "bfcsel" ||
      Mnemonic == "wls" || Mnemonic == "dls" || Mnemonic == "le" ||
      Mnemonic == "csel" || Mnemonic == "csinc" || Mnemonic == "csinv" ||
      Mnemonic == "csneg" || Mnemonic == "cinc" || Mnemonic == "cinv" ||
      Mnemonic == "cneg" || Mnemonic == "cset" || Mnemonic == "csetm" ||
      (hasCDE() && MS.isCDEInstr(Mnemonic) &&
       !MS.isITPredicableCDEInstr(Mnemonic)) ||
      Mnemonic.starts_with("vpt") || Mnemonic.starts_with("vpst") ||
      Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "aut" ||
      Mnemonic == "bti" ||
      (hasMVE() &&
       (Mnemonic.starts_with("vst2") || Mnemonic.starts_with("vld2") ||
        Mnemonic.starts_with("vst4") || Mnemonic.starts_with("vld4") ||
        Mnemonic.starts_with("wlstp") || Mnemonic.starts_with("dlstp") ||
        Mnemonic.starts_with("letp")))) {
    // These mnemonics are never predicable
    CanAcceptPredicationCode = false;
  } else if (!isThumb()) {
    // Some instructions are only predicable in Thumb mode
    CanAcceptPredicationCode =
        Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
        Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
        Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
        Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
        Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
        Mnemonic != "stc2" && Mnemonic != "stc2l" && Mnemonic != "tsb" &&
        !Mnemonic.starts_with("rfe") && !Mnemonic.starts_with("srs");
  } else if (isThumbOne()) {
    // Thumb1: "movs" is handled via the carry-set path, and pre-v6M has no
    // predicable "nop".
    if (hasV6MOps())
      CanAcceptPredicationCode = Mnemonic != "movs";
    else
      CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
  } else
    CanAcceptPredicationCode = true;
}
6772 
6773 bool operandsContainWide(OperandVector &Operands, unsigned MnemonicOpsEndInd) {
6774   for (unsigned I = 0; I < MnemonicOpsEndInd; ++I) {
6775     auto &Op = static_cast<ARMOperand &>(*Operands[I]);
6776     if (Op.isToken() && Op.getToken() == ".w")
6777       return true;
6778   }
6779   return false;
6780 }
6781 
// Some Thumb instructions have two operand forms that are not
// available as three operand, convert to two operand form if possible.
//
// On success the redundant register operand is erased from \p Operands
// (possibly after swapping the two source operands for commutative
// instructions); otherwise Operands is left untouched.
//
// FIXME: We would really like to be able to tablegen'erate this.
void ARMAsmParser::tryConvertingToTwoOperandForm(
    StringRef Mnemonic, ARMCC::CondCodes PredicationCode, bool CarrySetting,
    OperandVector &Operands, unsigned MnemonicOpsEndInd) {

  // Don't convert if the user wrote a ".w" width specifier: the wide
  // three-operand form was explicitly requested.
  if (operandsContainWide(Operands, MnemonicOpsEndInd))
    return;
  // Only instructions with exactly three operands after the mnemonic are
  // candidates.
  if (Operands.size() != MnemonicOpsEndInd + 3)
    return;

  // Op3/Op4/Op5 are the three operands following the mnemonic, in source
  // order (destination, then the two sources).
  const auto &Op3 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
  auto &Op4 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
  if (!Op3.isReg() || !Op4.isReg())
    return;

  auto Op3Reg = Op3.getReg();
  auto Op4Reg = Op4.getReg();

  // For most Thumb2 cases we just generate the 3 operand form and reduce
  // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
  // won't accept SP or PC so we do the transformation here taking care
  // with immediate range in the 'add sp, sp #imm' case.
  auto &Op5 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 2]);
  if (isThumbTwo()) {
    if (Mnemonic != "add")
      return;
    // Only transform when SP or PC is involved; everything else is left for
    // processInstruction() to reduce.
    bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
                        (Op5.isReg() && Op5.getReg() == ARM::PC);
    if (!TryTransform) {
      TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
                      (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
                     !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
                       Op5.isImm() && !Op5.isImm0_508s4());
    }
    if (!TryTransform)
      return;
  } else if (!isThumbOne())
    return;

  // Only these mnemonics have a two-operand Thumb form.
  if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
        Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
        Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
        Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
    return;

  // If first 2 operands of a 3 operand instruction are the same
  // then transform to 2 operand version of the same instruction
  // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
  bool Transform = Op3Reg == Op4Reg;

  // For commutative operations, we might be able to transform if we swap
  // Op4 and Op5.  The 'ADD Rdm, SP, Rdm' form is already handled specially
  // as tADDrsp.
  const ARMOperand *LastOp = &Op5;
  bool Swap = false;
  if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
      ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
       Mnemonic == "and" || Mnemonic == "eor" ||
       Mnemonic == "adc" || Mnemonic == "orr")) {
    Swap = true;
    LastOp = &Op4;
    Transform = true;
  }

  // If both registers are the same then remove one of them from
  // the operand list, with certain exceptions.
  if (Transform) {
    // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
    // 2 operand forms don't exist.
    if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
        LastOp->isReg())
      Transform = false;

    // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
    // 3-bits because the ARMARM says not to.
    if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
      Transform = false;
  }

  if (Transform) {
    if (Swap)
      std::swap(Op4, Op5);
    Operands.erase(Operands.begin() + MnemonicOpsEndInd);
  }
}
6870 
6871 // this function returns true if the operand is one of the following
6872 // relocations: :upper8_15:, :upper0_7:, :lower8_15: or :lower0_7:
6873 static bool isThumbI8Relocation(MCParsedAsmOperand &MCOp) {
6874   ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
6875   if (!Op.isImm())
6876     return false;
6877   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6878   if (CE)
6879     return false;
6880   const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
6881   if (!E)
6882     return false;
6883   const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6884   if (ARM16Expr && (ARM16Expr->getKind() == ARMMCExpr::VK_ARM_HI_8_15 ||
6885                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_HI_0_7 ||
6886                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_LO_8_15 ||
6887                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_LO_0_7))
6888     return true;
6889   return false;
6890 }
6891 
6892 bool ARMAsmParser::shouldOmitVectorPredicateOperand(
6893     StringRef Mnemonic, OperandVector &Operands, unsigned MnemonicOpsEndInd) {
6894   if (!hasMVE() || Operands.size() <= MnemonicOpsEndInd)
6895     return true;
6896 
6897   if (Mnemonic.starts_with("vld2") || Mnemonic.starts_with("vld4") ||
6898       Mnemonic.starts_with("vst2") || Mnemonic.starts_with("vst4"))
6899     return true;
6900 
6901   if (Mnemonic.starts_with("vctp") || Mnemonic.starts_with("vpnot"))
6902     return false;
6903 
6904   if (Mnemonic.starts_with("vmov") &&
6905       !(Mnemonic.starts_with("vmovl") || Mnemonic.starts_with("vmovn") ||
6906         Mnemonic.starts_with("vmovx"))) {
6907     for (auto &Operand : Operands) {
6908       if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6909           ((*Operand).isReg() &&
6910            (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
6911              (*Operand).getReg()) ||
6912             ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6913               (*Operand).getReg())))) {
6914         return true;
6915       }
6916     }
6917     return false;
6918   } else {
6919     for (auto &Operand : Operands) {
6920       // We check the larger class QPR instead of just the legal class
6921       // MQPR, to more accurately report errors when using Q registers
6922       // outside of the allowed range.
6923       if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6924           static_cast<ARMOperand &>(*Operand).isQReg())
6925         return false;
6926     }
6927     return true;
6928   }
6929 }
6930 
6931 // FIXME: This bit should probably be handled via an explicit match class
6932 // in the .td files that matches the suffix instead of having it be
6933 // a literal string token the way it is now.
6934 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
6935   return Mnemonic.starts_with("vldm") || Mnemonic.starts_with("vstm");
6936 }
6937 
6938 static void applyMnemonicAliases(StringRef &Mnemonic,
6939                                  const FeatureBitset &Features,
6940                                  unsigned VariantID);
6941 
6942 // The GNU assembler has aliases of ldrd, strd, ldrexd, strexd, ldaexd, and
6943 // stlexd with the second register omitted. We don't have a way to do that in
6944 // tablegen, so fix it up here.
6945 //
6946 // We have to be careful to not emit an invalid Rt2 here, because the rest of
6947 // the assembly parser could then generate confusing diagnostics refering to
6948 // it. If we do find anything that prevents us from doing the transformation we
6949 // bail out, and let the assembly parser report an error on the instruction as
6950 // it is written.
6951 void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
6952                                      OperandVector &Operands,
6953                                      unsigned MnemonicOpsEndInd) {
6954   if (Mnemonic != "ldrd" && Mnemonic != "strd" && Mnemonic != "ldrexd" &&
6955       Mnemonic != "strexd" && Mnemonic != "ldaexd" && Mnemonic != "stlexd")
6956     return;
6957 
6958   unsigned IdX = Mnemonic == "strexd" || Mnemonic == "stlexd"
6959                      ? MnemonicOpsEndInd + 1
6960                      : MnemonicOpsEndInd;
6961 
6962   if (Operands.size() < IdX + 2)
6963     return;
6964 
6965   ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[IdX]);
6966   ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[IdX + 1]);
6967 
6968   if (!Op2.isReg())
6969     return;
6970   if (!Op3.isGPRMem())
6971     return;
6972 
6973   const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
6974   if (!GPR.contains(Op2.getReg()))
6975     return;
6976 
6977   unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
6978   if (!isThumb() && (RtEncoding & 1)) {
6979     // In ARM mode, the registers must be from an aligned pair, this
6980     // restriction does not apply in Thumb mode.
6981     return;
6982   }
6983   if (Op2.getReg() == ARM::PC)
6984     return;
6985   unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
6986   if (!PairedReg || PairedReg == ARM::PC ||
6987       (PairedReg == ARM::SP && !hasV8Ops()))
6988     return;
6989 
6990   Operands.insert(Operands.begin() + IdX + 1,
6991                   ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(),
6992                                         Op2.getEndLoc(), *this));
6993 }
6994 
6995 // Dual-register instruction have the following syntax:
6996 // <mnemonic> <predicate>? <coproc>, <Rdest>, <Rdest+1>, <Rsrc>, ..., #imm
6997 // This function tries to remove <Rdest+1> and replace <Rdest> with a pair
6998 // operand. If the conversion fails an error is diagnosed, and the function
6999 // returns true.
7000 bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic,
7001                                             OperandVector &Operands,
7002                                             unsigned MnemonicOpsEndInd) {
7003   assert(MS.isCDEDualRegInstr(Mnemonic));
7004 
7005   if (Operands.size() < 3 + MnemonicOpsEndInd)
7006     return false;
7007 
7008   StringRef Op2Diag(
7009       "operand must be an even-numbered register in the range [r0, r10]");
7010 
7011   const MCParsedAsmOperand &Op2 = *Operands[MnemonicOpsEndInd + 1];
7012   if (!Op2.isReg())
7013     return Error(Op2.getStartLoc(), Op2Diag);
7014 
7015   MCRegister RNext;
7016   MCRegister RPair;
7017   switch (Op2.getReg().id()) {
7018   default:
7019     return Error(Op2.getStartLoc(), Op2Diag);
7020   case ARM::R0:
7021     RNext = ARM::R1;
7022     RPair = ARM::R0_R1;
7023     break;
7024   case ARM::R2:
7025     RNext = ARM::R3;
7026     RPair = ARM::R2_R3;
7027     break;
7028   case ARM::R4:
7029     RNext = ARM::R5;
7030     RPair = ARM::R4_R5;
7031     break;
7032   case ARM::R6:
7033     RNext = ARM::R7;
7034     RPair = ARM::R6_R7;
7035     break;
7036   case ARM::R8:
7037     RNext = ARM::R9;
7038     RPair = ARM::R8_R9;
7039     break;
7040   case ARM::R10:
7041     RNext = ARM::R11;
7042     RPair = ARM::R10_R11;
7043     break;
7044   }
7045 
7046   const MCParsedAsmOperand &Op3 = *Operands[MnemonicOpsEndInd + 2];
7047   if (!Op3.isReg() || Op3.getReg() != RNext)
7048     return Error(Op3.getStartLoc(), "operand must be a consecutive register");
7049 
7050   Operands.erase(Operands.begin() + MnemonicOpsEndInd + 2);
7051   Operands[MnemonicOpsEndInd + 1] =
7052       ARMOperand::CreateReg(RPair, Op2.getStartLoc(), Op2.getEndLoc(), *this);
7053   return false;
7054 }
7055 
7056 void removeCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
7057   for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
7058     if (static_cast<ARMOperand &>(*Operands[I]).isCondCode()) {
7059       Operands.erase(Operands.begin() + I);
7060       --MnemonicOpsEndInd;
7061       break;
7062     }
7063 }
7064 
7065 void removeCCOut(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
7066   for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
7067     if (static_cast<ARMOperand &>(*Operands[I]).isCCOut()) {
7068       Operands.erase(Operands.begin() + I);
7069       --MnemonicOpsEndInd;
7070       break;
7071     }
7072 }
7073 
7074 void removeVPTCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
7075   for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
7076     if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred()) {
7077       Operands.erase(Operands.begin() + I);
7078       --MnemonicOpsEndInd;
7079       break;
7080     }
7081 }
7082 
7083 /// Parse an arm instruction mnemonic followed by its operands.
7084 bool ARMAsmParser::parseInstruction(ParseInstructionInfo &Info, StringRef Name,
7085                                     SMLoc NameLoc, OperandVector &Operands) {
7086   MCAsmParser &Parser = getParser();
7087 
7088   // Apply mnemonic aliases before doing anything else, as the destination
7089   // mnemonic may include suffices and we want to handle them normally.
7090   // The generic tblgen'erated code does this later, at the start of
7091   // MatchInstructionImpl(), but that's too late for aliases that include
7092   // any sort of suffix.
7093   const FeatureBitset &AvailableFeatures = getAvailableFeatures();
7094   unsigned AssemblerDialect = getParser().getAssemblerDialect();
7095   applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
7096 
7097   // First check for the ARM-specific .req directive.
7098   if (Parser.getTok().is(AsmToken::Identifier) &&
7099       Parser.getTok().getIdentifier().lower() == ".req") {
7100     parseDirectiveReq(Name, NameLoc);
7101     // We always return 'error' for this, as we're done with this
7102     // statement and don't need to match the 'instruction."
7103     return true;
7104   }
7105 
7106   // Create the leading tokens for the mnemonic, split by '.' characters.
7107   size_t Start = 0, Next = Name.find('.');
7108   StringRef Mnemonic = Name.slice(Start, Next);
7109   StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));
7110 
7111   // Split out the predication code and carry setting flag from the mnemonic.
7112   ARMCC::CondCodes PredicationCode;
7113   ARMVCC::VPTCodes VPTPredicationCode;
7114   unsigned ProcessorIMod;
7115   bool CarrySetting;
7116   StringRef ITMask;
7117   Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
7118                            CarrySetting, ProcessorIMod, ITMask);
7119 
7120   // In Thumb1, only the branch (B) instruction can be predicated.
7121   if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
7122     return Error(NameLoc, "conditional execution not supported in Thumb1");
7123   }
7124 
7125   Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc, *this));
7126 
7127   // Handle the mask for IT and VPT instructions. In ARMOperand and
7128   // MCOperand, this is stored in a format independent of the
7129   // condition code: the lowest set bit indicates the end of the
7130   // encoding, and above that, a 1 bit indicates 'else', and an 0
7131   // indicates 'then'. E.g.
7132   //    IT    -> 1000
7133   //    ITx   -> x100    (ITT -> 0100, ITE -> 1100)
7134   //    ITxy  -> xy10    (e.g. ITET -> 1010)
7135   //    ITxyz -> xyz1    (e.g. ITEET -> 1101)
7136   // Note: See the ARM::PredBlockMask enum in
7137   //   /lib/Target/ARM/Utils/ARMBaseInfo.h
7138   if (Mnemonic == "it" || Mnemonic.starts_with("vpt") ||
7139       Mnemonic.starts_with("vpst")) {
7140     SMLoc Loc = Mnemonic == "it"  ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) :
7141                 Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
7142                                     SMLoc::getFromPointer(NameLoc.getPointer() + 4);
7143     if (ITMask.size() > 3) {
7144       if (Mnemonic == "it")
7145         return Error(Loc, "too many conditions on IT instruction");
7146       return Error(Loc, "too many conditions on VPT instruction");
7147     }
7148     unsigned Mask = 8;
7149     for (char Pos : llvm::reverse(ITMask)) {
7150       if (Pos != 't' && Pos != 'e') {
7151         return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
7152       }
7153       Mask >>= 1;
7154       if (Pos == 'e')
7155         Mask |= 8;
7156     }
7157     Operands.push_back(ARMOperand::CreateITMask(Mask, Loc, *this));
7158   }
7159 
7160   // FIXME: This is all a pretty gross hack. We should automatically handle
7161   // optional operands like this via tblgen.
7162 
7163   // Next, add the CCOut and ConditionCode operands, if needed.
7164   //
7165   // For mnemonics which can ever incorporate a carry setting bit or predication
7166   // code, our matching model involves us always generating CCOut and
7167   // ConditionCode operands to match the mnemonic "as written" and then we let
7168   // the matcher deal with finding the right instruction or generating an
7169   // appropriate error.
7170   bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
7171   getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
7172                         CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
7173 
7174   // If we had a carry-set on an instruction that can't do that, issue an
7175   // error.
7176   if (!CanAcceptCarrySet && CarrySetting) {
7177     return Error(NameLoc, "instruction '" + Mnemonic +
7178                  "' can not set flags, but 's' suffix specified");
7179   }
7180   // If we had a predication code on an instruction that can't do that, issue an
7181   // error.
7182   if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
7183     return Error(NameLoc, "instruction '" + Mnemonic +
7184                  "' is not predicable, but condition code specified");
7185   }
7186 
7187   // If we had a VPT predication code on an instruction that can't do that, issue an
7188   // error.
7189   if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
7190     return Error(NameLoc, "instruction '" + Mnemonic +
7191                  "' is not VPT predicable, but VPT code T/E is specified");
7192   }
7193 
7194   // Add the carry setting operand, if necessary.
7195   if (CanAcceptCarrySet && CarrySetting) {
7196     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
7197     Operands.push_back(ARMOperand::CreateCCOut(
7198         CarrySetting ? ARM::CPSR : ARM::NoRegister, Loc, *this));
7199   }
7200 
7201   // Add the predication code operand, if necessary.
7202   if (CanAcceptPredicationCode && PredicationCode != llvm::ARMCC::AL) {
7203     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7204                                       CarrySetting);
7205     Operands.push_back(ARMOperand::CreateCondCode(
7206         ARMCC::CondCodes(PredicationCode), Loc, *this));
7207   }
7208 
7209   // Add the VPT predication code operand, if necessary.
7210   // Dont add in certain cases of VCVT as this needs to be disambiguated
7211   // after operand parsing.
7212   if (CanAcceptVPTPredicationCode && VPTPredicationCode != llvm::ARMVCC::None &&
7213       !(Mnemonic.starts_with("vcvt") && Mnemonic != "vcvta" &&
7214         Mnemonic != "vcvtn" && Mnemonic != "vcvtp" && Mnemonic != "vcvtm")) {
7215     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7216                                       CarrySetting);
7217     Operands.push_back(ARMOperand::CreateVPTPred(
7218         ARMVCC::VPTCodes(VPTPredicationCode), Loc, *this));
7219   }
7220 
7221   // Add the processor imod operand, if necessary.
7222   if (ProcessorIMod) {
7223     Operands.push_back(ARMOperand::CreateImm(
7224         MCConstantExpr::create(ProcessorIMod, getContext()), NameLoc, NameLoc,
7225         *this));
7226   } else if (Mnemonic == "cps" && isMClass()) {
7227     return Error(NameLoc, "instruction 'cps' requires effect for M-class");
7228   }
7229 
7230   // Add the remaining tokens in the mnemonic.
7231   while (Next != StringRef::npos) {
7232     Start = Next;
7233     Next = Name.find('.', Start + 1);
7234     ExtraToken = Name.slice(Start, Next);
7235 
7236     // Some NEON instructions have an optional datatype suffix that is
7237     // completely ignored. Check for that.
7238     if (isDataTypeToken(ExtraToken) &&
7239         doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
7240       continue;
7241 
7242     // For for ARM mode generate an error if the .n qualifier is used.
7243     if (ExtraToken == ".n" && !isThumb()) {
7244       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7245       return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
7246                    "arm mode");
7247     }
7248 
7249     // The .n qualifier is always discarded as that is what the tables
7250     // and matcher expect.  In ARM mode the .w qualifier has no effect,
7251     // so discard it to avoid errors that can be caused by the matcher.
7252     if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
7253       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7254       Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc, *this));
7255     }
7256   }
7257 
7258   // This marks the end of the LHS Mnemonic operators.
7259   // This is used for indexing into the non-menmonic operators as some of the
7260   // mnemonic operators are optional and therfore indexes can differ.
7261   unsigned MnemonicOpsEndInd = Operands.size();
7262 
7263   // Read the remaining operands.
7264   if (getLexer().isNot(AsmToken::EndOfStatement)) {
7265     // Read the first operand.
7266     if (parseOperand(Operands, Mnemonic)) {
7267       return true;
7268     }
7269 
7270     while (parseOptionalToken(AsmToken::Comma)) {
7271       // Parse and remember the operand.
7272       if (parseOperand(Operands, Mnemonic)) {
7273         return true;
7274       }
7275     }
7276   }
7277 
7278   if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
7279     return true;
7280 
7281   tryConvertingToTwoOperandForm(Mnemonic, PredicationCode, CarrySetting,
7282                                 Operands, MnemonicOpsEndInd);
7283 
7284   if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
7285     // Dual-register instructions use even-odd register pairs as their
7286     // destination operand, in assembly such pair is spelled as two
7287     // consecutive registers, without any special syntax. ConvertDualRegOperand
7288     // tries to convert such operand into register pair, e.g. r2, r3 -> r2_r3.
7289     // It returns true, if an error message has been emitted. If the function
7290     // returns false, the function either succeeded or an error (e.g. missing
7291     // operand) will be diagnosed elsewhere.
7292     if (MS.isCDEDualRegInstr(Mnemonic)) {
7293       bool GotError =
7294           CDEConvertDualRegOperand(Mnemonic, Operands, MnemonicOpsEndInd);
7295       if (GotError)
7296         return GotError;
7297     }
7298   }
7299 
7300   if (hasMVE()) {
7301     if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7302                                           MnemonicOpsEndInd) &&
7303         Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
7304       // Very nasty hack to deal with the vector predicated variant of vmovlt
7305       // the scalar predicated vmov with condition 'lt'.  We can not tell them
7306       // apart until we have parsed their operands.
7307       Operands.erase(Operands.begin() + 1);
7308       Operands.erase(Operands.begin());
7309       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7310       SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7311                                          Mnemonic.size() - 1 + CarrySetting);
7312       Operands.insert(Operands.begin(),
7313                       ARMOperand::CreateVPTPred(ARMVCC::None, PLoc, *this));
7314       Operands.insert(Operands.begin(), ARMOperand::CreateToken(
7315                                             StringRef("vmovlt"), MLoc, *this));
7316     } else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE &&
7317                !shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7318                                                  MnemonicOpsEndInd)) {
7319       // Another nasty hack to deal with the ambiguity between vcvt with scalar
7320       // predication 'ne' and vcvtn with vector predication 'e'.  As above we
7321       // can only distinguish between the two after we have parsed their
7322       // operands.
7323       Operands.erase(Operands.begin() + 1);
7324       Operands.erase(Operands.begin());
7325       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7326       SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7327                                          Mnemonic.size() - 1 + CarrySetting);
7328       Operands.insert(Operands.begin(),
7329                       ARMOperand::CreateVPTPred(ARMVCC::Else, PLoc, *this));
7330       Operands.insert(Operands.begin(),
7331                       ARMOperand::CreateToken(StringRef("vcvtn"), MLoc, *this));
7332     } else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT &&
7333                !shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7334                                                  MnemonicOpsEndInd)) {
7335       // Another hack, this time to distinguish between scalar predicated vmul
7336       // with 'lt' predication code and the vector instruction vmullt with
7337       // vector predication code "none"
7338       removeCondCode(Operands, MnemonicOpsEndInd);
7339       Operands.erase(Operands.begin());
7340       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7341       Operands.insert(Operands.begin(), ARMOperand::CreateToken(
7342                                             StringRef("vmullt"), MLoc, *this));
7343     } else if (Mnemonic.starts_with("vcvt") && !Mnemonic.starts_with("vcvta") &&
7344                !Mnemonic.starts_with("vcvtn") &&
7345                !Mnemonic.starts_with("vcvtp") &&
7346                !Mnemonic.starts_with("vcvtm")) {
7347       if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7348                                             MnemonicOpsEndInd)) {
7349         // We could not split the vector predicate off vcvt because it might
7350         // have been the scalar vcvtt instruction.  Now we know its a vector
7351         // instruction, we still need to check whether its the vector
7352         // predicated vcvt with 'Then' predication or the vector vcvtt.  We can
7353         // distinguish the two based on the suffixes, if it is any of
7354         // ".f16.f32", ".f32.f16", ".f16.f64" or ".f64.f16" then it is the vcvtt.
7355         if (Mnemonic.starts_with("vcvtt") && MnemonicOpsEndInd > 2) {
7356           auto Sz1 =
7357               static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd - 2]);
7358           auto Sz2 =
7359               static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd - 1]);
7360           if (!(Sz1.isToken() && Sz1.getToken().starts_with(".f") &&
7361                 Sz2.isToken() && Sz2.getToken().starts_with(".f"))) {
7362             Operands.erase(Operands.begin());
7363             SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7364             VPTPredicationCode = ARMVCC::Then;
7365 
7366             Mnemonic = Mnemonic.substr(0, 4);
7367             Operands.insert(Operands.begin(),
7368                             ARMOperand::CreateToken(Mnemonic, MLoc, *this));
7369           }
7370         }
7371         SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7372                                           Mnemonic.size() + CarrySetting);
7373         // Add VPTPred
7374         Operands.insert(Operands.begin() + 1,
7375                         ARMOperand::CreateVPTPred(
7376                             ARMVCC::VPTCodes(VPTPredicationCode), PLoc, *this));
7377         ++MnemonicOpsEndInd;
7378       }
7379     } else if (CanAcceptVPTPredicationCode) {
7380       // For all other instructions, make sure only one of the two
7381       // predication operands is left behind, depending on whether we should
7382       // use the vector predication.
7383       if (shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7384                                            MnemonicOpsEndInd)) {
7385         removeVPTCondCode(Operands, MnemonicOpsEndInd);
7386       }
7387     }
7388   }
7389 
7390   if (VPTPredicationCode != ARMVCC::None) {
7391     bool usedVPTPredicationCode = false;
7392     for (unsigned I = 1; I < Operands.size(); ++I)
7393       if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7394         usedVPTPredicationCode = true;
7395     if (!usedVPTPredicationCode) {
7396       // If we have a VPT predication code and we haven't just turned it
7397       // into an operand, then it was a mistake for splitMnemonic to
7398       // separate it from the rest of the mnemonic in the first place,
7399       // and this may lead to wrong disassembly (e.g. scalar floating
7400       // point VCMPE is actually a different instruction from VCMP, so
7401       // we mustn't treat them the same). In that situation, glue it
7402       // back on.
7403       Mnemonic = Name.slice(0, Mnemonic.size() + 1);
7404       Operands.erase(Operands.begin());
7405       Operands.insert(Operands.begin(),
7406                       ARMOperand::CreateToken(Mnemonic, NameLoc, *this));
7407     }
7408   }
7409 
7410   // ARM mode 'blx' need special handling, as the register operand version
7411   // is predicable, but the label operand version is not. So, we can't rely
7412   // on the Mnemonic based checking to correctly figure out when to put
7413   // a k_CondCode operand in the list. If we're trying to match the label
7414   // version, remove the k_CondCode operand here.
7415   if (!isThumb() && Mnemonic == "blx" &&
7416       Operands.size() == MnemonicOpsEndInd + 1 &&
7417       static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).isImm())
7418     removeCondCode(Operands, MnemonicOpsEndInd);
7419 
7420   // GNU Assembler extension (compatibility).
7421   fixupGNULDRDAlias(Mnemonic, Operands, MnemonicOpsEndInd);
7422 
7423   // Adjust operands of ldrexd/strexd to MCK_GPRPair.
7424   // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
7425   // a single GPRPair reg operand is used in the .td file to replace the two
7426   // GPRs. However, when parsing from asm, the two GRPs cannot be
7427   // automatically
7428   // expressed as a GPRPair, so we have to manually merge them.
7429   // FIXME: We would really like to be able to tablegen'erate this.
7430   bool IsLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
7431   if (!isThumb() && Operands.size() > MnemonicOpsEndInd + 1 + (!IsLoad) &&
7432       (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
7433        Mnemonic == "stlexd")) {
7434     unsigned Idx = IsLoad ? MnemonicOpsEndInd : MnemonicOpsEndInd + 1;
7435     ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
7436     ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
7437 
7438     const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
7439     // Adjust only if Op1 is a GPR.
7440     if (Op1.isReg() && MRC.contains(Op1.getReg())) {
7441       MCRegister Reg1 = Op1.getReg();
7442       unsigned Rt = MRI->getEncodingValue(Reg1);
7443       MCRegister Reg2 = Op2.getReg();
7444       unsigned Rt2 = MRI->getEncodingValue(Reg2);
7445       // Rt2 must be Rt + 1.
7446       if (Rt + 1 != Rt2)
7447         return Error(Op2.getStartLoc(),
7448                      IsLoad ? "destination operands must be sequential"
7449                             : "source operands must be sequential");
7450 
7451       // Rt must be even
7452       if (Rt & 1)
7453         return Error(
7454             Op1.getStartLoc(),
7455             IsLoad ? "destination operands must start start at an even register"
7456                    : "source operands must start start at an even register");
7457 
7458       MCRegister NewReg = MRI->getMatchingSuperReg(
7459           Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
7460       Operands[Idx] = ARMOperand::CreateReg(NewReg, Op1.getStartLoc(),
7461                                             Op2.getEndLoc(), *this);
7462       Operands.erase(Operands.begin() + Idx + 1);
7463     }
7464   }
7465 
7466   // FIXME: As said above, this is all a pretty gross hack.  This instruction
7467   // does not fit with other "subs" and tblgen.
7468   // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
7469   // so the Mnemonic is the original name "subs" and delete the predicate
7470   // operand so it will match the table entry.
7471   if (isThumbTwo() && Mnemonic == "sub" &&
7472       Operands.size() == MnemonicOpsEndInd + 3 &&
7473       static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).isReg() &&
7474       static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).getReg() ==
7475           ARM::PC &&
7476       static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]).isReg() &&
7477       static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]).getReg() ==
7478           ARM::LR &&
7479       static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 2]).isImm()) {
7480     Operands.front() = ARMOperand::CreateToken(Name, NameLoc, *this);
7481     removeCCOut(Operands, MnemonicOpsEndInd);
7482   }
7483   return false;
7484 }
7485 
7486 // Validate context-sensitive operand constraints.
7487 
// Return 'true' if the register list contains a register that is neither a
// low GPR nor the permitted HiReg, 'false' otherwise. While scanning, set
// 'containsReg' to true if Reg is seen in the list.
7491 static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
7492                                  MCRegister Reg, MCRegister HiReg,
7493                                  bool &containsReg) {
7494   containsReg = false;
7495   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
7496     MCRegister OpReg = Inst.getOperand(i).getReg();
7497     if (OpReg == Reg)
7498       containsReg = true;
7499     // Anything other than a low register isn't legal here.
7500     if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
7501       return true;
7502   }
7503   return false;
7504 }
7505 
// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
7508 static bool listContainsReg(const MCInst &Inst, unsigned OpNo, MCRegister Reg) {
7509   for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
7510     MCRegister OpReg = Inst.getOperand(i).getReg();
7511     if (OpReg == Reg)
7512       return true;
7513   }
7514   return false;
7515 }
7516 
7517 // Return true if instruction has the interesting property of being
7518 // allowed in IT blocks, but not being predicable.
7519 static bool instIsBreakpoint(const MCInst &Inst) {
7520     return Inst.getOpcode() == ARM::tBKPT ||
7521            Inst.getOpcode() == ARM::BKPT ||
7522            Inst.getOpcode() == ARM::tHLT ||
7523            Inst.getOpcode() == ARM::HLT;
7524 }
7525 
7526 unsigned getRegListInd(const OperandVector &Operands,
7527                        unsigned MnemonicOpsEndInd) {
7528   for (unsigned I = MnemonicOpsEndInd; I < Operands.size(); ++I) {
7529     const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[I]);
7530     if (Op.isRegList()) {
7531       return I;
7532     }
7533   }
7534   return 0;
7535 }
7536 
7537 bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
7538                                        const OperandVector &Operands,
7539                                        unsigned MnemonicOpsEndInd,
7540                                        unsigned ListIndex, bool IsARPop) {
7541   bool ListContainsSP = listContainsReg(Inst, ListIndex, ARM::SP);
7542   bool ListContainsLR = listContainsReg(Inst, ListIndex, ARM::LR);
7543   bool ListContainsPC = listContainsReg(Inst, ListIndex, ARM::PC);
7544 
7545   if (!IsARPop && ListContainsSP)
7546     return Error(
7547         Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7548         "SP may not be in the register list");
7549   if (ListContainsPC && ListContainsLR)
7550     return Error(
7551         Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7552         "PC and LR may not be in the register list simultaneously");
7553   return false;
7554 }
7555 
7556 bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
7557                                        const OperandVector &Operands,
7558                                        unsigned MnemonicOpsEndInd,
7559                                        unsigned ListIndex) {
7560   bool ListContainsSP = listContainsReg(Inst, ListIndex, ARM::SP);
7561   bool ListContainsPC = listContainsReg(Inst, ListIndex, ARM::PC);
7562 
7563   if (ListContainsSP && ListContainsPC)
7564     return Error(
7565         Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7566         "SP and PC may not be in the register list");
7567   if (ListContainsSP)
7568     return Error(
7569         Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7570         "SP may not be in the register list");
7571   if (ListContainsPC)
7572     return Error(
7573         Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7574         "PC may not be in the register list");
7575   return false;
7576 }
7577 
7578 bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
7579                                     bool Load, bool ARMMode, bool Writeback,
7580                                     unsigned MnemonicOpsEndInd) {
7581   unsigned RtIndex = Load || !Writeback ? 0 : 1;
7582   unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
7583   unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
7584 
7585   if (ARMMode) {
7586     // Rt can't be R14.
7587     if (Rt == 14)
7588       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7589                    "Rt can't be R14");
7590 
7591     // Rt must be even-numbered.
7592     if ((Rt & 1) == 1)
7593       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7594                    "Rt must be even-numbered");
7595 
7596     // Rt2 must be Rt + 1.
7597     if (Rt2 != Rt + 1) {
7598       if (Load)
7599         return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7600                      "destination operands must be sequential");
7601       else
7602         return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7603                      "source operands must be sequential");
7604     }
7605 
7606     // FIXME: Diagnose m == 15
7607     // FIXME: Diagnose ldrd with m == t || m == t2.
7608   }
7609 
7610   if (!ARMMode && Load) {
7611     if (Rt2 == Rt)
7612       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7613                    "destination operands can't be identical");
7614   }
7615 
7616   if (Writeback) {
7617     unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
7618 
7619     if (Rn == Rt || Rn == Rt2) {
7620       if (Load)
7621         return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7622                      "base register needs to be different from destination "
7623                      "registers");
7624       else
7625         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7626                      "source register and base register can't be identical");
7627     }
7628 
7629     // FIXME: Diagnose ldrd/strd with writeback and n == 15.
7630     // (Except the immediate form of ldrd?)
7631   }
7632 
7633   return false;
7634 }
7635 
7636 static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
7637   for (unsigned i = 0; i < MCID.NumOperands; ++i) {
7638     if (ARM::isVpred(MCID.operands()[i].OperandType))
7639       return i;
7640   }
7641   return -1;
7642 }
7643 
7644 static bool isVectorPredicable(const MCInstrDesc &MCID) {
7645   return findFirstVectorPredOperandIdx(MCID) != -1;
7646 }
7647 
7648 static bool isARMMCExpr(MCParsedAsmOperand &MCOp) {
7649   ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
7650   if (!Op.isImm())
7651     return false;
7652   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7653   if (CE)
7654     return false;
7655   const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
7656   if (!E)
7657     return false;
7658   return true;
7659 }
7660 
7661 // FIXME: We would really like to be able to tablegen'erate this.
7662 bool ARMAsmParser::validateInstruction(MCInst &Inst,
7663                                        const OperandVector &Operands,
7664                                        unsigned MnemonicOpsEndInd) {
7665   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
7666   SMLoc Loc = Operands[0]->getStartLoc();
7667 
7668   // Check the IT block state first.
7669   // NOTE: BKPT and HLT instructions have the interesting property of being
7670   // allowed in IT blocks, but not being predicable. They just always execute.
7671   if (inITBlock() && !instIsBreakpoint(Inst)) {
7672     // The instruction must be predicable.
7673     if (!MCID.isPredicable())
7674       return Error(Loc, "instructions in IT block must be predicable");
7675     ARMCC::CondCodes Cond = ARMCC::CondCodes(
7676         Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
7677     if (Cond != currentITCond()) {
7678       // Find the condition code Operand to get its SMLoc information.
7679       SMLoc CondLoc = Operands[0]->getEndLoc();
7680       for (unsigned I = 1; I < Operands.size(); ++I)
7681         if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
7682           CondLoc = Operands[I]->getStartLoc();
7683       return Error(CondLoc, "incorrect condition in IT block; got '" +
7684                                 StringRef(ARMCondCodeToString(Cond)) +
7685                                 "', but expected '" +
7686                                 ARMCondCodeToString(currentITCond()) + "'");
7687     }
7688   // Check for non-'al' condition codes outside of the IT block.
7689   } else if (isThumbTwo() && MCID.isPredicable() &&
7690              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7691              ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
7692              Inst.getOpcode() != ARM::t2Bcc &&
7693              Inst.getOpcode() != ARM::t2BFic) {
7694     return Error(Loc, "predicated instructions must be in IT block");
7695   } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
7696              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7697                  ARMCC::AL) {
7698     return Warning(Loc, "predicated instructions should be in IT block");
7699   } else if (!MCID.isPredicable()) {
7700     // Check the instruction doesn't have a predicate operand anyway
7701     // that it's not allowed to use. Sometimes this happens in order
7702     // to keep instructions the same shape even though one cannot
7703     // legally be predicated, e.g. vmul.f16 vs vmul.f32.
7704     for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
7705       if (MCID.operands()[i].isPredicate()) {
7706         if (Inst.getOperand(i).getImm() != ARMCC::AL)
7707           return Error(Loc, "instruction is not predicable");
7708         break;
7709       }
7710     }
7711   }
7712 
7713   // PC-setting instructions in an IT block, but not the last instruction of
7714   // the block, are UNPREDICTABLE.
7715   if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
7716     return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
7717   }
7718 
7719   if (inVPTBlock() && !instIsBreakpoint(Inst)) {
7720     unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
7721     if (!isVectorPredicable(MCID))
7722       return Error(Loc, "instruction in VPT block must be predicable");
7723     unsigned Pred = Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm();
7724     unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
7725     if (Pred != VPTPred) {
7726       SMLoc PredLoc;
7727       for (unsigned I = 1; I < Operands.size(); ++I)
7728         if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7729           PredLoc = Operands[I]->getStartLoc();
7730       return Error(PredLoc, "incorrect predication in VPT block; got '" +
7731                    StringRef(ARMVPTPredToString(ARMVCC::VPTCodes(Pred))) +
7732                    "', but expected '" +
7733                    ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'");
7734     }
7735   }
7736   else if (isVectorPredicable(MCID) &&
7737            Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() !=
7738            ARMVCC::None)
7739     return Error(Loc, "VPT predicated instructions must be in VPT block");
7740 
7741   const unsigned Opcode = Inst.getOpcode();
7742   switch (Opcode) {
7743   case ARM::VLLDM:
7744   case ARM::VLLDM_T2:
7745   case ARM::VLSTM:
7746   case ARM::VLSTM_T2: {
7747     // Since in some cases both T1 and T2 are valid, tablegen can not always
7748     // pick the correct instruction.
7749     if (Operands.size() ==
7750         MnemonicOpsEndInd + 2) { // a register list has been provided
7751       ARMOperand &Op = static_cast<ARMOperand &>(
7752           *Operands[MnemonicOpsEndInd + 1]); // the register list, a dpr_reglist
7753       assert(Op.isDPRRegList());
7754       auto &RegList = Op.getRegList();
7755       // T2 requires v8.1-M.Main (cannot be handled by tablegen)
7756       if (RegList.size() == 32 && !hasV8_1MMainline()) {
7757         return Error(Op.getEndLoc(), "T2 version requires v8.1-M.Main");
7758       }
7759       // When target has 32 D registers, T1 is undefined.
7760       if (hasD32() && RegList.size() != 32) {
7761         return Error(Op.getEndLoc(), "operand must be exactly {d0-d31}");
7762       }
7763       // When target has 16 D registers, both T1 and T2 are valid.
7764       if (!hasD32() && (RegList.size() != 16 && RegList.size() != 32)) {
7765         return Error(Op.getEndLoc(),
7766                      "operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)");
7767       }
7768     }
7769     return false;
7770   }
7771   case ARM::t2IT: {
7772     // Encoding is unpredictable if it ever results in a notional 'NV'
7773     // predicate. Since we don't parse 'NV' directly this means an 'AL'
7774     // predicate with an "else" mask bit.
7775     unsigned Cond = Inst.getOperand(0).getImm();
7776     unsigned Mask = Inst.getOperand(1).getImm();
7777 
7778     // Conditions only allowing a 't' are those with no set bit except
7779     // the lowest-order one that indicates the end of the sequence. In
7780     // other words, powers of 2.
7781     if (Cond == ARMCC::AL && llvm::popcount(Mask) != 1)
7782       return Error(Loc, "unpredictable IT predicate sequence");
7783     break;
7784   }
7785   case ARM::LDRD:
7786     if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ true,
7787                          /*Writeback*/ false, MnemonicOpsEndInd))
7788       return true;
7789     break;
7790   case ARM::LDRD_PRE:
7791   case ARM::LDRD_POST:
7792     if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ true,
7793                          /*Writeback*/ true, MnemonicOpsEndInd))
7794       return true;
7795     break;
7796   case ARM::t2LDRDi8:
7797     if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ false,
7798                          /*Writeback*/ false, MnemonicOpsEndInd))
7799       return true;
7800     break;
7801   case ARM::t2LDRD_PRE:
7802   case ARM::t2LDRD_POST:
7803     if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ false,
7804                          /*Writeback*/ true, MnemonicOpsEndInd))
7805       return true;
7806     break;
7807   case ARM::t2BXJ: {
7808     const MCRegister RmReg = Inst.getOperand(0).getReg();
7809     // Rm = SP is no longer unpredictable in v8-A
7810     if (RmReg == ARM::SP && !hasV8Ops())
7811       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7812                    "r13 (SP) is an unpredictable operand to BXJ");
7813     return false;
7814   }
7815   case ARM::STRD:
7816     if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ true,
7817                          /*Writeback*/ false, MnemonicOpsEndInd))
7818       return true;
7819     break;
7820   case ARM::STRD_PRE:
7821   case ARM::STRD_POST:
7822     if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ true,
7823                          /*Writeback*/ true, MnemonicOpsEndInd))
7824       return true;
7825     break;
7826   case ARM::t2STRD_PRE:
7827   case ARM::t2STRD_POST:
7828     if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ false,
7829                          /*Writeback*/ true, MnemonicOpsEndInd))
7830       return true;
7831     break;
7832   case ARM::STR_PRE_IMM:
7833   case ARM::STR_PRE_REG:
7834   case ARM::t2STR_PRE:
7835   case ARM::STR_POST_IMM:
7836   case ARM::STR_POST_REG:
7837   case ARM::t2STR_POST:
7838   case ARM::STRH_PRE:
7839   case ARM::t2STRH_PRE:
7840   case ARM::STRH_POST:
7841   case ARM::t2STRH_POST:
7842   case ARM::STRB_PRE_IMM:
7843   case ARM::STRB_PRE_REG:
7844   case ARM::t2STRB_PRE:
7845   case ARM::STRB_POST_IMM:
7846   case ARM::STRB_POST_REG:
7847   case ARM::t2STRB_POST: {
7848     // Rt must be different from Rn.
7849     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7850     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7851 
7852     if (Rt == Rn)
7853       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
7854                    "source register and base register can't be identical");
7855     return false;
7856   }
7857   case ARM::t2LDR_PRE_imm:
7858   case ARM::t2LDR_POST_imm:
7859   case ARM::t2STR_PRE_imm:
7860   case ARM::t2STR_POST_imm: {
7861     // Rt must be different from Rn.
7862     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7863     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7864 
7865     if (Rt == Rn)
7866       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7867                    "destination register and base register can't be identical");
7868     if (Inst.getOpcode() == ARM::t2LDR_POST_imm ||
7869         Inst.getOpcode() == ARM::t2STR_POST_imm) {
7870       int Imm = Inst.getOperand(2).getImm();
7871       if (Imm > 255 || Imm < -255)
7872         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7873                      "operand must be in range [-255, 255]");
7874     }
7875     if (Inst.getOpcode() == ARM::t2STR_PRE_imm ||
7876         Inst.getOpcode() == ARM::t2STR_POST_imm) {
7877       if (Inst.getOperand(0).getReg() == ARM::PC) {
7878         return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7879                      "operand must be a register in range [r0, r14]");
7880       }
7881     }
7882     return false;
7883   }
7884 
7885   case ARM::t2LDRB_OFFSET_imm:
7886   case ARM::t2LDRB_PRE_imm:
7887   case ARM::t2LDRB_POST_imm:
7888   case ARM::t2STRB_OFFSET_imm:
7889   case ARM::t2STRB_PRE_imm:
7890   case ARM::t2STRB_POST_imm: {
7891     if (Inst.getOpcode() == ARM::t2LDRB_POST_imm ||
7892         Inst.getOpcode() == ARM::t2STRB_POST_imm ||
7893         Inst.getOpcode() == ARM::t2LDRB_PRE_imm ||
7894         Inst.getOpcode() == ARM::t2STRB_PRE_imm) {
7895       int Imm = Inst.getOperand(2).getImm();
7896       if (Imm > 255 || Imm < -255)
7897         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7898                      "operand must be in range [-255, 255]");
7899     } else if (Inst.getOpcode() == ARM::t2LDRB_OFFSET_imm ||
7900                Inst.getOpcode() == ARM::t2STRB_OFFSET_imm) {
7901       int Imm = Inst.getOperand(2).getImm();
7902       if (Imm > 0 || Imm < -255)
7903         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7904                      "operand must be in range [0, 255] with a negative sign");
7905     }
7906     if (Inst.getOperand(0).getReg() == ARM::PC) {
7907       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7908                    "if operand is PC, should call the LDRB (literal)");
7909     }
7910     return false;
7911   }
7912 
7913   case ARM::t2LDRH_OFFSET_imm:
7914   case ARM::t2LDRH_PRE_imm:
7915   case ARM::t2LDRH_POST_imm:
7916   case ARM::t2STRH_OFFSET_imm:
7917   case ARM::t2STRH_PRE_imm:
7918   case ARM::t2STRH_POST_imm: {
7919     if (Inst.getOpcode() == ARM::t2LDRH_POST_imm ||
7920         Inst.getOpcode() == ARM::t2STRH_POST_imm ||
7921         Inst.getOpcode() == ARM::t2LDRH_PRE_imm ||
7922         Inst.getOpcode() == ARM::t2STRH_PRE_imm) {
7923       int Imm = Inst.getOperand(2).getImm();
7924       if (Imm > 255 || Imm < -255)
7925         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7926                      "operand must be in range [-255, 255]");
7927     } else if (Inst.getOpcode() == ARM::t2LDRH_OFFSET_imm ||
7928                Inst.getOpcode() == ARM::t2STRH_OFFSET_imm) {
7929       int Imm = Inst.getOperand(2).getImm();
7930       if (Imm > 0 || Imm < -255)
7931         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7932                      "operand must be in range [0, 255] with a negative sign");
7933     }
7934     if (Inst.getOperand(0).getReg() == ARM::PC) {
7935       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7936                    "if operand is PC, should call the LDRH (literal)");
7937     }
7938     return false;
7939   }
7940 
7941   case ARM::t2LDRSB_OFFSET_imm:
7942   case ARM::t2LDRSB_PRE_imm:
7943   case ARM::t2LDRSB_POST_imm: {
7944     if (Inst.getOpcode() == ARM::t2LDRSB_POST_imm ||
7945         Inst.getOpcode() == ARM::t2LDRSB_PRE_imm) {
7946       int Imm = Inst.getOperand(2).getImm();
7947       if (Imm > 255 || Imm < -255)
7948         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7949                      "operand must be in range [-255, 255]");
7950     } else if (Inst.getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
7951       int Imm = Inst.getOperand(2).getImm();
7952       if (Imm > 0 || Imm < -255)
7953         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7954                      "operand must be in range [0, 255] with a negative sign");
7955     }
7956     if (Inst.getOperand(0).getReg() == ARM::PC) {
7957       return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7958                    "if operand is PC, should call the LDRH (literal)");
7959     }
7960     return false;
7961   }
7962 
7963   case ARM::t2LDRSH_OFFSET_imm:
7964   case ARM::t2LDRSH_PRE_imm:
7965   case ARM::t2LDRSH_POST_imm: {
7966     if (Inst.getOpcode() == ARM::t2LDRSH_POST_imm ||
7967         Inst.getOpcode() == ARM::t2LDRSH_PRE_imm) {
7968       int Imm = Inst.getOperand(2).getImm();
7969       if (Imm > 255 || Imm < -255)
7970         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7971                      "operand must be in range [-255, 255]");
7972     } else if (Inst.getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
7973       int Imm = Inst.getOperand(2).getImm();
7974       if (Imm > 0 || Imm < -255)
7975         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7976                      "operand must be in range [0, 255] with a negative sign");
7977     }
7978     if (Inst.getOperand(0).getReg() == ARM::PC) {
7979       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7980                    "if operand is PC, should call the LDRH (literal)");
7981     }
7982     return false;
7983   }
7984 
7985   case ARM::LDR_PRE_IMM:
7986   case ARM::LDR_PRE_REG:
7987   case ARM::t2LDR_PRE:
7988   case ARM::LDR_POST_IMM:
7989   case ARM::LDR_POST_REG:
7990   case ARM::t2LDR_POST:
7991   case ARM::LDRH_PRE:
7992   case ARM::t2LDRH_PRE:
7993   case ARM::LDRH_POST:
7994   case ARM::t2LDRH_POST:
7995   case ARM::LDRSH_PRE:
7996   case ARM::t2LDRSH_PRE:
7997   case ARM::LDRSH_POST:
7998   case ARM::t2LDRSH_POST:
7999   case ARM::LDRB_PRE_IMM:
8000   case ARM::LDRB_PRE_REG:
8001   case ARM::t2LDRB_PRE:
8002   case ARM::LDRB_POST_IMM:
8003   case ARM::LDRB_POST_REG:
8004   case ARM::t2LDRB_POST:
8005   case ARM::LDRSB_PRE:
8006   case ARM::t2LDRSB_PRE:
8007   case ARM::LDRSB_POST:
8008   case ARM::t2LDRSB_POST: {
8009     // Rt must be different from Rn.
8010     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
8011     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
8012 
8013     if (Rt == Rn)
8014       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8015                    "destination register and base register can't be identical");
8016     return false;
8017   }
8018 
8019   case ARM::MVE_VLDRBU8_rq:
8020   case ARM::MVE_VLDRBU16_rq:
8021   case ARM::MVE_VLDRBS16_rq:
8022   case ARM::MVE_VLDRBU32_rq:
8023   case ARM::MVE_VLDRBS32_rq:
8024   case ARM::MVE_VLDRHU16_rq:
8025   case ARM::MVE_VLDRHU16_rq_u:
8026   case ARM::MVE_VLDRHU32_rq:
8027   case ARM::MVE_VLDRHU32_rq_u:
8028   case ARM::MVE_VLDRHS32_rq:
8029   case ARM::MVE_VLDRHS32_rq_u:
8030   case ARM::MVE_VLDRWU32_rq:
8031   case ARM::MVE_VLDRWU32_rq_u:
8032   case ARM::MVE_VLDRDU64_rq:
8033   case ARM::MVE_VLDRDU64_rq_u:
8034   case ARM::MVE_VLDRWU32_qi:
8035   case ARM::MVE_VLDRWU32_qi_pre:
8036   case ARM::MVE_VLDRDU64_qi:
8037   case ARM::MVE_VLDRDU64_qi_pre: {
8038     // Qd must be different from Qm.
8039     unsigned QdIdx = 0, QmIdx = 2;
8040     bool QmIsPointer = false;
8041     switch (Opcode) {
8042     case ARM::MVE_VLDRWU32_qi:
8043     case ARM::MVE_VLDRDU64_qi:
8044       QmIdx = 1;
8045       QmIsPointer = true;
8046       break;
8047     case ARM::MVE_VLDRWU32_qi_pre:
8048     case ARM::MVE_VLDRDU64_qi_pre:
8049       QdIdx = 1;
8050       QmIsPointer = true;
8051       break;
8052     }
8053 
8054     const unsigned Qd = MRI->getEncodingValue(Inst.getOperand(QdIdx).getReg());
8055     const unsigned Qm = MRI->getEncodingValue(Inst.getOperand(QmIdx).getReg());
8056 
8057     if (Qd == Qm) {
8058       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8059                    Twine("destination vector register and vector ") +
8060                        (QmIsPointer ? "pointer" : "offset") +
8061                        " register can't be identical");
8062     }
8063     return false;
8064   }
8065 
8066   case ARM::SBFX:
8067   case ARM::t2SBFX:
8068   case ARM::UBFX:
8069   case ARM::t2UBFX: {
8070     // Width must be in range [1, 32-lsb].
8071     unsigned LSB = Inst.getOperand(2).getImm();
8072     unsigned Widthm1 = Inst.getOperand(3).getImm();
8073     if (Widthm1 >= 32 - LSB)
8074       return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8075                    "bitfield width must be in range [1,32-lsb]");
8076     return false;
8077   }
8078   // Notionally handles ARM::tLDMIA_UPD too.
8079   case ARM::tLDMIA: {
8080     // If we're parsing Thumb2, the .w variant is available and handles
8081     // most cases that are normally illegal for a Thumb1 LDM instruction.
8082     // We'll make the transformation in processInstruction() if necessary.
8083     //
8084     // Thumb LDM instructions are writeback iff the base register is not
8085     // in the register list.
8086     MCRegister Rn = Inst.getOperand(0).getReg();
8087     bool HasWritebackToken =
8088         (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8089              .isToken() &&
8090          static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8091                  .getToken() == "!");
8092 
8093     bool ListContainsBase;
8094     if (checkLowRegisterList(Inst, 3, Rn, MCRegister(), ListContainsBase) &&
8095         !isThumbTwo())
8096       return Error(
8097           Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
8098           "registers must be in range r0-r7");
8099     // If we should have writeback, then there should be a '!' token.
8100     if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
8101       return Error(
8102           Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
8103           "writeback operator '!' expected");
8104     // If we should not have writeback, there must not be a '!'. This is
8105     // true even for the 32-bit wide encodings.
8106     if (ListContainsBase && HasWritebackToken)
8107       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8108                    "writeback operator '!' not allowed when base register "
8109                    "in register list");
8110 
8111     if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8112       return true;
8113     break;
8114   }
8115   case ARM::LDMIA_UPD:
8116   case ARM::LDMDB_UPD:
8117   case ARM::LDMIB_UPD:
8118   case ARM::LDMDA_UPD:
8119     // ARM variants loading and updating the same register are only officially
8120     // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
8121     if (!hasV7Ops())
8122       break;
8123     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
8124       return Error(Operands.back()->getStartLoc(),
8125                    "writeback register not allowed in register list");
8126     break;
8127   case ARM::t2LDMIA:
8128   case ARM::t2LDMDB:
8129     if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8130       return true;
8131     break;
8132   case ARM::t2STMIA:
8133   case ARM::t2STMDB:
8134     if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8135       return true;
8136     break;
8137   case ARM::t2LDMIA_UPD:
8138   case ARM::t2LDMDB_UPD:
8139   case ARM::t2STMIA_UPD:
8140   case ARM::t2STMDB_UPD:
8141     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
8142       return Error(Operands.back()->getStartLoc(),
8143                    "writeback register not allowed in register list");
8144 
8145     if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
8146       if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8147         return true;
8148     } else {
8149       if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8150         return true;
8151     }
8152     break;
8153 
8154   case ARM::sysLDMIA_UPD:
8155   case ARM::sysLDMDA_UPD:
8156   case ARM::sysLDMDB_UPD:
8157   case ARM::sysLDMIB_UPD:
8158     if (!listContainsReg(Inst, 3, ARM::PC))
8159       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8160                    "writeback register only allowed on system LDM "
8161                    "if PC in register-list");
8162     break;
8163   case ARM::sysSTMIA_UPD:
8164   case ARM::sysSTMDA_UPD:
8165   case ARM::sysSTMDB_UPD:
8166   case ARM::sysSTMIB_UPD:
8167     return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8168                  "system STM cannot have writeback register");
8169   // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
8170   // so only issue a diagnostic for thumb1. The instructions will be
8171   // switched to the t2 encodings in processInstruction() if necessary.
8172   case ARM::tPOP: {
8173     bool ListContainsBase;
8174     if (checkLowRegisterList(Inst, 2, MCRegister(), ARM::PC,
8175                              ListContainsBase) &&
8176         !isThumbTwo())
8177       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8178                    "registers must be in range r0-r7 or pc");
8179     if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 2, !isMClass()))
8180       return true;
8181     break;
8182   }
8183   case ARM::tPUSH: {
8184     bool ListContainsBase;
8185     if (checkLowRegisterList(Inst, 2, MCRegister(), ARM::LR,
8186                              ListContainsBase) &&
8187         !isThumbTwo())
8188       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8189                    "registers must be in range r0-r7 or lr");
8190     if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 2))
8191       return true;
8192     break;
8193   }
8194   case ARM::tSTMIA_UPD: {
8195     bool ListContainsBase, InvalidLowList;
8196     InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
8197                                           0, ListContainsBase);
8198     if (InvalidLowList && !isThumbTwo())
8199       return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8200                    "registers must be in range r0-r7");
8201 
8202     // This would be converted to a 32-bit stm, but that's not valid if the
8203     // writeback register is in the list.
8204     if (InvalidLowList && ListContainsBase)
8205       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8206                    "writeback operator '!' not allowed when base register "
8207                    "in register list");
8208 
8209     if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 4))
8210       return true;
8211     break;
8212   }
8213   case ARM::tADDrSP:
8214     // If the non-SP source operand and the destination operand are not the
8215     // same, we need thumb2 (for the wide encoding), or we have an error.
8216     if (!isThumbTwo() &&
8217         Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8218       return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8219                    "source register must be the same as destination");
8220     }
8221     break;
8222 
8223   case ARM::t2ADDrr:
8224   case ARM::t2ADDrs:
8225   case ARM::t2SUBrr:
8226   case ARM::t2SUBrs:
8227     if (Inst.getOperand(0).getReg() == ARM::SP &&
8228         Inst.getOperand(1).getReg() != ARM::SP)
8229       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8230                    "source register must be sp if destination is sp");
8231     break;
8232 
8233   // Final range checking for Thumb unconditional branch instructions.
8234   case ARM::tB:
8235     if (!(static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]))
8236              .isSignedOffset<11, 1>())
8237       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8238                    "branch target out of range");
8239     break;
8240   case ARM::t2B: {
8241     int op = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8242                                                     : MnemonicOpsEndInd + 1;
8243     ARMOperand &Operand = static_cast<ARMOperand &>(*Operands[op]);
8244     // Delay the checks of symbolic expressions until they are resolved.
8245     if (!isa<MCBinaryExpr>(Operand.getImm()) &&
8246         !Operand.isSignedOffset<24, 1>())
8247       return Error(Operands[op]->getStartLoc(), "branch target out of range");
8248     break;
8249   }
8250   // Final range checking for Thumb conditional branch instructions.
8251   case ARM::tBcc:
8252     if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
8253              .isSignedOffset<8, 1>())
8254       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8255                    "branch target out of range");
8256     break;
8257   case ARM::t2Bcc: {
8258     int Op = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8259                                                     : MnemonicOpsEndInd + 1;
8260     if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
8261       return Error(Operands[Op]->getStartLoc(), "branch target out of range");
8262     break;
8263   }
8264   case ARM::tCBZ:
8265   case ARM::tCBNZ: {
8266     if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8267              .isUnsignedOffset<6, 1>())
8268       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8269                    "branch target out of range");
8270     break;
8271   }
8272   case ARM::MOVi16:
8273   case ARM::MOVTi16:
8274   case ARM::t2MOVi16:
8275   case ARM::t2MOVTi16:
8276     {
8277     // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
8278     // especially when we turn it into a movw and the expression <symbol> does
8279     // not have a :lower16: or :upper16 as part of the expression.  We don't
8280     // want the behavior of silently truncating, which can be unexpected and
8281     // lead to bugs that are difficult to find since this is an easy mistake
8282     // to make.
8283     int i = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8284                                                    : MnemonicOpsEndInd + 1;
8285     ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
8286     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8287     if (CE) break;
8288     const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
8289     if (!E) break;
8290     const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
8291     if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
8292                        ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
8293       return Error(
8294           Op.getStartLoc(),
8295           "immediate expression for mov requires :lower16: or :upper16");
8296     break;
8297   }
8298   case ARM::tADDi8: {
8299     int i = (Operands[MnemonicOpsEndInd + 1]->isImm()) ? MnemonicOpsEndInd + 1
8300                                                        : MnemonicOpsEndInd + 2;
8301     MCParsedAsmOperand &Op = *Operands[i];
8302     if (isARMMCExpr(Op) && !isThumbI8Relocation(Op))
8303       return Error(Op.getStartLoc(),
8304                    "Immediate expression for Thumb adds requires :lower0_7:,"
8305                    " :lower8_15:, :upper0_7: or :upper8_15:");
8306     break;
8307   }
8308   case ARM::tMOVi8: {
8309     MCParsedAsmOperand &Op = *Operands[MnemonicOpsEndInd + 1];
8310     if (isARMMCExpr(Op) && !isThumbI8Relocation(Op))
8311       return Error(Op.getStartLoc(),
8312                    "Immediate expression for Thumb movs requires :lower0_7:,"
8313                    " :lower8_15:, :upper0_7: or :upper8_15:");
8314     break;
8315   }
8316   case ARM::HINT:
8317   case ARM::t2HINT: {
8318     unsigned Imm8 = Inst.getOperand(0).getImm();
8319     unsigned Pred = Inst.getOperand(1).getImm();
8320     // ESB is not predicable (pred must be AL). Without the RAS extension, this
8321     // behaves as any other unallocated hint.
8322     if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
8323       return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
8324                                                "predicable, but condition "
8325                                                "code specified");
8326     if (Imm8 == 0x14 && Pred != ARMCC::AL)
8327       return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
8328                                                "predicable, but condition "
8329                                                "code specified");
8330     break;
8331   }
8332   case ARM::t2BFi:
8333   case ARM::t2BFr:
8334   case ARM::t2BFLi:
8335   case ARM::t2BFLr: {
8336     if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
8337              .isUnsignedOffset<4, 1>() ||
8338         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0)) {
8339       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8340                    "branch location out of range or not a multiple of 2");
8341     }
8342 
8343     if (Opcode == ARM::t2BFi) {
8344       if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8345                .isSignedOffset<16, 1>())
8346         return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8347                      "branch target out of range or not a multiple of 2");
8348     } else if (Opcode == ARM::t2BFLi) {
8349       if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8350                .isSignedOffset<18, 1>())
8351         return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8352                      "branch target out of range or not a multiple of 2");
8353     }
8354     break;
8355   }
8356   case ARM::t2BFic: {
8357     if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
8358              .isUnsignedOffset<4, 1>() ||
8359         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
8360       return Error(Operands[1]->getStartLoc(),
8361                    "branch location out of range or not a multiple of 2");
8362 
8363     if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8364              .isSignedOffset<16, 1>())
8365       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8366                    "branch target out of range or not a multiple of 2");
8367 
8368     assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
8369            "branch location and else branch target should either both be "
8370            "immediates or both labels");
8371 
8372     if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) {
8373       int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm();
8374       if (Diff != 4 && Diff != 2)
8375         return Error(
8376             Operands[3]->getStartLoc(),
8377             "else branch target must be 2 or 4 greater than the branch location");
8378     }
8379     break;
8380   }
8381   case ARM::t2CLRM: {
8382     for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
8383       if (Inst.getOperand(i).isReg() &&
8384           !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8385               Inst.getOperand(i).getReg())) {
8386         return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8387                      "invalid register in register list. Valid registers are "
8388                      "r0-r12, lr/r14 and APSR.");
8389       }
8390     }
8391     break;
8392   }
8393   case ARM::DSB:
8394   case ARM::t2DSB: {
8395 
8396     if (Inst.getNumOperands() < 2)
8397       break;
8398 
8399     unsigned Option = Inst.getOperand(0).getImm();
8400     unsigned Pred = Inst.getOperand(1).getImm();
8401 
8402     // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL).
8403     if (Option == 0 && Pred != ARMCC::AL)
8404       return Error(Operands[1]->getStartLoc(),
8405                    "instruction 'ssbb' is not predicable, but condition code "
8406                    "specified");
8407     if (Option == 4 && Pred != ARMCC::AL)
8408       return Error(Operands[1]->getStartLoc(),
8409                    "instruction 'pssbb' is not predicable, but condition code "
8410                    "specified");
8411     break;
8412   }
8413   case ARM::VMOVRRS: {
8414     // Source registers must be sequential.
8415     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
8416     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
8417     if (Sm1 != Sm + 1)
8418       return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8419                    "source operands must be sequential");
8420     break;
8421   }
8422   case ARM::VMOVSRR: {
8423     // Destination registers must be sequential.
8424     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
8425     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
8426     if (Sm1 != Sm + 1)
8427       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8428                    "destination operands must be sequential");
8429     break;
8430   }
8431   case ARM::VLDMDIA:
8432   case ARM::VSTMDIA: {
8433     ARMOperand &Op =
8434         static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
8435     auto &RegList = Op.getRegList();
8436     if (RegList.size() < 1 || RegList.size() > 16)
8437       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8438                    "list of registers must be at least 1 and at most 16");
8439     break;
8440   }
8441   case ARM::MVE_VQDMULLs32bh:
8442   case ARM::MVE_VQDMULLs32th:
8443   case ARM::MVE_VCMULf32:
8444   case ARM::MVE_VMULLBs32:
8445   case ARM::MVE_VMULLTs32:
8446   case ARM::MVE_VMULLBu32:
8447   case ARM::MVE_VMULLTu32: {
8448     if (Operands[MnemonicOpsEndInd]->getReg() ==
8449         Operands[MnemonicOpsEndInd + 1]->getReg()) {
8450       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8451                    "Qd register and Qn register can't be identical");
8452     }
8453     if (Operands[MnemonicOpsEndInd]->getReg() ==
8454         Operands[MnemonicOpsEndInd + 2]->getReg()) {
8455       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8456                    "Qd register and Qm register can't be identical");
8457     }
8458     break;
8459   }
8460   case ARM::MVE_VREV64_8:
8461   case ARM::MVE_VREV64_16:
8462   case ARM::MVE_VREV64_32:
8463   case ARM::MVE_VQDMULL_qr_s32bh:
8464   case ARM::MVE_VQDMULL_qr_s32th: {
8465     if (Operands[MnemonicOpsEndInd]->getReg() ==
8466         Operands[MnemonicOpsEndInd + 1]->getReg()) {
8467       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8468                    "Qd register and Qn register can't be identical");
8469     }
8470     break;
8471   }
8472   case ARM::MVE_VCADDi32:
8473   case ARM::MVE_VCADDf32:
8474   case ARM::MVE_VHCADDs32: {
8475     if (Operands[MnemonicOpsEndInd]->getReg() ==
8476         Operands[MnemonicOpsEndInd + 2]->getReg()) {
8477       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8478                    "Qd register and Qm register can't be identical");
8479     }
8480     break;
8481   }
8482   case ARM::MVE_VMOV_rr_q: {
8483     if (Operands[MnemonicOpsEndInd + 2]->getReg() !=
8484         Operands[MnemonicOpsEndInd + 4]->getReg())
8485       return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8486                    "Q-registers must be the same");
8487     if (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 3])
8488             .getVectorIndex() !=
8489         static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 5])
8490                 .getVectorIndex() +
8491             2)
8492       return Error(Operands[MnemonicOpsEndInd + 3]->getStartLoc(),
8493                    "Q-register indexes must be 2 and 0 or 3 and 1");
8494     break;
8495   }
8496   case ARM::MVE_VMOV_q_rr: {
8497     if (Operands[MnemonicOpsEndInd]->getReg() !=
8498         Operands[MnemonicOpsEndInd + 2]->getReg())
8499       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8500                    "Q-registers must be the same");
8501     if (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8502             .getVectorIndex() !=
8503         static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 3])
8504                 .getVectorIndex() +
8505             2)
8506       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8507                    "Q-register indexes must be 2 and 0 or 3 and 1");
8508     break;
8509   }
8510   case ARM::MVE_SQRSHR:
8511   case ARM::MVE_UQRSHL: {
8512     if (Operands[MnemonicOpsEndInd]->getReg() ==
8513         Operands[MnemonicOpsEndInd + 1]->getReg()) {
8514       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8515                    "Rda register and Rm register can't be identical");
8516     }
8517     break;
8518   }
8519   case ARM::UMAAL:
8520   case ARM::UMLAL:
8521   case ARM::UMULL:
8522   case ARM::t2UMAAL:
8523   case ARM::t2UMLAL:
8524   case ARM::t2UMULL:
8525   case ARM::SMLAL:
8526   case ARM::SMLALBB:
8527   case ARM::SMLALBT:
8528   case ARM::SMLALD:
8529   case ARM::SMLALDX:
8530   case ARM::SMLALTB:
8531   case ARM::SMLALTT:
8532   case ARM::SMLSLD:
8533   case ARM::SMLSLDX:
8534   case ARM::SMULL:
8535   case ARM::t2SMLAL:
8536   case ARM::t2SMLALBB:
8537   case ARM::t2SMLALBT:
8538   case ARM::t2SMLALD:
8539   case ARM::t2SMLALDX:
8540   case ARM::t2SMLALTB:
8541   case ARM::t2SMLALTT:
8542   case ARM::t2SMLSLD:
8543   case ARM::t2SMLSLDX:
8544   case ARM::t2SMULL: {
8545     MCRegister RdHi = Inst.getOperand(0).getReg();
8546     MCRegister RdLo = Inst.getOperand(1).getReg();
8547     if(RdHi == RdLo) {
8548       return Error(Loc,
8549                    "unpredictable instruction, RdHi and RdLo must be different");
8550     }
8551     break;
8552   }
8553 
8554   case ARM::CDE_CX1:
8555   case ARM::CDE_CX1A:
8556   case ARM::CDE_CX1D:
8557   case ARM::CDE_CX1DA:
8558   case ARM::CDE_CX2:
8559   case ARM::CDE_CX2A:
8560   case ARM::CDE_CX2D:
8561   case ARM::CDE_CX2DA:
8562   case ARM::CDE_CX3:
8563   case ARM::CDE_CX3A:
8564   case ARM::CDE_CX3D:
8565   case ARM::CDE_CX3DA:
8566   case ARM::CDE_VCX1_vec:
8567   case ARM::CDE_VCX1_fpsp:
8568   case ARM::CDE_VCX1_fpdp:
8569   case ARM::CDE_VCX1A_vec:
8570   case ARM::CDE_VCX1A_fpsp:
8571   case ARM::CDE_VCX1A_fpdp:
8572   case ARM::CDE_VCX2_vec:
8573   case ARM::CDE_VCX2_fpsp:
8574   case ARM::CDE_VCX2_fpdp:
8575   case ARM::CDE_VCX2A_vec:
8576   case ARM::CDE_VCX2A_fpsp:
8577   case ARM::CDE_VCX2A_fpdp:
8578   case ARM::CDE_VCX3_vec:
8579   case ARM::CDE_VCX3_fpsp:
8580   case ARM::CDE_VCX3_fpdp:
8581   case ARM::CDE_VCX3A_vec:
8582   case ARM::CDE_VCX3A_fpsp:
8583   case ARM::CDE_VCX3A_fpdp: {
8584     assert(Inst.getOperand(1).isImm() &&
8585            "CDE operand 1 must be a coprocessor ID");
8586     int64_t Coproc = Inst.getOperand(1).getImm();
8587     if (Coproc < 8 && !ARM::isCDECoproc(Coproc, *STI))
8588       return Error(Operands[1]->getStartLoc(),
8589                    "coprocessor must be configured as CDE");
8590     else if (Coproc >= 8)
8591       return Error(Operands[1]->getStartLoc(),
8592                    "coprocessor must be in the range [p0, p7]");
8593     break;
8594   }
8595 
8596   case ARM::t2CDP:
8597   case ARM::t2CDP2:
8598   case ARM::t2LDC2L_OFFSET:
8599   case ARM::t2LDC2L_OPTION:
8600   case ARM::t2LDC2L_POST:
8601   case ARM::t2LDC2L_PRE:
8602   case ARM::t2LDC2_OFFSET:
8603   case ARM::t2LDC2_OPTION:
8604   case ARM::t2LDC2_POST:
8605   case ARM::t2LDC2_PRE:
8606   case ARM::t2LDCL_OFFSET:
8607   case ARM::t2LDCL_OPTION:
8608   case ARM::t2LDCL_POST:
8609   case ARM::t2LDCL_PRE:
8610   case ARM::t2LDC_OFFSET:
8611   case ARM::t2LDC_OPTION:
8612   case ARM::t2LDC_POST:
8613   case ARM::t2LDC_PRE:
8614   case ARM::t2MCR:
8615   case ARM::t2MCR2:
8616   case ARM::t2MCRR:
8617   case ARM::t2MCRR2:
8618   case ARM::t2MRC:
8619   case ARM::t2MRC2:
8620   case ARM::t2MRRC:
8621   case ARM::t2MRRC2:
8622   case ARM::t2STC2L_OFFSET:
8623   case ARM::t2STC2L_OPTION:
8624   case ARM::t2STC2L_POST:
8625   case ARM::t2STC2L_PRE:
8626   case ARM::t2STC2_OFFSET:
8627   case ARM::t2STC2_OPTION:
8628   case ARM::t2STC2_POST:
8629   case ARM::t2STC2_PRE:
8630   case ARM::t2STCL_OFFSET:
8631   case ARM::t2STCL_OPTION:
8632   case ARM::t2STCL_POST:
8633   case ARM::t2STCL_PRE:
8634   case ARM::t2STC_OFFSET:
8635   case ARM::t2STC_OPTION:
8636   case ARM::t2STC_POST:
8637   case ARM::t2STC_PRE: {
8638     unsigned Opcode = Inst.getOpcode();
8639     // Inst.getOperand indexes operands in the (oops ...) and (iops ...) dags,
8640     // CopInd is the index of the coprocessor operand.
8641     size_t CopInd = 0;
8642     if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8643       CopInd = 2;
8644     else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8645       CopInd = 1;
8646     assert(Inst.getOperand(CopInd).isImm() &&
8647            "Operand must be a coprocessor ID");
8648     int64_t Coproc = Inst.getOperand(CopInd).getImm();
8649     // Operands[2] is the coprocessor operand at syntactic level
8650     if (ARM::isCDECoproc(Coproc, *STI))
8651       return Error(Operands[2]->getStartLoc(),
8652                    "coprocessor must be configured as GCP");
8653     break;
8654   }
8655   }
8656 
8657   return false;
8658 }
8659 
/// Translate a parser-synthesized VST "Asm" pseudo-opcode into the real
/// instruction opcode that is encoded into the MCInst.
///
/// The assembly parser matches the NEON store-lane / store-multiple
/// mnemonics against pseudo opcodes (the ARM::VST*Asm_* values); this table
/// maps each pseudo to its concrete opcode (the "_UPD" variants for the
/// writeback forms).
///
/// \param Opc     Pseudo opcode produced by instruction matching.
/// \param Spacing [out] Distance between successive D registers in the
///                register list: set to 1 for the 'd' (consecutive
///                register) forms and 2 for the 'q' (every-other-register)
///                forms throughout this table, except for the entries
///                flagged with NOTE(review) below.
/// \return The concrete ARM::VST* opcode.
static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  // Callers must only route the pseudo opcodes listed below here.
  default: llvm_unreachable("unexpected opcode!");
  // VST1LN
  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;

  // VST2LN
  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;

  // VST3LN
  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
  // NOTE(review): the sibling VST3LNq16 forms (register-writeback and
  // non-writeback) set Spacing = 2 — confirm this 1 is intentional.
  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;

  // VST3
  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;

  // VST4LN
  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
  // NOTE(review): the sibling VST4LNq16 forms set Spacing = 2 — confirm
  // this 1 is intentional.
  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;

  // VST4
  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
  }
}
8768 
/// Translate a parser-synthesized VLD "Asm" pseudo-opcode into the real
/// instruction opcode that is encoded into the MCInst.
///
/// Counterpart of getRealVSTOpcode for the NEON load-lane / load-dup /
/// load-multiple mnemonics: maps each ARM::VLD*Asm_* pseudo opcode to its
/// concrete opcode (the "_UPD" variants for the writeback forms).
///
/// \param Opc     Pseudo opcode produced by instruction matching.
/// \param Spacing [out] Distance between successive D registers in the
///                register list: set to 1 for the 'd' (consecutive
///                register) forms and 2 for the 'q' (every-other-register)
///                forms throughout this table, except for the entries
///                flagged with NOTE(review) below.
/// \return The concrete ARM::VLD* opcode.
static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  // Callers must only route the pseudo opcodes listed below here.
  default: llvm_unreachable("unexpected opcode!");
  // VLD1LN
  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;

  // VLD2LN
  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
  // NOTE(review): the sibling VLD2LNq16 forms (register-writeback and
  // non-writeback) set Spacing = 2 — confirm this 1 is intentional.
  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;

  // VLD3DUP
  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
  // NOTE(review): the sibling VLD3DUPq8 forms set Spacing = 2 — confirm
  // this 1 is intentional.
  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;

  // VLD3LN
  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
  // NOTE(review): the sibling VLD3LNq16 forms set Spacing = 2 — confirm
  // this 1 is intentional.
  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;

  // VLD3
  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;

  // VLD4LN
  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;

  // VLD4DUP
  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
  // NOTE(review): the sibling VLD4DUPq8/q16 forms (register-writeback and
  // non-writeback) set Spacing = 2 — confirm these two 1s are intentional.
  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;

  // VLD4
  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
  }
}
8915 
8916 bool ARMAsmParser::processInstruction(MCInst &Inst,
8917                                       const OperandVector &Operands,
8918                                       unsigned MnemonicOpsEndInd,
8919                                       MCStreamer &Out) {
8920   // Check if we have the wide qualifier, because if it's present we
8921   // must avoid selecting a 16-bit thumb instruction.
8922   bool HasWideQualifier = false;
8923   for (auto &Op : Operands) {
8924     ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
8925     if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
8926       HasWideQualifier = true;
8927       break;
8928     }
8929   }
8930 
8931   switch (Inst.getOpcode()) {
8932   case ARM::VLLDM:
8933   case ARM::VLSTM: {
8934     // In some cases both T1 and T2 are valid, causing tablegen pick T1 instead
8935     // of T2
8936     if (Operands.size() ==
8937         MnemonicOpsEndInd + 2) { // a register list has been provided
8938       ARMOperand &Op = static_cast<ARMOperand &>(
8939           *Operands[MnemonicOpsEndInd + 1]); // the register list, a dpr_reglist
8940       assert(Op.isDPRRegList());
8941       auto &RegList = Op.getRegList();
8942       // When the register list is {d0-d31} the instruction has to be the T2
8943       // variant
8944       if (RegList.size() == 32) {
8945         const unsigned Opcode =
8946             (Inst.getOpcode() == ARM::VLLDM) ? ARM::VLLDM_T2 : ARM::VLSTM_T2;
8947         MCInst TmpInst;
8948         TmpInst.setOpcode(Opcode);
8949         TmpInst.addOperand(Inst.getOperand(0));
8950         TmpInst.addOperand(Inst.getOperand(1));
8951         TmpInst.addOperand(Inst.getOperand(2));
8952         TmpInst.addOperand(Inst.getOperand(3));
8953         Inst = TmpInst;
8954         return true;
8955       }
8956     }
8957     return false;
8958   }
8959   // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
8960   case ARM::LDRT_POST:
8961   case ARM::LDRBT_POST: {
8962     const unsigned Opcode =
8963       (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8964                                            : ARM::LDRBT_POST_IMM;
8965     MCInst TmpInst;
8966     TmpInst.setOpcode(Opcode);
8967     TmpInst.addOperand(Inst.getOperand(0));
8968     TmpInst.addOperand(Inst.getOperand(1));
8969     TmpInst.addOperand(Inst.getOperand(1));
8970     TmpInst.addOperand(MCOperand::createReg(0));
8971     TmpInst.addOperand(MCOperand::createImm(0));
8972     TmpInst.addOperand(Inst.getOperand(2));
8973     TmpInst.addOperand(Inst.getOperand(3));
8974     Inst = TmpInst;
8975     return true;
8976   }
8977   // Alias for 'ldr{sb,h,sh}t Rt, [Rn] {, #imm}' for ommitted immediate.
8978   case ARM::LDRSBTii:
8979   case ARM::LDRHTii:
8980   case ARM::LDRSHTii: {
8981     MCInst TmpInst;
8982 
8983     if (Inst.getOpcode() == ARM::LDRSBTii)
8984       TmpInst.setOpcode(ARM::LDRSBTi);
8985     else if (Inst.getOpcode() == ARM::LDRHTii)
8986       TmpInst.setOpcode(ARM::LDRHTi);
8987     else if (Inst.getOpcode() == ARM::LDRSHTii)
8988       TmpInst.setOpcode(ARM::LDRSHTi);
8989     TmpInst.addOperand(Inst.getOperand(0));
8990     TmpInst.addOperand(Inst.getOperand(1));
8991     TmpInst.addOperand(Inst.getOperand(1));
8992     TmpInst.addOperand(MCOperand::createImm(256));
8993     TmpInst.addOperand(Inst.getOperand(2));
8994     Inst = TmpInst;
8995     return true;
8996   }
8997   // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
8998   case ARM::STRT_POST:
8999   case ARM::STRBT_POST: {
9000     const unsigned Opcode =
9001       (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
9002                                            : ARM::STRBT_POST_IMM;
9003     MCInst TmpInst;
9004     TmpInst.setOpcode(Opcode);
9005     TmpInst.addOperand(Inst.getOperand(1));
9006     TmpInst.addOperand(Inst.getOperand(0));
9007     TmpInst.addOperand(Inst.getOperand(1));
9008     TmpInst.addOperand(MCOperand::createReg(0));
9009     TmpInst.addOperand(MCOperand::createImm(0));
9010     TmpInst.addOperand(Inst.getOperand(2));
9011     TmpInst.addOperand(Inst.getOperand(3));
9012     Inst = TmpInst;
9013     return true;
9014   }
9015   // Alias for alternate form of 'ADR Rd, #imm' instruction.
9016   case ARM::ADDri: {
9017     if (Inst.getOperand(1).getReg() != ARM::PC || Inst.getOperand(5).getReg() ||
9018         !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
9019       return false;
9020     MCInst TmpInst;
9021     TmpInst.setOpcode(ARM::ADR);
9022     TmpInst.addOperand(Inst.getOperand(0));
9023     if (Inst.getOperand(2).isImm()) {
9024       // Immediate (mod_imm) will be in its encoded form, we must unencode it
9025       // before passing it to the ADR instruction.
9026       unsigned Enc = Inst.getOperand(2).getImm();
9027       TmpInst.addOperand(MCOperand::createImm(
9028           llvm::rotr<uint32_t>(Enc & 0xFF, (Enc & 0xF00) >> 7)));
9029     } else {
9030       // Turn PC-relative expression into absolute expression.
9031       // Reading PC provides the start of the current instruction + 8 and
9032       // the transform to adr is biased by that.
9033       MCSymbol *Dot = getContext().createTempSymbol();
9034       Out.emitLabel(Dot);
9035       const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
9036       const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
9037                                                      MCSymbolRefExpr::VK_None,
9038                                                      getContext());
9039       const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
9040       const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
9041                                                      getContext());
9042       const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
9043                                                         getContext());
9044       TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
9045     }
9046     TmpInst.addOperand(Inst.getOperand(3));
9047     TmpInst.addOperand(Inst.getOperand(4));
9048     Inst = TmpInst;
9049     return true;
9050   }
  // Aliases for imm syntax of LDR instructions.
  // The *_imm pseudo has operands (Rt, Rn, imm, pred); the real pre/post
  // indexed form additionally defines the written-back base (Rn_wb).
  case ARM::t2LDR_PRE_imm:
  case ARM::t2LDR_POST_imm: {
    MCInst TmpInst;
    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDR_PRE_imm ? ARM::t2LDR_PRE
                                                             : ARM::t2LDR_POST);
    TmpInst.addOperand(Inst.getOperand(0)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // imm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    Inst = TmpInst;
    return true;
  }
  // Aliases for imm syntax of STR instructions.
  // Note the store form defines Rn_wb first (it is the only def), so the
  // operand order differs from the load case above.
  case ARM::t2STR_PRE_imm:
  case ARM::t2STR_POST_imm: {
    MCInst TmpInst;
    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STR_PRE_imm ? ARM::t2STR_PRE
                                                             : ARM::t2STR_POST);
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(0)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // imm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    Inst = TmpInst;
    return true;
  }
  // Aliases for imm syntax of LDRB instructions.
  // Offset form maps directly onto the i8 (negative-offset) encoding.
  case ARM::t2LDRB_OFFSET_imm: {
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2LDRBi8);
    TmpInst.addOperand(Inst.getOperand(0)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // imm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    Inst = TmpInst;
    return true;
  }
  // Pre/post indexed forms additionally define the written-back base Rn_wb.
  case ARM::t2LDRB_PRE_imm:
  case ARM::t2LDRB_POST_imm: {
    MCInst TmpInst;
    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRB_PRE_imm
                          ? ARM::t2LDRB_PRE
                          : ARM::t2LDRB_POST);
    TmpInst.addOperand(Inst.getOperand(0)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // imm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    Inst = TmpInst;
    return true;
  }
  // Aliases for imm syntax of STRB instructions.
  case ARM::t2STRB_OFFSET_imm: {
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2STRBi8);
    TmpInst.addOperand(Inst.getOperand(0)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // imm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    Inst = TmpInst;
    return true;
  }
  // Store pre/post forms define Rn_wb first (it is the only def).
  case ARM::t2STRB_PRE_imm:
  case ARM::t2STRB_POST_imm: {
    MCInst TmpInst;
    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRB_PRE_imm
                          ? ARM::t2STRB_PRE
                          : ARM::t2STRB_POST);
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(0)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // imm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    Inst = TmpInst;
    return true;
  }
  // Aliases for imm syntax of LDRH instructions.
  // Same expansion pattern as the LDRB/STRB aliases above, for halfwords.
  case ARM::t2LDRH_OFFSET_imm: {
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2LDRHi8);
    TmpInst.addOperand(Inst.getOperand(0)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // imm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    Inst = TmpInst;
    return true;
  }
  case ARM::t2LDRH_PRE_imm:
  case ARM::t2LDRH_POST_imm: {
    MCInst TmpInst;
    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRH_PRE_imm
                          ? ARM::t2LDRH_PRE
                          : ARM::t2LDRH_POST);
    TmpInst.addOperand(Inst.getOperand(0)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // imm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    Inst = TmpInst;
    return true;
  }
  // Aliases for imm syntax of STRH instructions.
  case ARM::t2STRH_OFFSET_imm: {
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2STRHi8);
    TmpInst.addOperand(Inst.getOperand(0)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // imm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    Inst = TmpInst;
    return true;
  }
  // Store pre/post forms define Rn_wb first (it is the only def).
  case ARM::t2STRH_PRE_imm:
  case ARM::t2STRH_POST_imm: {
    MCInst TmpInst;
    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRH_PRE_imm
                          ? ARM::t2STRH_PRE
                          : ARM::t2STRH_POST);
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(0)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // imm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    Inst = TmpInst;
    return true;
  }
  // Aliases for imm syntax of LDRSB instructions.
  // Sign-extending byte loads; same expansion pattern as LDRB above.
  case ARM::t2LDRSB_OFFSET_imm: {
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2LDRSBi8);
    TmpInst.addOperand(Inst.getOperand(0)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // imm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    Inst = TmpInst;
    return true;
  }
  case ARM::t2LDRSB_PRE_imm:
  case ARM::t2LDRSB_POST_imm: {
    MCInst TmpInst;
    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSB_PRE_imm
                          ? ARM::t2LDRSB_PRE
                          : ARM::t2LDRSB_POST);
    TmpInst.addOperand(Inst.getOperand(0)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // imm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    Inst = TmpInst;
    return true;
  }
  // Aliases for imm syntax of LDRSH instructions.
  // Sign-extending halfword loads.
  case ARM::t2LDRSH_OFFSET_imm: {
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2LDRSHi8);
    TmpInst.addOperand(Inst.getOperand(0)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // imm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    Inst = TmpInst;
    return true;
  }
  case ARM::t2LDRSH_PRE_imm:
  case ARM::t2LDRSH_POST_imm: {
    MCInst TmpInst;
    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSH_PRE_imm
                          ? ARM::t2LDRSH_PRE
                          : ARM::t2LDRSH_POST);
    TmpInst.addOperand(Inst.getOperand(0)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // imm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    Inst = TmpInst;
    return true;
  }
  // Aliases for alternate PC+imm syntax of LDR instructions.
  // These only swap the pseudo opcode for the real pci opcode; the operand
  // list is already in the right shape.
  case ARM::t2LDRpcrel:
    // Select the narrow version if the immediate will fit.
    // tLDRpci takes a positive offset up to 255; a ".w" qualifier
    // (HasWideQualifier) forces the 32-bit encoding regardless.
    if (Inst.getOperand(1).getImm() > 0 &&
        Inst.getOperand(1).getImm() <= 0xff &&
        !HasWideQualifier)
      Inst.setOpcode(ARM::tLDRpci);
    else
      Inst.setOpcode(ARM::t2LDRpci);
    return true;
  case ARM::t2LDRBpcrel:
    Inst.setOpcode(ARM::t2LDRBpci);
    return true;
  case ARM::t2LDRHpcrel:
    Inst.setOpcode(ARM::t2LDRHpci);
    return true;
  case ARM::t2LDRSBpcrel:
    Inst.setOpcode(ARM::t2LDRSBpci);
    return true;
  case ARM::t2LDRSHpcrel:
    Inst.setOpcode(ARM::t2LDRSHpci);
    return true;
  case ARM::LDRConstPool:
  case ARM::tLDRConstPool:
  case ARM::t2LDRConstPool: {
    // Pseudo instruction ldr rt, =immediate is converted to a
    // MOV rt, immediate if immediate is known and representable
    // otherwise we create a constant pool entry that we load from.
    MCInst TmpInst;
    // Default to the PC-relative load; may be overwritten below with a
    // MOV/MVN opcode if the constant can be materialized directly.
    if (Inst.getOpcode() == ARM::LDRConstPool)
      TmpInst.setOpcode(ARM::LDRi12);
    else if (Inst.getOpcode() == ARM::tLDRConstPool)
      TmpInst.setOpcode(ARM::tLDRpci);
    else if (Inst.getOpcode() == ARM::t2LDRConstPool)
      TmpInst.setOpcode(ARM::t2LDRpci);
    const ARMOperand &PoolOperand =
        static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
    const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
    // If SubExprVal is a constant we may be able to use a MOV
    // (not for PC/SP destinations, which keep the literal-load form).
    if (isa<MCConstantExpr>(SubExprVal) &&
        Inst.getOperand(0).getReg() != ARM::PC &&
        Inst.getOperand(0).getReg() != ARM::SP) {
      int64_t Value =
        (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
      bool UseMov  = true;
      // MovHasS: whether the chosen MOV/MVN encoding carries an S (cc_out)
      // operand; MOVi16/t2MOVi16 do not.
      bool MovHasS = true;
      if (Inst.getOpcode() == ARM::LDRConstPool) {
        // ARM Constant
        // Try mod_imm as-is, then mod_imm of the complement (MVN), then a
        // plain 16-bit MOVW if available.
        if (ARM_AM::getSOImmVal(Value) != -1) {
          Value = ARM_AM::getSOImmVal(Value);
          TmpInst.setOpcode(ARM::MOVi);
        }
        else if (ARM_AM::getSOImmVal(~Value) != -1) {
          Value = ARM_AM::getSOImmVal(~Value);
          TmpInst.setOpcode(ARM::MVNi);
        }
        else if (hasV6T2Ops() &&
                 Value >=0 && Value < 65536) {
          TmpInst.setOpcode(ARM::MOVi16);
          MovHasS = false;
        }
        else
          UseMov = false;
      }
      else {
        // Thumb/Thumb2 Constant
        if (hasThumb2() &&
            ARM_AM::getT2SOImmVal(Value) != -1)
          TmpInst.setOpcode(ARM::t2MOVi);
        else if (hasThumb2() &&
                 ARM_AM::getT2SOImmVal(~Value) != -1) {
          TmpInst.setOpcode(ARM::t2MVNi);
          Value = ~Value;
        }
        else if (hasV8MBaseline() &&
                 Value >=0 && Value < 65536) {
          TmpInst.setOpcode(ARM::t2MOVi16);
          MovHasS = false;
        }
        else
          UseMov = false;
      }
      if (UseMov) {
        TmpInst.addOperand(Inst.getOperand(0));           // Rt
        TmpInst.addOperand(MCOperand::createImm(Value));  // Immediate
        TmpInst.addOperand(Inst.getOperand(2));           // CondCode
        TmpInst.addOperand(Inst.getOperand(3));           // CondCode
        if (MovHasS)
          TmpInst.addOperand(MCOperand::createReg(0));    // S
        Inst = TmpInst;
        return true;
      }
    }
    // No opportunity to use MOV/MVN create constant pool
    const MCExpr *CPLoc =
      getTargetStreamer().addConstantPoolEntry(SubExprVal,
                                               PoolOperand.getStartLoc());
    TmpInst.addOperand(Inst.getOperand(0));           // Rt
    TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
    if (TmpInst.getOpcode() == ARM::LDRi12)
      TmpInst.addOperand(MCOperand::createImm(0));    // unused offset
    TmpInst.addOperand(Inst.getOperand(2));           // CondCode
    TmpInst.addOperand(Inst.getOperand(3));           // CondCode
    Inst = TmpInst;
    return true;
  }
  // Handle NEON VST complex aliases.
  // Writeback + register-offset forms: pseudo operands are
  // (Vd, lane, Rn, alignment, Rm, pred); the real instruction wants
  // (Rn_wb, Rn, alignment, Rm, Vd..Vd+k*Spacing, lane, pred).
  case ARM::VST1LNdWB_register_Asm_8:
  case ARM::VST1LNdWB_register_Asm_16:
  case ARM::VST1LNdWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // CondCode
    Inst = TmpInst;
    return true;
  }

  case ARM::VST2LNdWB_register_Asm_8:
  case ARM::VST2LNdWB_register_Asm_16:
  case ARM::VST2LNdWB_register_Asm_32:
  case ARM::VST2LNqWB_register_Asm_16:
  case ARM::VST2LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.  The second source register is synthesized from Vd
    // plus the register spacing (1 for d-form pairs, 2 for q-form).
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd+Spacing
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // CondCode
    Inst = TmpInst;
    return true;
  }

  case ARM::VST3LNdWB_register_Asm_8:
  case ARM::VST3LNdWB_register_Asm_16:
  case ARM::VST3LNdWB_register_Asm_32:
  case ARM::VST3LNqWB_register_Asm_16:
  case ARM::VST3LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd+Spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd+2*Spacing
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // CondCode
    Inst = TmpInst;
    return true;
  }

  case ARM::VST4LNdWB_register_Asm_8:
  case ARM::VST4LNdWB_register_Asm_16:
  case ARM::VST4LNdWB_register_Asm_32:
  case ARM::VST4LNqWB_register_Asm_16:
  case ARM::VST4LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd+Spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd+2*Spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Vd+3*Spacing
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // CondCode
    Inst = TmpInst;
    return true;
  }
9441 
  // Writeback forms with no register offset: same shape as the _register
  // cases above, but Rm is a zero (absent) register and the predicate sits
  // one slot earlier in the pseudo.
  case ARM::VST1LNdWB_fixed_Asm_8:
  case ARM::VST1LNdWB_fixed_Asm_16:
  case ARM::VST1LNdWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    Inst = TmpInst;
    return true;
  }

  case ARM::VST2LNdWB_fixed_Asm_8:
  case ARM::VST2LNdWB_fixed_Asm_16:
  case ARM::VST2LNdWB_fixed_Asm_32:
  case ARM::VST2LNqWB_fixed_Asm_16:
  case ARM::VST2LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd+Spacing
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    Inst = TmpInst;
    return true;
  }

  case ARM::VST3LNdWB_fixed_Asm_8:
  case ARM::VST3LNdWB_fixed_Asm_16:
  case ARM::VST3LNdWB_fixed_Asm_32:
  case ARM::VST3LNqWB_fixed_Asm_16:
  case ARM::VST3LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd+Spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd+2*Spacing
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    Inst = TmpInst;
    return true;
  }

  case ARM::VST4LNdWB_fixed_Asm_8:
  case ARM::VST4LNdWB_fixed_Asm_16:
  case ARM::VST4LNdWB_fixed_Asm_32:
  case ARM::VST4LNqWB_fixed_Asm_16:
  case ARM::VST4LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd+Spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd+2*Spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Vd+3*Spacing
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    Inst = TmpInst;
    return true;
  }
9539 
  // Non-writeback lane-store forms: no Rn_wb/Rm operands at all.
  case ARM::VST1LNdAsm_8:
  case ARM::VST1LNdAsm_16:
  case ARM::VST1LNdAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    Inst = TmpInst;
    return true;
  }

  case ARM::VST2LNdAsm_8:
  case ARM::VST2LNdAsm_16:
  case ARM::VST2LNdAsm_32:
  case ARM::VST2LNqAsm_16:
  case ARM::VST2LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd+Spacing
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    Inst = TmpInst;
    return true;
  }

  case ARM::VST3LNdAsm_8:
  case ARM::VST3LNdAsm_16:
  case ARM::VST3LNdAsm_32:
  case ARM::VST3LNqAsm_16:
  case ARM::VST3LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd+Spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd+2*Spacing
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    Inst = TmpInst;
    return true;
  }

  case ARM::VST4LNdAsm_8:
  case ARM::VST4LNdAsm_16:
  case ARM::VST4LNdAsm_32:
  case ARM::VST4LNqAsm_16:
  case ARM::VST4LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd+Spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd+2*Spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Vd+3*Spacing
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    Inst = TmpInst;
    return true;
  }
9629 
  // Handle NEON VLD complex aliases.
  // Lane loads additionally define the destination registers (listed first)
  // and carry tied source operands (the same Vd..Vd+k*Spacing registers)
  // because only one lane of each destination is overwritten.
  case ARM::VLD1LNdWB_register_Asm_8:
  case ARM::VLD1LNdWB_register_Asm_16:
  case ARM::VLD1LNdWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // CondCode
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdWB_register_Asm_8:
  case ARM::VLD2LNdWB_register_Asm_16:
  case ARM::VLD2LNdWB_register_Asm_32:
  case ARM::VLD2LNqWB_register_Asm_16:
  case ARM::VLD2LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd+Spacing
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Tied src (Vd+Spacing)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // CondCode
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3LNdWB_register_Asm_8:
  case ARM::VLD3LNdWB_register_Asm_16:
  case ARM::VLD3LNdWB_register_Asm_32:
  case ARM::VLD3LNqWB_register_Asm_16:
  case ARM::VLD3LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd+Spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd+2*Spacing
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Tied src (Vd+Spacing)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Tied src (Vd+2*Spacing)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // CondCode
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4LNdWB_register_Asm_8:
  case ARM::VLD4LNdWB_register_Asm_16:
  case ARM::VLD4LNdWB_register_Asm_32:
  case ARM::VLD4LNqWB_register_Asm_16:
  case ARM::VLD4LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd+Spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd+2*Spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Vd+3*Spacing
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Tied src (Vd+Spacing)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Tied src (Vd+2*Spacing)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Tied src (Vd+3*Spacing)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // CondCode
    Inst = TmpInst;
    return true;
  }
9744 
  // Writeback lane-load forms with no register offset: Rm is a zero
  // (absent) register and the predicate sits one slot earlier than in the
  // _register forms above.
  case ARM::VLD1LNdWB_fixed_Asm_8:
  case ARM::VLD1LNdWB_fixed_Asm_16:
  case ARM::VLD1LNdWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdWB_fixed_Asm_8:
  case ARM::VLD2LNdWB_fixed_Asm_16:
  case ARM::VLD2LNdWB_fixed_Asm_32:
  case ARM::VLD2LNqWB_fixed_Asm_16:
  case ARM::VLD2LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd+Spacing
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Tied src (Vd+Spacing)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    Inst = TmpInst;
    return true;
  }
9792 
9793   case ARM::VLD3LNdWB_fixed_Asm_8:
9794   case ARM::VLD3LNdWB_fixed_Asm_16:
9795   case ARM::VLD3LNdWB_fixed_Asm_32:
9796   case ARM::VLD3LNqWB_fixed_Asm_16:
9797   case ARM::VLD3LNqWB_fixed_Asm_32: {
9798     MCInst TmpInst;
9799     // Shuffle the operands around so the lane index operand is in the
9800     // right place.
9801     unsigned Spacing;
9802     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9803     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9804     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9805                                             Spacing));
9806     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9807                                             Spacing * 2));
9808     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9809     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9810     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9811     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9812     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9813     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9814                                             Spacing));
9815     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9816                                             Spacing * 2));
9817     TmpInst.addOperand(Inst.getOperand(1)); // lane
9818     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9819     TmpInst.addOperand(Inst.getOperand(5));
9820     Inst = TmpInst;
9821     return true;
9822   }
9823 
9824   case ARM::VLD4LNdWB_fixed_Asm_8:
9825   case ARM::VLD4LNdWB_fixed_Asm_16:
9826   case ARM::VLD4LNdWB_fixed_Asm_32:
9827   case ARM::VLD4LNqWB_fixed_Asm_16:
9828   case ARM::VLD4LNqWB_fixed_Asm_32: {
9829     MCInst TmpInst;
9830     // Shuffle the operands around so the lane index operand is in the
9831     // right place.
9832     unsigned Spacing;
9833     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9834     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9835     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9836                                             Spacing));
9837     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9838                                             Spacing * 2));
9839     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9840                                             Spacing * 3));
9841     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9842     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9843     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9844     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9845     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9846     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9847                                             Spacing));
9848     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9849                                             Spacing * 2));
9850     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9851                                             Spacing * 3));
9852     TmpInst.addOperand(Inst.getOperand(1)); // lane
9853     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9854     TmpInst.addOperand(Inst.getOperand(5));
9855     Inst = TmpInst;
9856     return true;
9857   }
9858 
  // Expand the VLDnLN single-lane-load "Asm" pseudo-instructions without
  // writeback. Same reshuffle as the writeback forms above, minus the
  // Rn_wb / Rm operands: explicit register list first, then address and
  // alignment, the tied source list, and the lane index.
  case ARM::VLD1LNdAsm_8:
  case ARM::VLD1LNdAsm_16:
  case ARM::VLD1LNdAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdAsm_8:
  case ARM::VLD2LNdAsm_16:
  case ARM::VLD2LNdAsm_32:
  case ARM::VLD2LNqAsm_16:
  case ARM::VLD2LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied Vd + spacing
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3LNdAsm_8:
  case ARM::VLD3LNdAsm_16:
  case ARM::VLD3LNdAsm_32:
  case ARM::VLD3LNqAsm_16:
  case ARM::VLD3LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // tied Vd + 2 * spacing
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4LNdAsm_8:
  case ARM::VLD4LNdAsm_16:
  case ARM::VLD4LNdAsm_32:
  case ARM::VLD4LNqAsm_16:
  case ARM::VLD4LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Vd + 3 * spacing
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // tied Vd + 2 * spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // tied Vd + 3 * spacing
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode register
    Inst = TmpInst;
    return true;
  }
9964 
  // VLD3DUP single 3-element structure to all lanes instructions.
  // Expand the "Asm" pseudos into the real instructions by spelling out the
  // three destination registers (Vd, Vd+spacing, Vd+2*spacing), followed by
  // the address/alignment operands and the predicate.
  case ARM::VLD3DUPdAsm_8:
  case ARM::VLD3DUPdAsm_16:
  case ARM::VLD3DUPdAsm_32:
  case ARM::VLD3DUPqAsm_8:
  case ARM::VLD3DUPqAsm_16:
  case ARM::VLD3DUPqAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  // Fixed-increment writeback form: the register-offset (Rm) slot is unused,
  // so fill it with register 0.
  case ARM::VLD3DUPdWB_fixed_Asm_8:
  case ARM::VLD3DUPdWB_fixed_Asm_16:
  case ARM::VLD3DUPdWB_fixed_Asm_32:
  case ARM::VLD3DUPqWB_fixed_Asm_8:
  case ARM::VLD3DUPqWB_fixed_Asm_16:
  case ARM::VLD3DUPqWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (no register offset)
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  // Register-increment writeback form: Rm comes from the pseudo's operand 3.
  case ARM::VLD3DUPdWB_register_Asm_8:
  case ARM::VLD3DUPdWB_register_Asm_16:
  case ARM::VLD3DUPdWB_register_Asm_32:
  case ARM::VLD3DUPqWB_register_Asm_8:
  case ARM::VLD3DUPqWB_register_Asm_16:
  case ARM::VLD3DUPqWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode register
    Inst = TmpInst;
    return true;
  }
10035 
  // VLD3 multiple 3-element structure instructions.
  // Same expansion scheme as VLD3DUP above: explicit 3-register destination
  // list, then address/alignment (plus writeback operands for the WB forms)
  // and the predicate.
  case ARM::VLD3dAsm_8:
  case ARM::VLD3dAsm_16:
  case ARM::VLD3dAsm_32:
  case ARM::VLD3qAsm_8:
  case ARM::VLD3qAsm_16:
  case ARM::VLD3qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  // Fixed-increment writeback form: no register offset, so Rm is reg 0.
  case ARM::VLD3dWB_fixed_Asm_8:
  case ARM::VLD3dWB_fixed_Asm_16:
  case ARM::VLD3dWB_fixed_Asm_32:
  case ARM::VLD3qWB_fixed_Asm_8:
  case ARM::VLD3qWB_fixed_Asm_16:
  case ARM::VLD3qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (no register offset)
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  // Register-increment writeback form: Rm comes from the pseudo's operand 3.
  case ARM::VLD3dWB_register_Asm_8:
  case ARM::VLD3dWB_register_Asm_16:
  case ARM::VLD3dWB_register_Asm_32:
  case ARM::VLD3qWB_register_Asm_8:
  case ARM::VLD3qWB_register_Asm_16:
  case ARM::VLD3qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode register
    Inst = TmpInst;
    return true;
  }
10106 
  // VLD4DUP single 4-element structure to all lanes instructions.
  // Same expansion scheme as VLD3DUP above, with a fourth destination
  // register (Vd + 3 * spacing) in the list.
  case ARM::VLD4DUPdAsm_8:
  case ARM::VLD4DUPdAsm_16:
  case ARM::VLD4DUPdAsm_32:
  case ARM::VLD4DUPqAsm_8:
  case ARM::VLD4DUPqAsm_16:
  case ARM::VLD4DUPqAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Vd + 3 * spacing
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  // Fixed-increment writeback form: no register offset, so Rm is reg 0.
  case ARM::VLD4DUPdWB_fixed_Asm_8:
  case ARM::VLD4DUPdWB_fixed_Asm_16:
  case ARM::VLD4DUPdWB_fixed_Asm_32:
  case ARM::VLD4DUPqWB_fixed_Asm_8:
  case ARM::VLD4DUPqWB_fixed_Asm_16:
  case ARM::VLD4DUPqWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Vd + 3 * spacing
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (no register offset)
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  // Register-increment writeback form: Rm comes from the pseudo's operand 3.
  case ARM::VLD4DUPdWB_register_Asm_8:
  case ARM::VLD4DUPdWB_register_Asm_16:
  case ARM::VLD4DUPdWB_register_Asm_32:
  case ARM::VLD4DUPqWB_register_Asm_8:
  case ARM::VLD4DUPqWB_register_Asm_16:
  case ARM::VLD4DUPqWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Vd + 3 * spacing
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode register
    Inst = TmpInst;
    return true;
  }
10183 
  // VLD4 multiple 4-element structure instructions.
  // Same expansion scheme as VLD3 above, with a fourth destination register
  // (Vd + 3 * spacing) in the list.
  case ARM::VLD4dAsm_8:
  case ARM::VLD4dAsm_16:
  case ARM::VLD4dAsm_32:
  case ARM::VLD4qAsm_8:
  case ARM::VLD4qAsm_16:
  case ARM::VLD4qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Vd + 3 * spacing
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  // Fixed-increment writeback form: no register offset, so Rm is reg 0.
  case ARM::VLD4dWB_fixed_Asm_8:
  case ARM::VLD4dWB_fixed_Asm_16:
  case ARM::VLD4dWB_fixed_Asm_32:
  case ARM::VLD4qWB_fixed_Asm_8:
  case ARM::VLD4qWB_fixed_Asm_16:
  case ARM::VLD4qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Vd + 3 * spacing
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (no register offset)
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  // Register-increment writeback form: Rm comes from the pseudo's operand 3.
  case ARM::VLD4dWB_register_Asm_8:
  case ARM::VLD4dWB_register_Asm_16:
  case ARM::VLD4dWB_register_Asm_32:
  case ARM::VLD4qWB_register_Asm_8:
  case ARM::VLD4qWB_register_Asm_16:
  case ARM::VLD4qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Vd + 3 * spacing
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode register
    Inst = TmpInst;
    return true;
  }
10260 
  // VST3 multiple 3-element structure instructions.
  // Stores mirror the loads above, but the real instruction takes the
  // address (and writeback) operands first and the explicit source register
  // list (Vd, Vd+spacing, Vd+2*spacing) afterwards.
  case ARM::VST3dAsm_8:
  case ARM::VST3dAsm_16:
  case ARM::VST3dAsm_32:
  case ARM::VST3qAsm_8:
  case ARM::VST3qAsm_16:
  case ARM::VST3qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  // Fixed-increment writeback form: no register offset, so Rm is reg 0.
  case ARM::VST3dWB_fixed_Asm_8:
  case ARM::VST3dWB_fixed_Asm_16:
  case ARM::VST3dWB_fixed_Asm_32:
  case ARM::VST3qWB_fixed_Asm_8:
  case ARM::VST3qWB_fixed_Asm_16:
  case ARM::VST3qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (no register offset)
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  // Register-increment writeback form: Rm comes from the pseudo's operand 3.
  case ARM::VST3dWB_register_Asm_8:
  case ARM::VST3dWB_register_Asm_16:
  case ARM::VST3dWB_register_Asm_32:
  case ARM::VST3qWB_register_Asm_8:
  case ARM::VST3qWB_register_Asm_16:
  case ARM::VST3qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode register
    Inst = TmpInst;
    return true;
  }
10331 
  // VST4 multiple 4-element structure instructions.
  // Same expansion scheme as VST3 above, with a fourth source register
  // (Vd + 3 * spacing) in the list.
  case ARM::VST4dAsm_8:
  case ARM::VST4dAsm_16:
  case ARM::VST4dAsm_32:
  case ARM::VST4qAsm_8:
  case ARM::VST4qAsm_16:
  case ARM::VST4qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Vd + 3 * spacing
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  // Fixed-increment writeback form: no register offset, so Rm is reg 0.
  case ARM::VST4dWB_fixed_Asm_8:
  case ARM::VST4dWB_fixed_Asm_16:
  case ARM::VST4dWB_fixed_Asm_32:
  case ARM::VST4qWB_fixed_Asm_8:
  case ARM::VST4qWB_fixed_Asm_16:
  case ARM::VST4qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (no register offset)
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Vd + 3 * spacing
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode register
    Inst = TmpInst;
    return true;
  }

  // Register-increment writeback form: Rm comes from the pseudo's operand 3.
  case ARM::VST4dWB_register_Asm_8:
  case ARM::VST4dWB_register_Asm_16:
  case ARM::VST4dWB_register_Asm_32:
  case ARM::VST4qWB_register_Asm_8:
  case ARM::VST4qWB_register_Asm_16:
  case ARM::VST4qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // Vd + spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // Vd + 2 * spacing
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // Vd + 3 * spacing
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode register
    Inst = TmpInst;
    return true;
  }
10408 
  // Handle encoding choice for the shift-immediate instructions.
  case ARM::t2LSLri:
  case ARM::t2LSRri:
  case ARM::t2ASRri:
    // Prefer the narrow (16-bit Thumb1) encoding when both registers are
    // low registers, the flag-setting behavior matches what the narrow
    // encoding does (operand 5 is the cc_out register: CPSR outside an IT
    // block, no register inside one), and the user didn't force the wide
    // encoding (e.g. with a ".w" qualifier).
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(5).getReg() ==
            (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
        !HasWideQualifier) {
      unsigned NewOpc;
      switch (Inst.getOpcode()) {
      default: llvm_unreachable("unexpected opcode");
      case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
      case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
      case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
      }
      // The Thumb1 operands aren't in the same order. Awesome, eh?
      MCInst TmpInst;
      TmpInst.setOpcode(NewOpc);
      TmpInst.addOperand(Inst.getOperand(0)); // Rd
      TmpInst.addOperand(Inst.getOperand(5)); // cc_out (moves up to slot 1)
      TmpInst.addOperand(Inst.getOperand(1)); // source register
      TmpInst.addOperand(Inst.getOperand(2)); // shift immediate
      TmpInst.addOperand(Inst.getOperand(3)); // CondCode
      TmpInst.addOperand(Inst.getOperand(4)); // CondCode register
      Inst = TmpInst;
      return true;
    }
    return false;
10438 
10439   // Handle the Thumb2 mode MOV complex aliases.
10440   case ARM::t2MOVsr:
10441   case ARM::t2MOVSsr: {
10442     // Which instruction to expand to depends on the CCOut operand and
10443     // whether we're in an IT block if the register operands are low
10444     // registers.
10445     bool isNarrow = false;
10446     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10447         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10448         isARMLowRegister(Inst.getOperand(2).getReg()) &&
10449         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10450         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
10451         !HasWideQualifier)
10452       isNarrow = true;
10453     MCInst TmpInst;
10454     unsigned newOpc;
10455     switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
10456     default: llvm_unreachable("unexpected opcode!");
10457     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
10458     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
10459     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
10460     case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
10461     }
10462     TmpInst.setOpcode(newOpc);
10463     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10464     if (isNarrow)
10465       TmpInst.addOperand(MCOperand::createReg(
10466           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : ARM::NoRegister));
10467     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10468     TmpInst.addOperand(Inst.getOperand(2)); // Rm
10469     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10470     TmpInst.addOperand(Inst.getOperand(5));
10471     if (!isNarrow)
10472       TmpInst.addOperand(MCOperand::createReg(
10473           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : ARM::NoRegister));
10474     Inst = TmpInst;
10475     return true;
10476   }
10477   case ARM::t2MOVsi:
10478   case ARM::t2MOVSsi: {
10479     // Which instruction to expand to depends on the CCOut operand and
10480     // whether we're in an IT block if the register operands are low
10481     // registers.
10482     bool isNarrow = false;
10483     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10484         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10485         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
10486         !HasWideQualifier)
10487       isNarrow = true;
10488     MCInst TmpInst;
10489     unsigned newOpc;
10490     unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
10491     unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
10492     bool isMov = false;
10493     // MOV rd, rm, LSL #0 is actually a MOV instruction
10494     if (Shift == ARM_AM::lsl && Amount == 0) {
10495       isMov = true;
10496       // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of
10497       // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is
10498       // unpredictable in an IT block so the 32-bit encoding T3 has to be used
10499       // instead.
10500       if (inITBlock()) {
10501         isNarrow = false;
10502       }
10503       newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
10504     } else {
10505       switch(Shift) {
10506       default: llvm_unreachable("unexpected opcode!");
10507       case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
10508       case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
10509       case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
10510       case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
10511       case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
10512       }
10513     }
10514     if (Amount == 32) Amount = 0;
10515     TmpInst.setOpcode(newOpc);
10516     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10517     if (isNarrow && !isMov)
10518       TmpInst.addOperand(MCOperand::createReg(
10519           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : ARM::NoRegister));
10520     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10521     if (newOpc != ARM::t2RRX && !isMov)
10522       TmpInst.addOperand(MCOperand::createImm(Amount));
10523     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10524     TmpInst.addOperand(Inst.getOperand(4));
10525     if (!isNarrow)
10526       TmpInst.addOperand(MCOperand::createReg(
10527           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : ARM::NoRegister));
10528     Inst = TmpInst;
10529     return true;
10530   }
10531   // Handle the ARM mode MOV complex aliases.
10532   case ARM::ASRr:
10533   case ARM::LSRr:
10534   case ARM::LSLr:
10535   case ARM::RORr: {
10536     ARM_AM::ShiftOpc ShiftTy;
10537     switch(Inst.getOpcode()) {
10538     default: llvm_unreachable("unexpected opcode!");
10539     case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
10540     case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
10541     case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
10542     case ARM::RORr: ShiftTy = ARM_AM::ror; break;
10543     }
10544     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
10545     MCInst TmpInst;
10546     TmpInst.setOpcode(ARM::MOVsr);
10547     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10548     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10549     TmpInst.addOperand(Inst.getOperand(2)); // Rm
10550     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10551     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10552     TmpInst.addOperand(Inst.getOperand(4));
10553     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10554     Inst = TmpInst;
10555     return true;
10556   }
10557   case ARM::ASRi:
10558   case ARM::LSRi:
10559   case ARM::LSLi:
10560   case ARM::RORi: {
10561     ARM_AM::ShiftOpc ShiftTy;
10562     switch(Inst.getOpcode()) {
10563     default: llvm_unreachable("unexpected opcode!");
10564     case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
10565     case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
10566     case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
10567     case ARM::RORi: ShiftTy = ARM_AM::ror; break;
10568     }
10569     // A shift by zero is a plain MOVr, not a MOVsi.
10570     unsigned Amt = Inst.getOperand(2).getImm();
10571     unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
10572     // A shift by 32 should be encoded as 0 when permitted
10573     if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
10574       Amt = 0;
10575     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
10576     MCInst TmpInst;
10577     TmpInst.setOpcode(Opc);
10578     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10579     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10580     if (Opc == ARM::MOVsi)
10581       TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10582     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10583     TmpInst.addOperand(Inst.getOperand(4));
10584     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10585     Inst = TmpInst;
10586     return true;
10587   }
10588   case ARM::RRXi: {
10589     unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
10590     MCInst TmpInst;
10591     TmpInst.setOpcode(ARM::MOVsi);
10592     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10593     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10594     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10595     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10596     TmpInst.addOperand(Inst.getOperand(3));
10597     TmpInst.addOperand(Inst.getOperand(4)); // cc_out
10598     Inst = TmpInst;
10599     return true;
10600   }
10601   case ARM::t2LDMIA_UPD: {
10602     // If this is a load of a single register, then we should use
10603     // a post-indexed LDR instruction instead, per the ARM ARM.
10604     if (Inst.getNumOperands() != 5)
10605       return false;
10606     MCInst TmpInst;
10607     TmpInst.setOpcode(ARM::t2LDR_POST);
10608     TmpInst.addOperand(Inst.getOperand(4)); // Rt
10609     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10610     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10611     TmpInst.addOperand(MCOperand::createImm(4));
10612     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10613     TmpInst.addOperand(Inst.getOperand(3));
10614     Inst = TmpInst;
10615     return true;
10616   }
10617   case ARM::t2STMDB_UPD: {
10618     // If this is a store of a single register, then we should use
10619     // a pre-indexed STR instruction instead, per the ARM ARM.
10620     if (Inst.getNumOperands() != 5)
10621       return false;
10622     MCInst TmpInst;
10623     TmpInst.setOpcode(ARM::t2STR_PRE);
10624     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10625     TmpInst.addOperand(Inst.getOperand(4)); // Rt
10626     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10627     TmpInst.addOperand(MCOperand::createImm(-4));
10628     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10629     TmpInst.addOperand(Inst.getOperand(3));
10630     Inst = TmpInst;
10631     return true;
10632   }
10633   case ARM::LDMIA_UPD:
10634     // If this is a load of a single register via a 'pop', then we should use
10635     // a post-indexed LDR instruction instead, per the ARM ARM.
10636     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
10637         Inst.getNumOperands() == 5) {
10638       MCInst TmpInst;
10639       TmpInst.setOpcode(ARM::LDR_POST_IMM);
10640       TmpInst.addOperand(Inst.getOperand(4)); // Rt
10641       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10642       TmpInst.addOperand(Inst.getOperand(1)); // Rn
10643       TmpInst.addOperand(MCOperand::createReg(0));  // am2offset
10644       TmpInst.addOperand(MCOperand::createImm(4));
10645       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10646       TmpInst.addOperand(Inst.getOperand(3));
10647       Inst = TmpInst;
10648       return true;
10649     }
10650     break;
10651   case ARM::STMDB_UPD:
10652     // If this is a store of a single register via a 'push', then we should use
10653     // a pre-indexed STR instruction instead, per the ARM ARM.
10654     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
10655         Inst.getNumOperands() == 5) {
10656       MCInst TmpInst;
10657       TmpInst.setOpcode(ARM::STR_PRE_IMM);
10658       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10659       TmpInst.addOperand(Inst.getOperand(4)); // Rt
10660       TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
10661       TmpInst.addOperand(MCOperand::createImm(-4));
10662       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10663       TmpInst.addOperand(Inst.getOperand(3));
10664       Inst = TmpInst;
10665     }
10666     break;
10667   case ARM::t2ADDri12:
10668   case ARM::t2SUBri12:
10669   case ARM::t2ADDspImm12:
10670   case ARM::t2SUBspImm12: {
10671     // If the immediate fits for encoding T3 and the generic
10672     // mnemonic was used, encoding T3 is preferred.
10673     const StringRef Token = static_cast<ARMOperand &>(*Operands[0]).getToken();
10674     if ((Token != "add" && Token != "sub") ||
10675         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
10676       break;
10677     switch (Inst.getOpcode()) {
10678     case ARM::t2ADDri12:
10679       Inst.setOpcode(ARM::t2ADDri);
10680       break;
10681     case ARM::t2SUBri12:
10682       Inst.setOpcode(ARM::t2SUBri);
10683       break;
10684     case ARM::t2ADDspImm12:
10685       Inst.setOpcode(ARM::t2ADDspImm);
10686       break;
10687     case ARM::t2SUBspImm12:
10688       Inst.setOpcode(ARM::t2SUBspImm);
10689       break;
10690     }
10691 
10692     Inst.addOperand(MCOperand::createReg(0)); // cc_out
10693     return true;
10694   }
10695   case ARM::tADDi8:
10696     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10697     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10698     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10699     // to encoding T1 if <Rd> is omitted."
10700     if (Inst.getOperand(3).isImm() &&
10701         (unsigned)Inst.getOperand(3).getImm() < 8 &&
10702         Operands.size() == MnemonicOpsEndInd + 3) {
10703       Inst.setOpcode(ARM::tADDi3);
10704       return true;
10705     }
10706     break;
10707   case ARM::tSUBi8:
10708     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10709     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10710     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10711     // to encoding T1 if <Rd> is omitted."
10712     if ((unsigned)Inst.getOperand(3).getImm() < 8 &&
10713         Operands.size() == MnemonicOpsEndInd + 3) {
10714       Inst.setOpcode(ARM::tSUBi3);
10715       return true;
10716     }
10717     break;
10718   case ARM::t2ADDri:
10719   case ARM::t2SUBri: {
10720     // If the destination and first source operand are the same, and
10721     // the flags are compatible with the current IT status, use encoding T2
10722     // instead of T3. For compatibility with the system 'as'. Make sure the
10723     // wide encoding wasn't explicit.
10724     if (HasWideQualifier)
10725       break; // source code has asked for the 32-bit instruction
10726     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg())
10727       break; // tADDi8 can't take different input and output registers
10728     if (!isARMLowRegister(Inst.getOperand(0).getReg()))
10729       break; // high register that tADDi8 can't access
10730     if (Inst.getOperand(5).getReg() !=
10731         (inITBlock() ? ARM::NoRegister : ARM::CPSR))
10732       break; // flag-modification would require overriding the IT state
10733     if (Inst.getOperand(2).isImm()) {
10734       if ((unsigned)Inst.getOperand(2).getImm() > 255)
10735         break; // large immediate that tADDi8 can't contain
10736     } else {
10737       int i = (Operands[MnemonicOpsEndInd + 1]->isImm())
10738                   ? MnemonicOpsEndInd + 1
10739                   : MnemonicOpsEndInd + 2;
10740       MCParsedAsmOperand &Op = *Operands[i];
10741       if (isARMMCExpr(Op) && !isThumbI8Relocation(Op))
10742         break; // a type of non-immediate that tADDi8 can't represent
10743     }
10744     MCInst TmpInst;
10745     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
10746                       ARM::tADDi8 : ARM::tSUBi8);
10747     TmpInst.addOperand(Inst.getOperand(0));
10748     TmpInst.addOperand(Inst.getOperand(5));
10749     TmpInst.addOperand(Inst.getOperand(0));
10750     TmpInst.addOperand(Inst.getOperand(2));
10751     TmpInst.addOperand(Inst.getOperand(3));
10752     TmpInst.addOperand(Inst.getOperand(4));
10753     Inst = TmpInst;
10754     return true;
10755   }
10756   case ARM::t2ADDspImm:
10757   case ARM::t2SUBspImm: {
10758     // Prefer T1 encoding if possible
10759     if (Inst.getOperand(5).getReg() || HasWideQualifier)
10760       break;
10761     unsigned V = Inst.getOperand(2).getImm();
10762     if (V & 3 || V > ((1 << 7) - 1) << 2)
10763       break;
10764     MCInst TmpInst;
10765     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDspImm ? ARM::tADDspi
10766                                                           : ARM::tSUBspi);
10767     TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // destination reg
10768     TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // source reg
10769     TmpInst.addOperand(MCOperand::createImm(V / 4));   // immediate
10770     TmpInst.addOperand(Inst.getOperand(3));            // pred
10771     TmpInst.addOperand(Inst.getOperand(4));
10772     Inst = TmpInst;
10773     return true;
10774   }
10775   case ARM::t2ADDrr: {
10776     // If the destination and first source operand are the same, and
10777     // there's no setting of the flags, use encoding T2 instead of T3.
10778     // Note that this is only for ADD, not SUB. This mirrors the system
10779     // 'as' behaviour.  Also take advantage of ADD being commutative.
10780     // Make sure the wide encoding wasn't explicit.
10781     bool Swap = false;
10782     auto DestReg = Inst.getOperand(0).getReg();
10783     bool Transform = DestReg == Inst.getOperand(1).getReg();
10784     if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
10785       Transform = true;
10786       Swap = true;
10787     }
10788     if (!Transform || Inst.getOperand(5).getReg() || HasWideQualifier)
10789       break;
10790     MCInst TmpInst;
10791     TmpInst.setOpcode(ARM::tADDhirr);
10792     TmpInst.addOperand(Inst.getOperand(0));
10793     TmpInst.addOperand(Inst.getOperand(0));
10794     TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
10795     TmpInst.addOperand(Inst.getOperand(3));
10796     TmpInst.addOperand(Inst.getOperand(4));
10797     Inst = TmpInst;
10798     return true;
10799   }
10800   case ARM::tADDrSP:
10801     // If the non-SP source operand and the destination operand are not the
10802     // same, we need to use the 32-bit encoding if it's available.
10803     if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
10804       Inst.setOpcode(ARM::t2ADDrr);
10805       Inst.addOperand(MCOperand::createReg(0)); // cc_out
10806       return true;
10807     }
10808     break;
10809   case ARM::tB:
10810     // A Thumb conditional branch outside of an IT block is a tBcc.
10811     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
10812       Inst.setOpcode(ARM::tBcc);
10813       return true;
10814     }
10815     break;
10816   case ARM::t2B:
10817     // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
10818     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
10819       Inst.setOpcode(ARM::t2Bcc);
10820       return true;
10821     }
10822     break;
10823   case ARM::t2Bcc:
10824     // If the conditional is AL or we're in an IT block, we really want t2B.
10825     if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
10826       Inst.setOpcode(ARM::t2B);
10827       return true;
10828     }
10829     break;
10830   case ARM::tBcc:
10831     // If the conditional is AL, we really want tB.
10832     if (Inst.getOperand(1).getImm() == ARMCC::AL) {
10833       Inst.setOpcode(ARM::tB);
10834       return true;
10835     }
10836     break;
10837   case ARM::tLDMIA: {
10838     // If the register list contains any high registers, or if the writeback
10839     // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
10840     // instead if we're in Thumb2. Otherwise, this should have generated
10841     // an error in validateInstruction().
10842     MCRegister Rn = Inst.getOperand(0).getReg();
10843     bool hasWritebackToken =
10844         (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
10845              .isToken() &&
10846          static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
10847                  .getToken() == "!");
10848     bool listContainsBase;
10849     if (checkLowRegisterList(Inst, 3, Rn, MCRegister(), listContainsBase) ||
10850         (!listContainsBase && !hasWritebackToken) ||
10851         (listContainsBase && hasWritebackToken)) {
10852       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10853       assert(isThumbTwo());
10854       Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10855       // If we're switching to the updating version, we need to insert
10856       // the writeback tied operand.
10857       if (hasWritebackToken)
10858         Inst.insert(Inst.begin(),
10859                     MCOperand::createReg(Inst.getOperand(0).getReg()));
10860       return true;
10861     }
10862     break;
10863   }
10864   case ARM::tSTMIA_UPD: {
10865     // If the register list contains any high registers, we need to use
10866     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10867     // should have generated an error in validateInstruction().
10868     MCRegister Rn = Inst.getOperand(0).getReg();
10869     bool listContainsBase;
10870     if (checkLowRegisterList(Inst, 4, Rn, MCRegister(), listContainsBase)) {
10871       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10872       assert(isThumbTwo());
10873       Inst.setOpcode(ARM::t2STMIA_UPD);
10874       return true;
10875     }
10876     break;
10877   }
10878   case ARM::tPOP: {
10879     bool listContainsBase;
10880     // If the register list contains any high registers, we need to use
10881     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10882     // should have generated an error in validateInstruction().
10883     if (!checkLowRegisterList(Inst, 2, MCRegister(), ARM::PC, listContainsBase))
10884       return false;
10885     assert(isThumbTwo());
10886     Inst.setOpcode(ARM::t2LDMIA_UPD);
10887     // Add the base register and writeback operands.
10888     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10889     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10890     return true;
10891   }
10892   case ARM::tPUSH: {
10893     bool listContainsBase;
10894     if (!checkLowRegisterList(Inst, 2, MCRegister(), ARM::LR, listContainsBase))
10895       return false;
10896     assert(isThumbTwo());
10897     Inst.setOpcode(ARM::t2STMDB_UPD);
10898     // Add the base register and writeback operands.
10899     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10900     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10901     return true;
10902   }
10903   case ARM::t2MOVi:
10904     // If we can use the 16-bit encoding and the user didn't explicitly
10905     // request the 32-bit variant, transform it here.
10906     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10907         (Inst.getOperand(1).isImm() &&
10908          (unsigned)Inst.getOperand(1).getImm() <= 255) &&
10909         Inst.getOperand(4).getReg() ==
10910             (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
10911         !HasWideQualifier) {
10912       // The operands aren't in the same order for tMOVi8...
10913       MCInst TmpInst;
10914       TmpInst.setOpcode(ARM::tMOVi8);
10915       TmpInst.addOperand(Inst.getOperand(0));
10916       TmpInst.addOperand(Inst.getOperand(4));
10917       TmpInst.addOperand(Inst.getOperand(1));
10918       TmpInst.addOperand(Inst.getOperand(2));
10919       TmpInst.addOperand(Inst.getOperand(3));
10920       Inst = TmpInst;
10921       return true;
10922     }
10923     break;
10924 
10925   case ARM::t2MOVr:
10926     // If we can use the 16-bit encoding and the user didn't explicitly
10927     // request the 32-bit variant, transform it here.
10928     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10929         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10930         Inst.getOperand(2).getImm() == ARMCC::AL &&
10931         Inst.getOperand(4).getReg() == ARM::CPSR &&
10932         !HasWideQualifier) {
10933       // The operands aren't the same for tMOV[S]r... (no cc_out)
10934       MCInst TmpInst;
10935       unsigned Op = Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr;
10936       TmpInst.setOpcode(Op);
10937       TmpInst.addOperand(Inst.getOperand(0));
10938       TmpInst.addOperand(Inst.getOperand(1));
10939       if (Op == ARM::tMOVr) {
10940         TmpInst.addOperand(Inst.getOperand(2));
10941         TmpInst.addOperand(Inst.getOperand(3));
10942       }
10943       Inst = TmpInst;
10944       return true;
10945     }
10946     break;
10947 
10948   case ARM::t2SXTH:
10949   case ARM::t2SXTB:
10950   case ARM::t2UXTH:
10951   case ARM::t2UXTB:
10952     // If we can use the 16-bit encoding and the user didn't explicitly
10953     // request the 32-bit variant, transform it here.
10954     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10955         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10956         Inst.getOperand(2).getImm() == 0 &&
10957         !HasWideQualifier) {
10958       unsigned NewOpc;
10959       switch (Inst.getOpcode()) {
10960       default: llvm_unreachable("Illegal opcode!");
10961       case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
10962       case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
10963       case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
10964       case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
10965       }
10966       // The operands aren't the same for thumb1 (no rotate operand).
10967       MCInst TmpInst;
10968       TmpInst.setOpcode(NewOpc);
10969       TmpInst.addOperand(Inst.getOperand(0));
10970       TmpInst.addOperand(Inst.getOperand(1));
10971       TmpInst.addOperand(Inst.getOperand(3));
10972       TmpInst.addOperand(Inst.getOperand(4));
10973       Inst = TmpInst;
10974       return true;
10975     }
10976     break;
10977 
10978   case ARM::MOVsi: {
10979     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
10980     // rrx shifts and asr/lsr of #32 is encoded as 0
10981     if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
10982       return false;
10983     if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
10984       // Shifting by zero is accepted as a vanilla 'MOVr'
10985       MCInst TmpInst;
10986       TmpInst.setOpcode(ARM::MOVr);
10987       TmpInst.addOperand(Inst.getOperand(0));
10988       TmpInst.addOperand(Inst.getOperand(1));
10989       TmpInst.addOperand(Inst.getOperand(3));
10990       TmpInst.addOperand(Inst.getOperand(4));
10991       TmpInst.addOperand(Inst.getOperand(5));
10992       Inst = TmpInst;
10993       return true;
10994     }
10995     return false;
10996   }
10997   case ARM::ANDrsi:
10998   case ARM::ORRrsi:
10999   case ARM::EORrsi:
11000   case ARM::BICrsi:
11001   case ARM::SUBrsi:
11002   case ARM::ADDrsi: {
11003     unsigned newOpc;
11004     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
11005     if (SOpc == ARM_AM::rrx) return false;
11006     switch (Inst.getOpcode()) {
11007     default: llvm_unreachable("unexpected opcode!");
11008     case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
11009     case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
11010     case ARM::EORrsi: newOpc = ARM::EORrr; break;
11011     case ARM::BICrsi: newOpc = ARM::BICrr; break;
11012     case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
11013     case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
11014     }
11015     // If the shift is by zero, use the non-shifted instruction definition.
11016     // The exception is for right shifts, where 0 == 32
11017     if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
11018         !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
11019       MCInst TmpInst;
11020       TmpInst.setOpcode(newOpc);
11021       TmpInst.addOperand(Inst.getOperand(0));
11022       TmpInst.addOperand(Inst.getOperand(1));
11023       TmpInst.addOperand(Inst.getOperand(2));
11024       TmpInst.addOperand(Inst.getOperand(4));
11025       TmpInst.addOperand(Inst.getOperand(5));
11026       TmpInst.addOperand(Inst.getOperand(6));
11027       Inst = TmpInst;
11028       return true;
11029     }
11030     return false;
11031   }
11032   case ARM::ITasm:
11033   case ARM::t2IT: {
11034     // Set up the IT block state according to the IT instruction we just
11035     // matched.
11036     assert(!inITBlock() && "nested IT blocks?!");
11037     startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()),
11038                          Inst.getOperand(1).getImm());
11039     break;
11040   }
11041   case ARM::t2LSLrr:
11042   case ARM::t2LSRrr:
11043   case ARM::t2ASRrr:
11044   case ARM::t2SBCrr:
11045   case ARM::t2RORrr:
11046   case ARM::t2BICrr:
11047     // Assemblers should use the narrow encodings of these instructions when permissible.
11048     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
11049          isARMLowRegister(Inst.getOperand(2).getReg())) &&
11050         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
11051         Inst.getOperand(5).getReg() ==
11052             (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
11053         !HasWideQualifier) {
11054       unsigned NewOpc;
11055       switch (Inst.getOpcode()) {
11056         default: llvm_unreachable("unexpected opcode");
11057         case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
11058         case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
11059         case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
11060         case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
11061         case ARM::t2RORrr: NewOpc = ARM::tROR; break;
11062         case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
11063       }
11064       MCInst TmpInst;
11065       TmpInst.setOpcode(NewOpc);
11066       TmpInst.addOperand(Inst.getOperand(0));
11067       TmpInst.addOperand(Inst.getOperand(5));
11068       TmpInst.addOperand(Inst.getOperand(1));
11069       TmpInst.addOperand(Inst.getOperand(2));
11070       TmpInst.addOperand(Inst.getOperand(3));
11071       TmpInst.addOperand(Inst.getOperand(4));
11072       Inst = TmpInst;
11073       return true;
11074     }
11075     return false;
11076 
11077   case ARM::t2ANDrr:
11078   case ARM::t2EORrr:
11079   case ARM::t2ADCrr:
11080   case ARM::t2ORRrr:
11081     // Assemblers should use the narrow encodings of these instructions when permissible.
11082     // These instructions are special in that they are commutable, so shorter encodings
11083     // are available more often.
11084     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
11085          isARMLowRegister(Inst.getOperand(2).getReg())) &&
11086         (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
11087          Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
11088         Inst.getOperand(5).getReg() ==
11089             (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
11090         !HasWideQualifier) {
11091       unsigned NewOpc;
11092       switch (Inst.getOpcode()) {
11093         default: llvm_unreachable("unexpected opcode");
11094         case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
11095         case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
11096         case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
11097         case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
11098       }
11099       MCInst TmpInst;
11100       TmpInst.setOpcode(NewOpc);
11101       TmpInst.addOperand(Inst.getOperand(0));
11102       TmpInst.addOperand(Inst.getOperand(5));
11103       if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
11104         TmpInst.addOperand(Inst.getOperand(1));
11105         TmpInst.addOperand(Inst.getOperand(2));
11106       } else {
11107         TmpInst.addOperand(Inst.getOperand(2));
11108         TmpInst.addOperand(Inst.getOperand(1));
11109       }
11110       TmpInst.addOperand(Inst.getOperand(3));
11111       TmpInst.addOperand(Inst.getOperand(4));
11112       Inst = TmpInst;
11113       return true;
11114     }
11115     return false;
11116   case ARM::MVE_VPST:
11117   case ARM::MVE_VPTv16i8:
11118   case ARM::MVE_VPTv8i16:
11119   case ARM::MVE_VPTv4i32:
11120   case ARM::MVE_VPTv16u8:
11121   case ARM::MVE_VPTv8u16:
11122   case ARM::MVE_VPTv4u32:
11123   case ARM::MVE_VPTv16s8:
11124   case ARM::MVE_VPTv8s16:
11125   case ARM::MVE_VPTv4s32:
11126   case ARM::MVE_VPTv4f32:
11127   case ARM::MVE_VPTv8f16:
11128   case ARM::MVE_VPTv16i8r:
11129   case ARM::MVE_VPTv8i16r:
11130   case ARM::MVE_VPTv4i32r:
11131   case ARM::MVE_VPTv16u8r:
11132   case ARM::MVE_VPTv8u16r:
11133   case ARM::MVE_VPTv4u32r:
11134   case ARM::MVE_VPTv16s8r:
11135   case ARM::MVE_VPTv8s16r:
11136   case ARM::MVE_VPTv4s32r:
11137   case ARM::MVE_VPTv4f32r:
11138   case ARM::MVE_VPTv8f16r: {
11139     assert(!inVPTBlock() && "Nested VPT blocks are not allowed");
11140     MCOperand &MO = Inst.getOperand(0);
11141     VPTState.Mask = MO.getImm();
11142     VPTState.CurPosition = 0;
11143     break;
11144   }
11145   }
11146   return false;
11147 }
11148 
11149 unsigned
11150 ARMAsmParser::checkEarlyTargetMatchPredicate(MCInst &Inst,
11151                                              const OperandVector &Operands) {
11152   unsigned Opc = Inst.getOpcode();
11153   switch (Opc) {
11154   // Prevent the mov r8 r8 encoding for nop being selected when the v6/thumb 2
11155   // encoding is available.
11156   case ARM::tMOVr: {
11157     if (Operands[0]->isToken() &&
11158         static_cast<ARMOperand &>(*Operands[0]).getToken() == "nop" &&
11159         ((isThumb() && !isThumbOne()) || hasV6MOps())) {
11160       return Match_MnemonicFail;
11161     }
11162   }
11163     [[fallthrough]];
11164   default:
11165     return Match_Success;
11166   }
11167 }
11168 
/// Post-match target predicate checks that the table-generated matcher cannot
/// express: IT-block interaction with flag-setting Thumb arithmetic,
/// architecture-version restrictions on SP/PC usage, and tied-operand rules.
/// Returns Match_Success or a specific Match_Requires*/Match_Invalid* code.
unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
  // suffix depending on whether they're in an IT block or not.
  unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &MCID = MII.get(Opc);
  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
    assert(MCID.hasOptionalDef() &&
           "optionally flag setting instruction missing optional def operand");
    assert(MCID.NumOperands == Inst.getNumOperands() &&
           "operand count mismatch!");
    bool IsCPSR = false;
    // Check if the instruction has CPSR set: scan for the optional-def
    // operand and see whether it was filled in with CPSR (flag-setting form)
    // rather than NoRegister.
    for (unsigned OpNo = 0; OpNo < MCID.NumOperands; ++OpNo) {
      if (MCID.operands()[OpNo].isOptionalDef() &&
          Inst.getOperand(OpNo).isReg() &&
          Inst.getOperand(OpNo).getReg() == ARM::CPSR)
        IsCPSR = true;
    }

    // If we're parsing Thumb1, reject it completely.
    if (isThumbOne() && !IsCPSR)
      return Match_RequiresFlagSetting;
    // If we're parsing Thumb2, which form is legal depends on whether we're
    // in an IT block.
    if (isThumbTwo() && !IsCPSR && !inITBlock())
      return Match_RequiresITBlock;
    if (isThumbTwo() && IsCPSR && inITBlock())
      return Match_RequiresNotITBlock;
    // LSL with zero immediate is not allowed in an IT block
    if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
      return Match_RequiresNotITBlock;
  } else if (isThumbOne()) {
    // Some high-register supporting Thumb1 encodings only allow both registers
    // to be from r0-r7 when in Thumb2.
    if (Opc == ARM::tADDhirr && !hasV6MOps() &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        isARMLowRegister(Inst.getOperand(2).getReg()))
      return Match_RequiresThumb2;
    // Others only require ARMv6 or later.
    else if (Opc == ARM::tMOVr && !hasV6Ops() &&
             isARMLowRegister(Inst.getOperand(0).getReg()) &&
             isARMLowRegister(Inst.getOperand(1).getReg()))
      return Match_RequiresV6;
  }

  // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
  // than the loop below can handle, so it uses the GPRnopc register class and
  // we do SP handling here.
  if (Opc == ARM::t2MOVr && !hasV8Ops())
  {
    // SP as both source and destination is not allowed
    if (Inst.getOperand(0).getReg() == ARM::SP &&
        Inst.getOperand(1).getReg() == ARM::SP)
      return Match_RequiresV8;
    // When flags-setting SP as either source or destination is not allowed
    if (Inst.getOperand(4).getReg() == ARM::CPSR &&
        (Inst.getOperand(0).getReg() == ARM::SP ||
         Inst.getOperand(1).getReg() == ARM::SP))
      return Match_RequiresV8;
  }

  // Per-opcode special cases for SP usage and tied operands.
  switch (Inst.getOpcode()) {
  case ARM::VMRS:
  case ARM::VMSR:
  case ARM::VMRS_FPCXTS:
  case ARM::VMRS_FPCXTNS:
  case ARM::VMSR_FPCXTS:
  case ARM::VMSR_FPCXTNS:
  case ARM::VMRS_FPSCR_NZCVQC:
  case ARM::VMSR_FPSCR_NZCVQC:
  case ARM::FMSTAT:
  case ARM::VMRS_VPR:
  case ARM::VMRS_P0:
  case ARM::VMSR_VPR:
  case ARM::VMSR_P0:
    // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of
    // ARMv8-A.
    if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP &&
        (isThumb() && !hasV8Ops()))
      return Match_InvalidOperand;
    break;
  case ARM::t2TBB:
  case ARM::t2TBH:
    // Rn = sp is only allowed with ARMv8-A
    if (!hasV8Ops() && (Inst.getOperand(0).getReg() == ARM::SP))
      return Match_RequiresV8;
    break;
  case ARM::tMUL:
    // The second source operand must be the same register as the destination
    // operand.
    // FIXME: Ideally this would be handled by ARMGenAsmMatcher and
    // emitAsmTiedOperandConstraints.
    if (Inst.getOperand(0).getReg() != Inst.getOperand(3).getReg())
      return Match_InvalidTiedOperand;
    break;
  default:
    break;
  }

  // Generic check: operands in the rGPR class must not be PC, and must not be
  // SP before ARMv8.
  for (unsigned I = 0; I < MCID.NumOperands; ++I)
    if (MCID.operands()[I].RegClass == ARM::rGPRRegClassID) {
      // rGPRRegClass excludes PC, and also excluded SP before ARMv8
      const auto &Op = Inst.getOperand(I);
      if (!Op.isReg()) {
        // This can happen in awkward cases with tied operands, e.g. a
        // writeback load/store with a complex addressing mode in
        // which there's an output operand corresponding to the
        // updated written-back base register: the Tablegen-generated
        // AsmMatcher will have written a placeholder operand to that
        // slot in the form of an immediate 0, because it can't
        // generate the register part of the complex addressing-mode
        // operand ahead of time.
        continue;
      }

      MCRegister Reg = Op.getReg();
      if ((Reg == ARM::SP) && !hasV8Ops())
        return Match_RequiresV8;
      else if (Reg == ARM::PC)
        return Match_InvalidOperand;
    }

  return Match_Success;
}
11293 
namespace llvm {

// Specialization of the IsCPSRDead query for bare MCInsts. At assembly-parse
// time there is no liveness information available, so always report CPSR as
// dead rather than second-guessing the programmer.
// NOTE(review): presumably consulted by the IT-block eligibility checks in
// ARMFeatures.h — confirm against the primary template's callers.
template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
  return true; // In an assembly source, no need to second-guess
}

} // end namespace llvm
11301 
11302 // Returns true if Inst is unpredictable if it is in and IT block, but is not
11303 // the last instruction in the block.
11304 bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
11305   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11306 
11307   // All branch & call instructions terminate IT blocks with the exception of
11308   // SVC.
11309   if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
11310       MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
11311     return true;
11312 
11313   // Any arithmetic instruction which writes to the PC also terminates the IT
11314   // block.
11315   if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI))
11316     return true;
11317 
11318   return false;
11319 }
11320 
/// Match an instruction, transparently creating, extending, or closing
/// implicit IT blocks as needed for conditional Thumb2 instructions. On
/// Match_Success, EmitInITBlock tells the caller whether the instruction must
/// be queued as part of the pending implicit IT block or emitted directly.
/// Matching is attempted in three stages: inside the current implicit IT
/// block (if any), outside any IT block, and finally in a freshly started
/// implicit IT block.
unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
                                          SmallVectorImpl<NearMissInfo> &NearMisses,
                                          bool MatchingInlineAsm,
                                          bool &EmitInITBlock,
                                          MCStreamer &Out) {
  // If we can't use an implicit IT block here, just match as normal.
  if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
    return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);

  // Try to match the instruction in an extension of the current IT block (if
  // there is one).
  if (inImplicitITBlock()) {
    extendImplicitITBlock(ITState.Cond);
    if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
            Match_Success) {
      // The match succeeded, but we still have to check that the instruction is
      // valid in this implicit IT block.
      const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
      if (MCID.isPredicable()) {
        ARMCC::CondCodes InstCond =
            (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
                .getImm();
        ARMCC::CondCodes ITCond = currentITCond();
        if (InstCond == ITCond) {
          EmitInITBlock = true;
          return Match_Success;
        } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
          invertCurrentITCondition();
          EmitInITBlock = true;
          return Match_Success;
        }
      }
    }
    // Couldn't use the IT block extension; undo it before trying again.
    rewindImplicitITPosition();
  }

  // Finish the current IT block, and try to match outside any IT block.
  flushPendingInstructions(Out);
  unsigned PlainMatchResult =
      MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
  if (PlainMatchResult == Match_Success) {
    const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
    if (MCID.isPredicable()) {
      ARMCC::CondCodes InstCond =
          (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
              .getImm();
      // Some forms of the branch instruction have their own condition code
      // fields, so can be conditionally executed without an IT block.
      if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
        EmitInITBlock = false;
        return Match_Success;
      }
      if (InstCond == ARMCC::AL) {
        EmitInITBlock = false;
        return Match_Success;
      }
    } else {
      EmitInITBlock = false;
      return Match_Success;
    }
  }

  // Try to match in a new IT block. The matcher doesn't check the actual
  // condition, so we create an IT block with a dummy condition, and fix it up
  // once we know the actual condition.
  startImplicitITBlock();
  if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
      Match_Success) {
    const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
    if (MCID.isPredicable()) {
      ITState.Cond =
          (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
              .getImm();
      EmitInITBlock = true;
      return Match_Success;
    }
  }
  discardImplicitITBlock();

  // If none of these succeed, return the error we got when trying to match
  // outside any IT blocks.
  EmitInITBlock = false;
  return PlainMatchResult;
}
11405 
11406 static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
11407                                          unsigned VariantID = 0);
11408 
11409 static const char *getSubtargetFeatureName(uint64_t Val);
/// Top-level match-and-emit entry point: match the parsed operands to an
/// MCInst, validate and post-process it, then emit it (or queue it on the
/// pending implicit IT block). Returns true on error.
bool ARMAsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                           OperandVector &Operands,
                                           MCStreamer &Out, uint64_t &ErrorInfo,
                                           bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;
  // Set by MatchInstruction when the instruction belongs to an implicit IT
  // block and must be queued rather than emitted immediately.
  bool PendConditionalInstruction = false;

  SmallVector<NearMissInfo, 4> NearMisses;
  MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
                                 PendConditionalInstruction, Out);

  // Find the number of operands that are part of the Mnemonic (LHS).
  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);

  switch (MatchResult) {
  case Match_Success:
    LLVM_DEBUG(dbgs() << "Parsed as: ";
               Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
               dbgs() << "\n");

    // Context sensitive operand constraints aren't handled by the matcher,
    // so check them here.
    if (validateInstruction(Inst, Operands, MnemonicOpsEndInd)) {
      // Still progress the IT block, otherwise one wrong condition causes
      // nasty cascading errors.
      forwardITPosition();
      forwardVPTPosition();
      return true;
    }

    {
      // Some instructions need post-processing to, for example, tweak which
      // encoding is selected. Loop on it while changes happen so the
      // individual transformations can chain off each other. E.g.,
      // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
      while (processInstruction(Inst, Operands, MnemonicOpsEndInd, Out))
        LLVM_DEBUG(dbgs() << "Changed to: ";
                   Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
                   dbgs() << "\n");
    }

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();
    forwardVPTPosition();

    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
    // doesn't actually encode.
    if (Inst.getOpcode() == ARM::ITasm)
      return false;

    Inst.setLoc(IDLoc);
    if (PendConditionalInstruction) {
      // Queue the instruction on the implicit IT block; flush once the block
      // is full or this instruction must be its last member.
      PendingConditionalInsts.push_back(Inst);
      if (isITBlockFull() || isITBlockTerminator(Inst))
        flushPendingInstructions(Out);
    } else {
      Out.emitInstruction(Inst, getSTI());
    }
    return false;
  case Match_NearMisses:
    ReportNearMisses(NearMisses, IDLoc, Operands);
    return true;
  case Match_MnemonicFail: {
    // Unknown mnemonic: offer a spelling suggestion based on the features
    // currently available.
    FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = ARMMnemonicSpellCheck(
      ((ARMOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "invalid instruction" + Suggestion,
                 ((ARMOperand &)*Operands[0]).getLocRange());
  }
  }

  llvm_unreachable("Implement any new match types added!");
}
11486 
/// ParseDirective parses the arm specific directives
/// Returns false when the directive was recognized here (sub-parser
/// diagnostics notwithstanding — their return values are discarded), true to
/// hand the directive to the generic parser.
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;

  // Directive names are matched case-insensitively.
  std::string IDVal = DirectiveID.getIdentifier().lower();
  if (IDVal == ".word")
    parseLiteralValues(4, DirectiveID.getLoc());
  else if (IDVal == ".short" || IDVal == ".hword")
    parseLiteralValues(2, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".arm")
    parseDirectiveARM(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    parseDirectiveSyntax(DirectiveID.getLoc());
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(DirectiveID.getLoc());
  else if (IDVal == ".fnend")
    parseDirectiveFnEnd(DirectiveID.getLoc());
  else if (IDVal == ".cantunwind")
    parseDirectiveCantUnwind(DirectiveID.getLoc());
  else if (IDVal == ".personality")
    parseDirectivePersonality(DirectiveID.getLoc());
  else if (IDVal == ".handlerdata")
    parseDirectiveHandlerData(DirectiveID.getLoc());
  else if (IDVal == ".setfp")
    parseDirectiveSetFP(DirectiveID.getLoc());
  else if (IDVal == ".pad")
    parseDirectivePad(DirectiveID.getLoc());
  else if (IDVal == ".save")
    parseDirectiveRegSave(DirectiveID.getLoc(), false);
  else if (IDVal == ".vsave")
    parseDirectiveRegSave(DirectiveID.getLoc(), true);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(DirectiveID.getLoc());
  else if (IDVal == ".even")
    parseDirectiveEven(DirectiveID.getLoc());
  else if (IDVal == ".personalityindex")
    parseDirectivePersonalityIndex(DirectiveID.getLoc());
  else if (IDVal == ".unwind_raw")
    parseDirectiveUnwindRaw(DirectiveID.getLoc());
  else if (IDVal == ".movsp")
    parseDirectiveMovSP(DirectiveID.getLoc());
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(DirectiveID.getLoc());
  else if (IDVal == ".align")
    return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
  else if (IDVal == ".thumb_set")
    parseDirectiveThumbSet(DirectiveID.getLoc());
  else if (IDVal == ".inst")
    parseDirectiveInst(DirectiveID.getLoc());
  else if (IDVal == ".inst.n")
    parseDirectiveInst(DirectiveID.getLoc(), 'n');
  else if (IDVal == ".inst.w")
    parseDirectiveInst(DirectiveID.getLoc(), 'w');
  else if (!IsMachO && !IsCOFF) {
    // Directives not supported on MachO or COFF (i.e. ELF-style targets).
    if (IDVal == ".arch")
      parseDirectiveArch(DirectiveID.getLoc());
    else if (IDVal == ".cpu")
      parseDirectiveCPU(DirectiveID.getLoc());
    else if (IDVal == ".eabi_attribute")
      parseDirectiveEabiAttr(DirectiveID.getLoc());
    else if (IDVal == ".fpu")
      parseDirectiveFPU(DirectiveID.getLoc());
    else if (IDVal == ".fnstart")
      parseDirectiveFnStart(DirectiveID.getLoc());
    else if (IDVal == ".object_arch")
      parseDirectiveObjectArch(DirectiveID.getLoc());
    else if (IDVal == ".tlsdescseq")
      parseDirectiveTLSDescSeq(DirectiveID.getLoc());
    else
      return true;
  } else if (IsCOFF) {
    // COFF structured-exception-handling (SEH) unwind directives.
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/false);
    else if (IDVal == ".seh_stackalloc_w")
      parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/true);
    else if (IDVal == ".seh_save_regs")
      parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/false);
    else if (IDVal == ".seh_save_regs_w")
      parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/true);
    else if (IDVal == ".seh_save_sp")
      parseDirectiveSEHSaveSP(DirectiveID.getLoc());
    else if (IDVal == ".seh_save_fregs")
      parseDirectiveSEHSaveFRegs(DirectiveID.getLoc());
    else if (IDVal == ".seh_save_lr")
      parseDirectiveSEHSaveLR(DirectiveID.getLoc());
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/false);
    else if (IDVal == ".seh_endprologue_fragment")
      parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/true);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/false);
    else if (IDVal == ".seh_nop_w")
      parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/true);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/false);
    else if (IDVal == ".seh_startepilogue_cond")
      parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/true);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(DirectiveID.getLoc());
    else if (IDVal == ".seh_custom")
      parseDirectiveSEHCustom(DirectiveID.getLoc());
    else
      return true;
  } else
    return true;
  return false;
}
11602 
11603 /// parseLiteralValues
11604 ///  ::= .hword expression [, expression]*
11605 ///  ::= .short expression [, expression]*
11606 ///  ::= .word expression [, expression]*
11607 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
11608   auto parseOne = [&]() -> bool {
11609     const MCExpr *Value;
11610     if (getParser().parseExpression(Value))
11611       return true;
11612     getParser().getStreamer().emitValue(Value, Size, L);
11613     return false;
11614   };
11615   return (parseMany(parseOne));
11616 }
11617 
11618 /// parseDirectiveThumb
11619 ///  ::= .thumb
11620 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
11621   if (parseEOL() || check(!hasThumb(), L, "target does not support Thumb mode"))
11622     return true;
11623 
11624   if (!isThumb())
11625     SwitchMode();
11626 
11627   getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11628   getParser().getStreamer().emitCodeAlignment(Align(2), &getSTI(), 0);
11629   return false;
11630 }
11631 
11632 /// parseDirectiveARM
11633 ///  ::= .arm
11634 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
11635   if (parseEOL() || check(!hasARM(), L, "target does not support ARM mode"))
11636     return true;
11637 
11638   if (isThumb())
11639     SwitchMode();
11640   getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11641   getParser().getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
11642   return false;
11643 }
11644 
/// Map a symbol modifier name (matched case-insensitively via Name.lower())
/// to the corresponding MCSymbolRefExpr variant kind. Unknown names yield
/// VK_Invalid.
MCSymbolRefExpr::VariantKind
ARMAsmParser::getVariantKindForName(StringRef Name) const {
  return StringSwitch<MCSymbolRefExpr::VariantKind>(Name.lower())
      .Case("funcdesc", MCSymbolRefExpr::VK_FUNCDESC)
      .Case("got", MCSymbolRefExpr::VK_GOT)
      .Case("got_prel", MCSymbolRefExpr::VK_ARM_GOT_PREL)
      .Case("gotfuncdesc", MCSymbolRefExpr::VK_GOTFUNCDESC)
      .Case("gotoff", MCSymbolRefExpr::VK_GOTOFF)
      .Case("gotofffuncdesc", MCSymbolRefExpr::VK_GOTOFFFUNCDESC)
      .Case("gottpoff", MCSymbolRefExpr::VK_GOTTPOFF)
      .Case("gottpoff_fdpic", MCSymbolRefExpr::VK_GOTTPOFF_FDPIC)
      .Case("imgrel", MCSymbolRefExpr::VK_COFF_IMGREL32)
      .Case("none", MCSymbolRefExpr::VK_ARM_NONE)
      .Case("plt", MCSymbolRefExpr::VK_PLT)
      .Case("prel31", MCSymbolRefExpr::VK_ARM_PREL31)
      .Case("sbrel", MCSymbolRefExpr::VK_ARM_SBREL)
      .Case("secrel32", MCSymbolRefExpr::VK_SECREL)
      .Case("target1", MCSymbolRefExpr::VK_ARM_TARGET1)
      .Case("target2", MCSymbolRefExpr::VK_ARM_TARGET2)
      .Case("tlscall", MCSymbolRefExpr::VK_TLSCALL)
      .Case("tlsdesc", MCSymbolRefExpr::VK_TLSDESC)
      .Case("tlsgd", MCSymbolRefExpr::VK_TLSGD)
      .Case("tlsgd_fdpic", MCSymbolRefExpr::VK_TLSGD_FDPIC)
      .Case("tlsld", MCSymbolRefExpr::VK_TLSLD)
      .Case("tlsldm", MCSymbolRefExpr::VK_TLSLDM)
      .Case("tlsldm_fdpic", MCSymbolRefExpr::VK_TLSLDM_FDPIC)
      .Case("tlsldo", MCSymbolRefExpr::VK_ARM_TLSLDO)
      .Case("tpoff", MCSymbolRefExpr::VK_TPOFF)
      .Default(MCSymbolRefExpr::VK_Invalid);
}
11675 
/// Hook run before a label is emitted. Symbol and IDLoc are unused here.
void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) {
  // We need to flush the current implicit IT block on a label, because it is
  // not legal to branch into an IT block.
  flushPendingInstructions(getStreamer());
}
11681 
11682 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
11683   if (NextSymbolIsThumb) {
11684     getParser().getStreamer().emitThumbFunc(Symbol);
11685     NextSymbolIsThumb = false;
11686   }
11687 }
11688 
11689 /// parseDirectiveThumbFunc
11690 ///  ::= .thumbfunc symbol_name
11691 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
11692   MCAsmParser &Parser = getParser();
11693   const auto Format = getContext().getObjectFileType();
11694   bool IsMachO = Format == MCContext::IsMachO;
11695 
11696   // Darwin asm has (optionally) function name after .thumb_func direction
11697   // ELF doesn't
11698 
11699   if (IsMachO) {
11700     if (Parser.getTok().is(AsmToken::Identifier) ||
11701         Parser.getTok().is(AsmToken::String)) {
11702       MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
11703           Parser.getTok().getIdentifier());
11704       getParser().getStreamer().emitThumbFunc(Func);
11705       Parser.Lex();
11706       if (parseEOL())
11707         return true;
11708       return false;
11709     }
11710   }
11711 
11712   if (parseEOL())
11713     return true;
11714 
11715   // .thumb_func implies .thumb
11716   if (!isThumb())
11717     SwitchMode();
11718 
11719   getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11720 
11721   NextSymbolIsThumb = true;
11722   return false;
11723 }
11724 
/// parseDirectiveSyntax
///  ::= .syntax unified | divided
bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) {
    // NOTE(review): reports the diagnostic but returns false (handled), so
    // the caller keeps going rather than treating this as a hard parse
    // failure — confirm this leniency is intentional.
    Error(L, "unexpected token in .syntax directive");
    return false;
  }

  StringRef Mode = Tok.getString();
  Parser.Lex();
  // Only unified syntax is supported; divided syntax is explicitly rejected.
  if (check(Mode == "divided" || Mode == "DIVIDED", L,
            "'.syntax divided' arm assembly not supported") ||
      check(Mode != "unified" && Mode != "UNIFIED", L,
            "unrecognized syntax mode in .syntax directive") ||
      parseEOL())
    return true;

  // TODO tell the MC streamer the mode
  // getParser().getStreamer().Emit???();
  return false;
}
11748 
11749 /// parseDirectiveCode
11750 ///  ::= .code 16 | 32
11751 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
11752   MCAsmParser &Parser = getParser();
11753   const AsmToken &Tok = Parser.getTok();
11754   if (Tok.isNot(AsmToken::Integer))
11755     return Error(L, "unexpected token in .code directive");
11756   int64_t Val = Parser.getTok().getIntVal();
11757   if (Val != 16 && Val != 32) {
11758     Error(L, "invalid operand to .code directive");
11759     return false;
11760   }
11761   Parser.Lex();
11762 
11763   if (parseEOL())
11764     return true;
11765 
11766   if (Val == 16) {
11767     if (!hasThumb())
11768       return Error(L, "target does not support Thumb mode");
11769 
11770     if (!isThumb())
11771       SwitchMode();
11772     getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11773   } else {
11774     if (!hasARM())
11775       return Error(L, "target does not support ARM mode");
11776 
11777     if (isThumb())
11778       SwitchMode();
11779     getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11780   }
11781 
11782   return false;
11783 }
11784 
11785 /// parseDirectiveReq
11786 ///  ::= name .req registername
11787 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
11788   MCAsmParser &Parser = getParser();
11789   Parser.Lex(); // Eat the '.req' token.
11790   MCRegister Reg;
11791   SMLoc SRegLoc, ERegLoc;
11792   if (check(parseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
11793             "register name expected") ||
11794       parseEOL())
11795     return true;
11796 
11797   if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
11798     return Error(SRegLoc,
11799                  "redefinition of '" + Name + "' does not match original.");
11800 
11801   return false;
11802 }
11803 
11804 /// parseDirectiveUneq
11805 ///  ::= .unreq registername
11806 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
11807   MCAsmParser &Parser = getParser();
11808   if (Parser.getTok().isNot(AsmToken::Identifier))
11809     return Error(L, "unexpected input in .unreq directive.");
11810   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
11811   Parser.Lex(); // Eat the identifier.
11812   return parseEOL();
11813 }
11814 
// After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
// before, if supported by the new target, or emit mapping symbols for the mode
// switch.
void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
  // Only act if the arch change flipped the current mode.
  if (WasThumb != isThumb()) {
    if (WasThumb && hasThumb()) {
      // Stay in Thumb mode
      SwitchMode();
    } else if (!WasThumb && hasARM()) {
      // Stay in ARM mode
      SwitchMode();
    } else {
      // Mode switch forced, because the new arch doesn't support the old mode.
      getParser().getStreamer().emitAssemblerFlag(isThumb() ? MCAF_Code16
                                                            : MCAF_Code32);
      // Warn about the implicit mode switch. GAS does not switch modes here,
      // but instead stays in the old mode, reporting an error on any following
      // instructions as the mode does not exist on the target.
      Warning(Loc, Twine("new target does not support ") +
                       (WasThumb ? "thumb" : "arm") + " mode, switching to " +
                       (!WasThumb ? "thumb" : "arm") + " mode");
    }
  }
}
11839 
11840 /// parseDirectiveArch
11841 ///  ::= .arch token
11842 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
11843   StringRef Arch = getParser().parseStringToEndOfStatement().trim();
11844   ARM::ArchKind ID = ARM::parseArch(Arch);
11845 
11846   if (ID == ARM::ArchKind::INVALID)
11847     return Error(L, "Unknown arch name");
11848 
11849   bool WasThumb = isThumb();
11850   Triple T;
11851   MCSubtargetInfo &STI = copySTI();
11852   STI.setDefaultFeatures("", /*TuneCPU*/ "",
11853                          ("+" + ARM::getArchName(ID)).str());
11854   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11855   FixModeAfterArchChange(WasThumb, L);
11856 
11857   getTargetStreamer().emitArch(ID);
11858   return false;
11859 }
11860 
/// parseDirectiveEabiAttr
///  ::= .eabi_attribute int, int [, "str"]
///  ::= .eabi_attribute Tag_name, int [, "str"]
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t Tag;
  SMLoc TagLoc;
  TagLoc = Parser.getTok().getLoc();
  // The tag is either a symbolic attribute name or a numeric constant
  // expression.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    StringRef Name = Parser.getTok().getIdentifier();
    std::optional<unsigned> Ret = ELFAttrs::attrTypeFromString(
        Name, ARMBuildAttrs::getARMAttributeTags());
    if (!Ret) {
      // NOTE(review): reports the diagnostic but returns false (no parse
      // failure) -- presumably intentional so assembly continues; confirm
      // before changing.
      Error(TagLoc, "attribute name not recognised: " + Name);
      return false;
    }
    Tag = *Ret;
    Parser.Lex();
  } else {
    const MCExpr *AttrExpr;

    TagLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(AttrExpr))
      return true;

    // Only expressions that fold to a constant are usable as tag numbers.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
    if (check(!CE, TagLoc, "expected numeric constant"))
      return true;

    Tag = CE->getValue();
  }

  if (Parser.parseComma())
    return true;

  StringRef StringValue = "";
  bool IsStringValue = false;

  int64_t IntegerValue = 0;
  bool IsIntegerValue = false;

  // Decide what value type(s) the tag takes: CPU names are strings,
  // Tag_compatibility takes both an integer and a string; otherwise tags
  // below 32 and even tags take integers, odd tags >= 32 take strings.
  if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
    IsStringValue = true;
  else if (Tag == ARMBuildAttrs::compatibility) {
    IsStringValue = true;
    IsIntegerValue = true;
  } else if (Tag < 32 || Tag % 2 == 0)
    IsIntegerValue = true;
  else if (Tag % 2 == 1)
    IsStringValue = true;
  else
    llvm_unreachable("invalid tag type");

  if (IsIntegerValue) {
    const MCExpr *ValueExpr;
    SMLoc ValueExprLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(ValueExpr))
      return true;

    // The integer operand must fold to a constant.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
    if (!CE)
      return Error(ValueExprLoc, "expected numeric constant");
    IntegerValue = CE->getValue();
  }

  // Tag_compatibility carries a second, comma-separated (string) operand.
  if (Tag == ARMBuildAttrs::compatibility) {
    if (Parser.parseComma())
      return true;
  }

  std::string EscapedValue;
  if (IsStringValue) {
    if (Parser.getTok().isNot(AsmToken::String))
      return Error(Parser.getTok().getLoc(), "bad string constant");

    // Tag_also_compatible_with may contain escape sequences, so it goes
    // through the escaped-string parser; other tags use the raw contents.
    if (Tag == ARMBuildAttrs::also_compatible_with) {
      if (Parser.parseEscapedString(EscapedValue))
        return Error(Parser.getTok().getLoc(), "bad escaped string constant");

      StringValue = EscapedValue;
    } else {
      StringValue = Parser.getTok().getStringContents();
      Parser.Lex();
    }
  }

  if (Parser.parseEOL())
    return true;

  // Forward the attribute to the target streamer in the matching form
  // (int+text, int only, or text only).
  if (IsIntegerValue && IsStringValue) {
    assert(Tag == ARMBuildAttrs::compatibility);
    getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
  } else if (IsIntegerValue)
    getTargetStreamer().emitAttribute(Tag, IntegerValue);
  else if (IsStringValue)
    getTargetStreamer().emitTextAttribute(Tag, StringValue);
  return false;
}
11959 
11960 /// parseDirectiveCPU
11961 ///  ::= .cpu str
11962 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
11963   StringRef CPU = getParser().parseStringToEndOfStatement().trim();
11964   getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
11965 
11966   // FIXME: This is using table-gen data, but should be moved to
11967   // ARMTargetParser once that is table-gen'd.
11968   if (!getSTI().isCPUStringValid(CPU))
11969     return Error(L, "Unknown CPU name");
11970 
11971   bool WasThumb = isThumb();
11972   MCSubtargetInfo &STI = copySTI();
11973   STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
11974   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11975   FixModeAfterArchChange(WasThumb, L);
11976 
11977   return false;
11978 }
11979 
11980 /// parseDirectiveFPU
11981 ///  ::= .fpu str
11982 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
11983   SMLoc FPUNameLoc = getTok().getLoc();
11984   StringRef FPU = getParser().parseStringToEndOfStatement().trim();
11985 
11986   ARM::FPUKind ID = ARM::parseFPU(FPU);
11987   std::vector<StringRef> Features;
11988   if (!ARM::getFPUFeatures(ID, Features))
11989     return Error(FPUNameLoc, "Unknown FPU name");
11990 
11991   MCSubtargetInfo &STI = copySTI();
11992   for (auto Feature : Features)
11993     STI.ApplyFeatureFlag(Feature);
11994   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11995 
11996   getTargetStreamer().emitFPU(ID);
11997   return false;
11998 }
11999 
12000 /// parseDirectiveFnStart
12001 ///  ::= .fnstart
12002 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
12003   if (parseEOL())
12004     return true;
12005 
12006   if (UC.hasFnStart()) {
12007     Error(L, ".fnstart starts before the end of previous one");
12008     UC.emitFnStartLocNotes();
12009     return true;
12010   }
12011 
12012   // Reset the unwind directives parser state
12013   UC.reset();
12014 
12015   getTargetStreamer().emitFnStart();
12016 
12017   UC.recordFnStart(L);
12018   return false;
12019 }
12020 
12021 /// parseDirectiveFnEnd
12022 ///  ::= .fnend
12023 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
12024   if (parseEOL())
12025     return true;
12026   // Check the ordering of unwind directives
12027   if (!UC.hasFnStart())
12028     return Error(L, ".fnstart must precede .fnend directive");
12029 
12030   // Reset the unwind directives parser state
12031   getTargetStreamer().emitFnEnd();
12032 
12033   UC.reset();
12034   return false;
12035 }
12036 
/// parseDirectiveCantUnwind
///  ::= .cantunwind
bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
  if (parseEOL())
    return true;

  // Record the directive first so later diagnostics can reference it.
  UC.recordCantUnwind(L);
  // Check the ordering of unwind directives
  if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
    return true;

  // .cantunwind conflicts with .handlerdata and .personality; each error
  // also emits a note pointing at the conflicting directive.
  if (UC.hasHandlerData()) {
    Error(L, ".cantunwind can't be used with .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (UC.hasPersonality()) {
    Error(L, ".cantunwind can't be used with .personality directive");
    UC.emitPersonalityLocNotes();
    return true;
  }

  getTargetStreamer().emitCantUnwind();
  return false;
}
12062 
12063 /// parseDirectivePersonality
12064 ///  ::= .personality name
12065 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
12066   MCAsmParser &Parser = getParser();
12067   bool HasExistingPersonality = UC.hasPersonality();
12068 
12069   // Parse the name of the personality routine
12070   if (Parser.getTok().isNot(AsmToken::Identifier))
12071     return Error(L, "unexpected input in .personality directive.");
12072   StringRef Name(Parser.getTok().getIdentifier());
12073   Parser.Lex();
12074 
12075   if (parseEOL())
12076     return true;
12077 
12078   UC.recordPersonality(L);
12079 
12080   // Check the ordering of unwind directives
12081   if (!UC.hasFnStart())
12082     return Error(L, ".fnstart must precede .personality directive");
12083   if (UC.cantUnwind()) {
12084     Error(L, ".personality can't be used with .cantunwind directive");
12085     UC.emitCantUnwindLocNotes();
12086     return true;
12087   }
12088   if (UC.hasHandlerData()) {
12089     Error(L, ".personality must precede .handlerdata directive");
12090     UC.emitHandlerDataLocNotes();
12091     return true;
12092   }
12093   if (HasExistingPersonality) {
12094     Error(L, "multiple personality directives");
12095     UC.emitPersonalityLocNotes();
12096     return true;
12097   }
12098 
12099   MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
12100   getTargetStreamer().emitPersonality(PR);
12101   return false;
12102 }
12103 
12104 /// parseDirectiveHandlerData
12105 ///  ::= .handlerdata
12106 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
12107   if (parseEOL())
12108     return true;
12109 
12110   UC.recordHandlerData(L);
12111   // Check the ordering of unwind directives
12112   if (!UC.hasFnStart())
12113     return Error(L, ".fnstart must precede .personality directive");
12114   if (UC.cantUnwind()) {
12115     Error(L, ".handlerdata can't be used with .cantunwind directive");
12116     UC.emitCantUnwindLocNotes();
12117     return true;
12118   }
12119 
12120   getTargetStreamer().emitHandlerData();
12121   return false;
12122 }
12123 
/// parseDirectiveSetFP
///  ::= .setfp fpreg, spreg [, offset]
bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Check the ordering of unwind directives
  if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
      check(UC.hasHandlerData(), L,
            ".setfp must precede .handlerdata directive"))
    return true;

  // Parse fpreg
  SMLoc FPRegLoc = Parser.getTok().getLoc();
  MCRegister FPReg = tryParseRegister();

  if (check(!FPReg, FPRegLoc, "frame pointer register expected") ||
      Parser.parseComma())
    return true;

  // Parse spreg: it must be either sp itself or whatever register is
  // currently acting as the frame pointer.
  SMLoc SPRegLoc = Parser.getTok().getLoc();
  MCRegister SPReg = tryParseRegister();
  if (check(!SPReg, SPRegLoc, "stack pointer register expected") ||
      check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
            "register should be either $sp or the latest fp register"))
    return true;

  // Update the frame pointer register
  UC.saveFPReg(FPReg);

  // Parse offset: optional, introduced by '#' (or '$'), and must fold to an
  // immediate constant.
  int64_t Offset = 0;
  if (Parser.parseOptionalToken(AsmToken::Comma)) {
    if (Parser.getTok().isNot(AsmToken::Hash) &&
        Parser.getTok().isNot(AsmToken::Dollar))
      return Error(Parser.getTok().getLoc(), "'#' expected");
    Parser.Lex(); // skip hash token.

    const MCExpr *OffsetExpr;
    SMLoc ExLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    if (getParser().parseExpression(OffsetExpr, EndLoc))
      return Error(ExLoc, "malformed setfp offset");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
    if (check(!CE, ExLoc, "setfp offset must be an immediate"))
      return true;
    Offset = CE->getValue();
  }

  if (Parser.parseEOL())
    return true;

  getTargetStreamer().emitSetFP(FPReg, SPReg, Offset);
  return false;
}
12178 
12179 /// parseDirectivePad
12180 ///  ::= .pad offset
12181 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
12182   MCAsmParser &Parser = getParser();
12183   // Check the ordering of unwind directives
12184   if (!UC.hasFnStart())
12185     return Error(L, ".fnstart must precede .pad directive");
12186   if (UC.hasHandlerData())
12187     return Error(L, ".pad must precede .handlerdata directive");
12188 
12189   // Parse the offset
12190   if (Parser.getTok().isNot(AsmToken::Hash) &&
12191       Parser.getTok().isNot(AsmToken::Dollar))
12192     return Error(Parser.getTok().getLoc(), "'#' expected");
12193   Parser.Lex(); // skip hash token.
12194 
12195   const MCExpr *OffsetExpr;
12196   SMLoc ExLoc = Parser.getTok().getLoc();
12197   SMLoc EndLoc;
12198   if (getParser().parseExpression(OffsetExpr, EndLoc))
12199     return Error(ExLoc, "malformed pad offset");
12200   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12201   if (!CE)
12202     return Error(ExLoc, "pad offset must be an immediate");
12203 
12204   if (parseEOL())
12205     return true;
12206 
12207   getTargetStreamer().emitPad(CE->getValue());
12208   return false;
12209 }
12210 
12211 /// parseDirectiveRegSave
12212 ///  ::= .save  { registers }
12213 ///  ::= .vsave { registers }
12214 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
12215   // Check the ordering of unwind directives
12216   if (!UC.hasFnStart())
12217     return Error(L, ".fnstart must precede .save or .vsave directives");
12218   if (UC.hasHandlerData())
12219     return Error(L, ".save or .vsave must precede .handlerdata directive");
12220 
12221   // RAII object to make sure parsed operands are deleted.
12222   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
12223 
12224   // Parse the register list
12225   if (parseRegisterList(Operands, true, true) || parseEOL())
12226     return true;
12227   ARMOperand &Op = (ARMOperand &)*Operands[0];
12228   if (!IsVector && !Op.isRegList())
12229     return Error(L, ".save expects GPR registers");
12230   if (IsVector && !Op.isDPRRegList())
12231     return Error(L, ".vsave expects DPR registers");
12232 
12233   getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
12234   return false;
12235 }
12236 
/// parseDirectiveInst
///  ::= .inst opcode [, ...]
///  ::= .inst.n opcode [, ...]
///  ::= .inst.w opcode [, ...]
bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
  // Encoding width in bytes: 4 by default, 2 for .inst.n, and 0 meaning
  // "unspecified, guess per opcode" for bare .inst in Thumb mode.
  int Width = 4;

  if (isThumb()) {
    switch (Suffix) {
    case 'n':
      Width = 2;
      break;
    case 'w':
      break;
    default:
      Width = 0;
      break;
    }
  } else {
    // ARM-mode instructions are always 4 bytes; width suffixes are rejected.
    if (Suffix)
      return Error(Loc, "width suffixes are invalid in ARM mode");
  }

  // Parse and emit one comma-separated opcode value.
  auto parseOne = [&]() -> bool {
    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;
    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
    if (!Value) {
      return Error(Loc, "expected constant expression");
    }

    // Range-check the opcode against the chosen width, or pick a width
    // from the opcode value when none was specified.
    char CurSuffix = Suffix;
    switch (Width) {
    case 2:
      if (Value->getValue() > 0xffff)
        return Error(Loc, "inst.n operand is too big, use inst.w instead");
      break;
    case 4:
      if (Value->getValue() > 0xffffffff)
        return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
                              " operand is too big");
      break;
    case 0:
      // Thumb mode, no width indicated. Guess from the opcode, if possible.
      if (Value->getValue() < 0xe800)
        CurSuffix = 'n';
      else if (Value->getValue() >= 0xe8000000)
        CurSuffix = 'w';
      else
        return Error(Loc, "cannot determine Thumb instruction size, "
                          "use inst.n/inst.w instead");
      break;
    default:
      llvm_unreachable("only supported widths are 2 and 4");
    }

    getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
    // .inst counts as an instruction for IT/VPT block tracking.
    forwardITPosition();
    forwardVPTPosition();
    return false;
  };

  // At least one opcode operand is required.
  if (parseOptionalToken(AsmToken::EndOfStatement))
    return Error(Loc, "expected expression following directive");
  if (parseMany(parseOne))
    return true;
  return false;
}
12306 
12307 /// parseDirectiveLtorg
12308 ///  ::= .ltorg | .pool
12309 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
12310   if (parseEOL())
12311     return true;
12312   getTargetStreamer().emitCurrentConstantPool();
12313   return false;
12314 }
12315 
12316 bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
12317   const MCSection *Section = getStreamer().getCurrentSectionOnly();
12318 
12319   if (parseEOL())
12320     return true;
12321 
12322   if (!Section) {
12323     getStreamer().initSections(false, getSTI());
12324     Section = getStreamer().getCurrentSectionOnly();
12325   }
12326 
12327   assert(Section && "must have section to emit alignment");
12328   if (Section->useCodeAlign())
12329     getStreamer().emitCodeAlignment(Align(2), &getSTI());
12330   else
12331     getStreamer().emitValueToAlignment(Align(2));
12332 
12333   return false;
12334 }
12335 
/// parseDirectivePersonalityIndex
///   ::= .personalityindex index
bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Snapshot before this directive is recorded below.
  bool HasExistingPersonality = UC.hasPersonality();

  const MCExpr *IndexExpression;
  SMLoc IndexLoc = Parser.getTok().getLoc();
  if (Parser.parseExpression(IndexExpression) || parseEOL()) {
    return true;
  }

  UC.recordPersonalityIndex(L);

  // Ordering/compatibility checks against the other unwind directives.
  if (!UC.hasFnStart()) {
    return Error(L, ".fnstart must precede .personalityindex directive");
  }
  if (UC.cantUnwind()) {
    Error(L, ".personalityindex cannot be used with .cantunwind");
    UC.emitCantUnwindLocNotes();
    return true;
  }
  if (UC.hasHandlerData()) {
    Error(L, ".personalityindex must precede .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (HasExistingPersonality) {
    Error(L, "multiple personality directives");
    UC.emitPersonalityLocNotes();
    return true;
  }

  // The index must be a constant in [0, NUM_PERSONALITY_INDEX).
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
  if (!CE)
    return Error(IndexLoc, "index must be a constant number");
  if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
    return Error(IndexLoc,
                 "personality routine index should be in range [0-3]");

  getTargetStreamer().emitPersonalityIndex(CE->getValue());
  return false;
}
12379 
/// parseDirectiveUnwindRaw
///   ::= .unwind_raw offset, opcode [, opcode...]
bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t StackOffset;
  const MCExpr *OffsetExpr;
  SMLoc OffsetLoc = getLexer().getLoc();

  // Only valid inside a .fnstart region.
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .unwind_raw directives");
  if (getParser().parseExpression(OffsetExpr))
    return Error(OffsetLoc, "expected expression");

  // The stack offset must fold to a constant.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
  if (!CE)
    return Error(OffsetLoc, "offset must be a constant");

  StackOffset = CE->getValue();

  if (Parser.parseComma())
    return true;

  SmallVector<uint8_t, 16> Opcodes;

  // Parse one raw unwind opcode: a constant that must fit in a byte.
  auto parseOne = [&]() -> bool {
    const MCExpr *OE = nullptr;
    SMLoc OpcodeLoc = getLexer().getLoc();
    // Guard against a trailing comma: end-of-statement here means a missing
    // operand (short-circuit keeps parseExpression from running on EOL).
    if (check(getLexer().is(AsmToken::EndOfStatement) ||
                  Parser.parseExpression(OE),
              OpcodeLoc, "expected opcode expression"))
      return true;
    const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
    if (!OC)
      return Error(OpcodeLoc, "opcode value must be a constant");
    const int64_t Opcode = OC->getValue();
    if (Opcode & ~0xff)
      return Error(OpcodeLoc, "invalid opcode");
    Opcodes.push_back(uint8_t(Opcode));
    return false;
  };

  // Must have at least 1 element
  SMLoc OpcodeLoc = getLexer().getLoc();
  if (parseOptionalToken(AsmToken::EndOfStatement))
    return Error(OpcodeLoc, "expected opcode expression");
  if (parseMany(parseOne))
    return true;

  getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
  return false;
}
12431 
12432 /// parseDirectiveTLSDescSeq
12433 ///   ::= .tlsdescseq tls-variable
12434 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
12435   MCAsmParser &Parser = getParser();
12436 
12437   if (getLexer().isNot(AsmToken::Identifier))
12438     return TokError("expected variable after '.tlsdescseq' directive");
12439 
12440   const MCSymbolRefExpr *SRE =
12441     MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
12442                             MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
12443   Lex();
12444 
12445   if (parseEOL())
12446     return true;
12447 
12448   getTargetStreamer().annotateTLSDescriptorSequence(SRE);
12449   return false;
12450 }
12451 
12452 /// parseDirectiveMovSP
12453 ///  ::= .movsp reg [, #offset]
12454 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
12455   MCAsmParser &Parser = getParser();
12456   if (!UC.hasFnStart())
12457     return Error(L, ".fnstart must precede .movsp directives");
12458   if (UC.getFPReg() != ARM::SP)
12459     return Error(L, "unexpected .movsp directive");
12460 
12461   SMLoc SPRegLoc = Parser.getTok().getLoc();
12462   MCRegister SPReg = tryParseRegister();
12463   if (!SPReg)
12464     return Error(SPRegLoc, "register expected");
12465   if (SPReg == ARM::SP || SPReg == ARM::PC)
12466     return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
12467 
12468   int64_t Offset = 0;
12469   if (Parser.parseOptionalToken(AsmToken::Comma)) {
12470     if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
12471       return true;
12472 
12473     const MCExpr *OffsetExpr;
12474     SMLoc OffsetLoc = Parser.getTok().getLoc();
12475 
12476     if (Parser.parseExpression(OffsetExpr))
12477       return Error(OffsetLoc, "malformed offset expression");
12478 
12479     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12480     if (!CE)
12481       return Error(OffsetLoc, "offset must be an immediate constant");
12482 
12483     Offset = CE->getValue();
12484   }
12485 
12486   if (parseEOL())
12487     return true;
12488 
12489   getTargetStreamer().emitMovSP(SPReg, Offset);
12490   UC.saveFPReg(SPReg);
12491 
12492   return false;
12493 }
12494 
12495 /// parseDirectiveObjectArch
12496 ///   ::= .object_arch name
12497 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
12498   MCAsmParser &Parser = getParser();
12499   if (getLexer().isNot(AsmToken::Identifier))
12500     return Error(getLexer().getLoc(), "unexpected token");
12501 
12502   StringRef Arch = Parser.getTok().getString();
12503   SMLoc ArchLoc = Parser.getTok().getLoc();
12504   Lex();
12505 
12506   ARM::ArchKind ID = ARM::parseArch(Arch);
12507 
12508   if (ID == ARM::ArchKind::INVALID)
12509     return Error(ArchLoc, "unknown architecture '" + Arch + "'");
12510   if (parseToken(AsmToken::EndOfStatement))
12511     return true;
12512 
12513   getTargetStreamer().emitObjectArch(ID);
12514   return false;
12515 }
12516 
12517 /// parseDirectiveAlign
12518 ///   ::= .align
12519 bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
12520   // NOTE: if this is not the end of the statement, fall back to the target
12521   // agnostic handling for this directive which will correctly handle this.
12522   if (parseOptionalToken(AsmToken::EndOfStatement)) {
12523     // '.align' is target specifically handled to mean 2**2 byte alignment.
12524     const MCSection *Section = getStreamer().getCurrentSectionOnly();
12525     assert(Section && "must have section to emit alignment");
12526     if (Section->useCodeAlign())
12527       getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
12528     else
12529       getStreamer().emitValueToAlignment(Align(4), 0, 1, 0);
12530     return false;
12531   }
12532   return true;
12533 }
12534 
12535 /// parseDirectiveThumbSet
12536 ///  ::= .thumb_set name, value
12537 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
12538   MCAsmParser &Parser = getParser();
12539 
12540   StringRef Name;
12541   if (check(Parser.parseIdentifier(Name),
12542             "expected identifier after '.thumb_set'") ||
12543       Parser.parseComma())
12544     return true;
12545 
12546   MCSymbol *Sym;
12547   const MCExpr *Value;
12548   if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
12549                                                Parser, Sym, Value))
12550     return true;
12551 
12552   getTargetStreamer().emitThumbSet(Sym, Value);
12553   return false;
12554 }
12555 
12556 /// parseDirectiveSEHAllocStack
12557 /// ::= .seh_stackalloc
12558 /// ::= .seh_stackalloc_w
12559 bool ARMAsmParser::parseDirectiveSEHAllocStack(SMLoc L, bool Wide) {
12560   int64_t Size;
12561   if (parseImmExpr(Size))
12562     return true;
12563   getTargetStreamer().emitARMWinCFIAllocStack(Size, Wide);
12564   return false;
12565 }
12566 
12567 /// parseDirectiveSEHSaveRegs
12568 /// ::= .seh_save_regs
12569 /// ::= .seh_save_regs_w
12570 bool ARMAsmParser::parseDirectiveSEHSaveRegs(SMLoc L, bool Wide) {
12571   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
12572 
12573   if (parseRegisterList(Operands) || parseEOL())
12574     return true;
12575   ARMOperand &Op = (ARMOperand &)*Operands[0];
12576   if (!Op.isRegList())
12577     return Error(L, ".seh_save_regs{_w} expects GPR registers");
12578   const SmallVectorImpl<MCRegister> &RegList = Op.getRegList();
12579   uint32_t Mask = 0;
12580   for (size_t i = 0; i < RegList.size(); ++i) {
12581     unsigned Reg = MRI->getEncodingValue(RegList[i]);
12582     if (Reg == 15) // pc -> lr
12583       Reg = 14;
12584     if (Reg == 13)
12585       return Error(L, ".seh_save_regs{_w} can't include SP");
12586     assert(Reg < 16U && "Register out of range");
12587     unsigned Bit = (1u << Reg);
12588     Mask |= Bit;
12589   }
12590   if (!Wide && (Mask & 0x1f00) != 0)
12591     return Error(L,
12592                  ".seh_save_regs cannot save R8-R12, needs .seh_save_regs_w");
12593   getTargetStreamer().emitARMWinCFISaveRegMask(Mask, Wide);
12594   return false;
12595 }
12596 
12597 /// parseDirectiveSEHSaveSP
12598 /// ::= .seh_save_sp
12599 bool ARMAsmParser::parseDirectiveSEHSaveSP(SMLoc L) {
12600   MCRegister Reg = tryParseRegister();
12601   if (!Reg || !MRI->getRegClass(ARM::GPRRegClassID).contains(Reg))
12602     return Error(L, "expected GPR");
12603   unsigned Index = MRI->getEncodingValue(Reg);
12604   if (Index > 14 || Index == 13)
12605     return Error(L, "invalid register for .seh_save_sp");
12606   getTargetStreamer().emitARMWinCFISaveSP(Index);
12607   return false;
12608 }
12609 
/// parseDirectiveSEHSaveFRegs
/// ::= .seh_save_fregs
bool ARMAsmParser::parseDirectiveSEHSaveFRegs(SMLoc L) {
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;

  if (parseRegisterList(Operands) || parseEOL())
    return true;
  ARMOperand &Op = (ARMOperand &)*Operands[0];
  if (!Op.isDPRRegList())
    return Error(L, ".seh_save_fregs expects DPR registers");
  // Build a bit mask of the listed d-registers (bit N <=> dN).
  const SmallVectorImpl<MCRegister> &RegList = Op.getRegList();
  uint32_t Mask = 0;
  for (size_t i = 0; i < RegList.size(); ++i) {
    unsigned Reg = MRI->getEncodingValue(RegList[i]);
    assert(Reg < 32U && "Register out of range");
    unsigned Bit = (1u << Reg);
    Mask |= Bit;
  }

  if (Mask == 0)
    return Error(L, ".seh_save_fregs missing registers");

  // Shift out trailing zero bits to find the first register in the range.
  unsigned First = 0;
  while ((Mask & 1) == 0) {
    First++;
    Mask >>= 1;
  }
  // After the shift, a contiguous range leaves Mask == 2^k - 1, so Mask+1
  // is a power of two and (Mask+1) & Mask == 0; anything else has a gap.
  if (((Mask + 1) & Mask) != 0)
    return Error(L,
                 ".seh_save_fregs must take a contiguous range of registers");
  // Walk the remaining set bits to find the last register in the range.
  unsigned Last = First;
  while ((Mask & 2) != 0) {
    Last++;
    Mask >>= 1;
  }
  // The range may not straddle the d15/d16 boundary.
  if (First < 16 && Last >= 16)
    return Error(L, ".seh_save_fregs must be all d0-d15 or d16-d31");
  getTargetStreamer().emitARMWinCFISaveFRegs(First, Last);
  return false;
}
12650 
12651 /// parseDirectiveSEHSaveLR
12652 /// ::= .seh_save_lr
12653 bool ARMAsmParser::parseDirectiveSEHSaveLR(SMLoc L) {
12654   int64_t Offset;
12655   if (parseImmExpr(Offset))
12656     return true;
12657   getTargetStreamer().emitARMWinCFISaveLR(Offset);
12658   return false;
12659 }
12660 
/// parseDirectiveSEHPrologEnd
/// ::= .seh_endprologue
/// ::= .seh_endprologue_fragment
bool ARMAsmParser::parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment) {
  // No operands; Fragment selects the _fragment variant of the directive.
  getTargetStreamer().emitARMWinCFIPrologEnd(Fragment);
  return false;
}
12668 
/// parseDirectiveSEHNop
/// ::= .seh_nop
/// ::= .seh_nop_w
bool ARMAsmParser::parseDirectiveSEHNop(SMLoc L, bool Wide) {
  // No operands; Wide selects the .seh_nop_w variant.
  getTargetStreamer().emitARMWinCFINop(Wide);
  return false;
}
12676 
12677 /// parseDirectiveSEHEpilogStart
12678 /// ::= .seh_startepilogue
12679 /// ::= .seh_startepilogue_cond
12680 bool ARMAsmParser::parseDirectiveSEHEpilogStart(SMLoc L, bool Condition) {
12681   unsigned CC = ARMCC::AL;
12682   if (Condition) {
12683     MCAsmParser &Parser = getParser();
12684     SMLoc S = Parser.getTok().getLoc();
12685     const AsmToken &Tok = Parser.getTok();
12686     if (!Tok.is(AsmToken::Identifier))
12687       return Error(S, ".seh_startepilogue_cond missing condition");
12688     CC = ARMCondCodeFromString(Tok.getString());
12689     if (CC == ~0U)
12690       return Error(S, "invalid condition");
12691     Parser.Lex(); // Eat the token.
12692   }
12693 
12694   getTargetStreamer().emitARMWinCFIEpilogStart(CC);
12695   return false;
12696 }
12697 
/// parseDirectiveSEHEpilogEnd
/// ::= .seh_endepilogue
bool ARMAsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  // No operands; just mark the end of the current epilogue.
  getTargetStreamer().emitARMWinCFIEpilogEnd();
  return false;
}
12704 
12705 /// parseDirectiveSEHCustom
12706 /// ::= .seh_custom
12707 bool ARMAsmParser::parseDirectiveSEHCustom(SMLoc L) {
12708   unsigned Opcode = 0;
12709   do {
12710     int64_t Byte;
12711     if (parseImmExpr(Byte))
12712       return true;
12713     if (Byte > 0xff || Byte < 0)
12714       return Error(L, "Invalid byte value in .seh_custom");
12715     if (Opcode > 0x00ffffff)
12716       return Error(L, "Too many bytes in .seh_custom");
12717     // Store the bytes as one big endian number in Opcode. In a multi byte
12718     // opcode sequence, the first byte can't be zero.
12719     Opcode = (Opcode << 8) | Byte;
12720   } while (parseOptionalToken(AsmToken::Comma));
12721   getTargetStreamer().emitARMWinCFICustom(Opcode);
12722   return false;
12723 }
12724 
/// Force static initialization.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMAsmParser() {
  // Register this parser for all four ARM target flavours:
  // little/big-endian ARM and little/big-endian Thumb.
  RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
  RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
  RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
  RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
}
12732 
12733 #define GET_REGISTER_MATCHER
12734 #define GET_SUBTARGET_FEATURE_NAME
12735 #define GET_MATCHER_IMPLEMENTATION
12736 #define GET_MNEMONIC_SPELL_CHECKER
12737 #include "ARMGenAsmMatcher.inc"
12738 
12739 // Some diagnostics need to vary with subtarget features, so they are handled
12740 // here. For example, the DPR class has either 16 or 32 registers, depending
12741 // on the FPU available.
12742 const char *
12743 ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
12744   switch (MatchError) {
12745   // rGPR contains sp starting with ARMv8.
12746   case Match_rGPR:
12747     return hasV8Ops() ? "operand must be a register in range [r0, r14]"
12748                       : "operand must be a register in range [r0, r12] or r14";
12749   // DPR contains 16 registers for some FPUs, and 32 for others.
12750   case Match_DPR:
12751     return hasD32() ? "operand must be a register in range [d0, d31]"
12752                     : "operand must be a register in range [d0, d15]";
12753   case Match_DPR_RegList:
12754     return hasD32() ? "operand must be a list of registers in range [d0, d31]"
12755                     : "operand must be a list of registers in range [d0, d15]";
12756 
12757   // For all other diags, use the static string from tablegen.
12758   default:
12759     return getMatchKindDiag(MatchError);
12760   }
12761 }
12762 
// Process the list of near-misses, throwing away ones we don't want to report
// to the user, and converting the rest to a source location and string that
// should be reported.
//
// \param NearMissesIn  Raw near-miss records produced by the generated
//                      matcher.
// \param NearMissesOut Filtered, human-readable messages (location + text).
// \param IDLoc         Location of the instruction mnemonic, used for
//                      messages that don't apply to a specific operand.
// \param Operands      The parsed operands, used to resolve operand indices
//                      to source locations.
void
ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
                               SmallVectorImpl<NearMissMessage> &NearMissesOut,
                               SMLoc IDLoc, OperandVector &Operands) {
  // TODO: If operand didn't match, sub in a dummy one and run target
  // predicate, so that we can avoid reporting near-misses that are invalid?
  // TODO: Many operand types dont have SuperClasses set, so we report
  // redundant ones.
  // TODO: Some operands are superclasses of registers (e.g.
  // MCK_RegShiftedImm), we don't have any way to represent that currently.
  // TODO: This is not all ARM-specific, can some of it be factored out?

  // Record some information about near-misses that we have already seen, so
  // that we can avoid reporting redundant ones. For example, if there are
  // variants of an instruction that take 8- and 16-bit immediates, we want
  // to only report the widest one.
  // OperandMissesSeen maps an operand index to the match classes already
  // reported for it; FeatureMissesSeen records whole feature sets already
  // reported.
  std::multimap<unsigned, unsigned> OperandMissesSeen;
  SmallSet<FeatureBitset, 4> FeatureMissesSeen;
  bool ReportedTooFewOperands = false;

  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);

  // Process the near-misses in reverse order, so that we see more general ones
  // first, and so can avoid emitting more specific ones.
  for (NearMissInfo &I : reverse(NearMissesIn)) {
    switch (I.getKind()) {
    case NearMissInfo::NearMissOperand: {
      SMLoc OperandLoc =
          ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
      // May be null when tablegen has no diagnostic string for this class.
      const char *OperandDiag =
          getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());

      // If we have already emitted a message for a superclass, don't also report
      // the sub-class. We consider all operand classes that we don't have a
      // specialised diagnostic for to be equal for the propose of this check,
      // so that we don't report the generic error multiple times on the same
      // operand.
      // ~0U is the sentinel for "no specialised diagnostic".
      unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
      auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
      if (std::any_of(PrevReports.first, PrevReports.second,
                      [DupCheckMatchClass](
                          const std::pair<unsigned, unsigned> Pair) {
            // Sentinel classes only ever match the sentinel; otherwise
            // suppress if the new class is a subclass of one already seen.
            if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
              return Pair.second == DupCheckMatchClass;
            else
              return isSubclass((MatchClassKind)DupCheckMatchClass,
                                (MatchClassKind)Pair.second);
          }))
        break;
      OperandMissesSeen.insert(
          std::make_pair(I.getOperandIndex(), DupCheckMatchClass));

      NearMissMessage Message;
      Message.Loc = OperandLoc;
      if (OperandDiag) {
        Message.Message = OperandDiag;
      } else if (I.getOperandClass() == InvalidMatchClass) {
        Message.Message = "too many operands for instruction";
      } else {
        Message.Message = "invalid operand for instruction";
        LLVM_DEBUG(
            dbgs() << "Missing diagnostic string for operand class "
                   << getMatchClassName((MatchClassKind)I.getOperandClass())
                   << I.getOperandClass() << ", error " << I.getOperandError()
                   << ", opcode " << MII.getName(I.getOpcode()) << "\n");
      }
      NearMissesOut.emplace_back(Message);
      break;
    }
    case NearMissInfo::NearMissFeature: {
      const FeatureBitset &MissingFeatures = I.getFeatures();
      // Don't report the same set of features twice.
      if (FeatureMissesSeen.count(MissingFeatures))
        break;
      FeatureMissesSeen.insert(MissingFeatures);

      // Special case: don't report a feature set which includes arm-mode for
      // targets that don't have ARM mode.
      if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
        break;
      // Don't report any near-misses that both require switching instruction
      // set, and adding other subtarget features.
      if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
          MissingFeatures.count() > 1)
        break;
      if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
          MissingFeatures.count() > 1)
        break;
      // Thumb2 implies Thumb, so a Thumb2 miss plus only the Thumb bit is
      // still just an instruction-set switch; anything more is suppressed.
      if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
          (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
                                             Feature_IsThumbBit})).any())
        break;
      // M-class cores never have NEON, so suggesting it would be misleading.
      if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
        break;

      NearMissMessage Message;
      Message.Loc = IDLoc;
      raw_svector_ostream OS(Message.Message);

      OS << "instruction requires:";
      for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
        if (MissingFeatures.test(i))
          OS << ' ' << getSubtargetFeatureName(i);

      NearMissesOut.emplace_back(Message);

      break;
    }
    case NearMissInfo::NearMissPredicate: {
      NearMissMessage Message;
      Message.Loc = IDLoc;
      // Map each target-predicate error code onto a fixed message.
      switch (I.getPredicateError()) {
      case Match_RequiresNotITBlock:
        Message.Message = "flag setting instruction only valid outside IT block";
        break;
      case Match_RequiresITBlock:
        Message.Message = "instruction only valid inside IT block";
        break;
      case Match_RequiresV6:
        Message.Message = "instruction variant requires ARMv6 or later";
        break;
      case Match_RequiresThumb2:
        Message.Message = "instruction variant requires Thumb2";
        break;
      case Match_RequiresV8:
        Message.Message = "instruction variant requires ARMv8 or later";
        break;
      case Match_RequiresFlagSetting:
        Message.Message = "no flag-preserving variant of this instruction available";
        break;
      case Match_InvalidTiedOperand: {
        // Only tMUL uses this error; point at its first real operand.
        ARMOperand &Op = static_cast<ARMOperand &>(*Operands[0]);
        if (Op.isToken() && Op.getToken() == "mul") {
          Message.Message = "destination register must match a source register";
          Message.Loc = Operands[MnemonicOpsEndInd]->getStartLoc();
        } else {
          llvm_unreachable("Match_InvalidTiedOperand only used for tMUL.");
        }
        break;
      }
      case Match_InvalidOperand:
        Message.Message = "invalid operand for instruction";
        break;
      default:
        llvm_unreachable("Unhandled target predicate error");
        break;
      }
      NearMissesOut.emplace_back(Message);
      break;
    }
    case NearMissInfo::NearMissTooFewOperands: {
      // Only report this once, no matter how many encodings wanted more
      // operands.
      if (!ReportedTooFewOperands) {
        SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
        NearMissesOut.emplace_back(NearMissMessage{
            EndLoc, StringRef("too few operands for instruction")});
        ReportedTooFewOperands = true;
      }
      break;
    }
    case NearMissInfo::NoNearMiss:
      // This should never leave the matcher.
      llvm_unreachable("not a near-miss");
      break;
    }
  }
}
12932 
12933 void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
12934                                     SMLoc IDLoc, OperandVector &Operands) {
12935   SmallVector<NearMissMessage, 4> Messages;
12936   FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
12937 
12938   if (Messages.size() == 0) {
12939     // No near-misses were found, so the best we can do is "invalid
12940     // instruction".
12941     Error(IDLoc, "invalid instruction");
12942   } else if (Messages.size() == 1) {
12943     // One near miss was found, report it as the sole error.
12944     Error(Messages[0].Loc, Messages[0].Message);
12945   } else {
12946     // More than one near miss, so report a generic "invalid instruction"
12947     // error, followed by notes for each of the near-misses.
12948     Error(IDLoc, "invalid instruction, any one of the following would fix this:");
12949     for (auto &M : Messages) {
12950       Note(M.Loc, M.Message);
12951     }
12952   }
12953 }
12954 
// Enable or disable (with a "no" prefix on \p Name) a single architectural
// extension on the current subtarget.
//
// Returns false only when \p Name parses to an extension kind that is not in
// the table below. Otherwise returns true: either the feature bits were
// toggled successfully, or a diagnostic was already emitted via Error()
// (which, per MCAsmParser convention, also returns true).
bool ARMAsmParser::enableArchExtFeature(StringRef Name, SMLoc &ExtLoc) {
  // FIXME: This structure should be moved inside ARMTargetParser
  // when we start to table-generate them, and we can use the ARM
  // flags below, that were generated by table-gen.
  // Each entry: the AEK_* kind, the base-architecture features that must
  // already be present, and the subtarget features the extension toggles.
  static const struct {
    const uint64_t Kind;
    const FeatureBitset ArchCheck;
    const FeatureBitset Features;
  } Extensions[] = {
      {ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC}},
      {ARM::AEK_AES,
       {Feature_HasV8Bit},
       {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
      {ARM::AEK_SHA2,
       {Feature_HasV8Bit},
       {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
      {ARM::AEK_CRYPTO,
       {Feature_HasV8Bit},
       {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
      {(ARM::AEK_DSP | ARM::AEK_SIMD | ARM::AEK_FP),
       {Feature_HasV8_1MMainlineBit},
       {ARM::HasMVEFloatOps}},
      {ARM::AEK_FP,
       {Feature_HasV8Bit},
       {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
      {(ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
       {Feature_HasV7Bit, Feature_IsNotMClassBit},
       {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
      {ARM::AEK_MP,
       {Feature_HasV7Bit, Feature_IsNotMClassBit},
       {ARM::FeatureMP}},
      {ARM::AEK_SIMD,
       {Feature_HasV8Bit},
       {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
      {ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone}},
      // FIXME: Only available in A-class, isel not predicated
      {ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization}},
      {ARM::AEK_FP16,
       {Feature_HasV8_2aBit},
       {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
      {ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS}},
      {ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB}},
      {ARM::AEK_PACBTI, {Feature_HasV8_1MMainlineBit}, {ARM::FeaturePACBTI}},
      // FIXME: Unsupported extensions.
      {ARM::AEK_OS, {}, {}},
      {ARM::AEK_IWMMXT, {}, {}},
      {ARM::AEK_IWMMXT2, {}, {}},
      {ARM::AEK_MAVERICK, {}, {}},
      {ARM::AEK_XSCALE, {}, {}},
  };
  // A leading "no" (case-insensitive) means the extension is being disabled.
  bool EnableFeature = !Name.consume_front_insensitive("no");
  uint64_t FeatureKind = ARM::parseArchExt(Name);
  if (FeatureKind == ARM::AEK_INVALID)
    return Error(ExtLoc, "unknown architectural extension: " + Name);

  for (const auto &Extension : Extensions) {
    if (Extension.Kind != FeatureKind)
      continue;

    // Known kind but no feature bits: recognised yet unsupported (see the
    // FIXME entries above).
    if (Extension.Features.none())
      return Error(ExtLoc, "unsupported architectural extension: " + Name);

    if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
      return Error(ExtLoc, "architectural extension '" + Name +
                               "' is not "
                               "allowed for the current base architecture");

    // Toggle the features on a copy of the STI and recompute what the
    // matcher considers available.
    MCSubtargetInfo &STI = copySTI();
    if (EnableFeature) {
      STI.SetFeatureBitsTransitively(Extension.Features);
    } else {
      STI.ClearFeatureBitsTransitively(Extension.Features);
    }
    FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
    setAvailableFeatures(Features);
    return true;
  }
  return false;
}
13034 
13035 /// parseDirectiveArchExtension
13036 ///   ::= .arch_extension [no]feature
13037 bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
13038 
13039   MCAsmParser &Parser = getParser();
13040 
13041   if (getLexer().isNot(AsmToken::Identifier))
13042     return Error(getLexer().getLoc(), "expected architecture extension name");
13043 
13044   StringRef Name = Parser.getTok().getString();
13045   SMLoc ExtLoc = Parser.getTok().getLoc();
13046   Lex();
13047 
13048   if (parseEOL())
13049     return true;
13050 
13051   if (Name == "nocrypto") {
13052     enableArchExtFeature("nosha2", ExtLoc);
13053     enableArchExtFeature("noaes", ExtLoc);
13054   }
13055 
13056   if (enableArchExtFeature(Name, ExtLoc))
13057     return false;
13058 
13059   return Error(ExtLoc, "unknown architectural extension: " + Name);
13060 }
13061 
13062 // Define this matcher function after the auto-generated include so we
13063 // have the match class enum definitions.
13064 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
13065                                                   unsigned Kind) {
13066   ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
13067   // If the kind is a token for a literal immediate, check if our asm
13068   // operand matches. This is for InstAliases which have a fixed-value
13069   // immediate in the syntax.
13070   switch (Kind) {
13071   default: break;
13072   case MCK__HASH_0:
13073     if (Op.isImm())
13074       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
13075         if (CE->getValue() == 0)
13076           return Match_Success;
13077     break;
13078   case MCK__HASH_8:
13079     if (Op.isImm())
13080       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
13081         if (CE->getValue() == 8)
13082           return Match_Success;
13083     break;
13084   case MCK__HASH_16:
13085     if (Op.isImm())
13086       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
13087         if (CE->getValue() == 16)
13088           return Match_Success;
13089     break;
13090   case MCK_ModImm:
13091     if (Op.isImm()) {
13092       const MCExpr *SOExpr = Op.getImm();
13093       int64_t Value;
13094       if (!SOExpr->evaluateAsAbsolute(Value))
13095         return Match_Success;
13096       assert((Value >= std::numeric_limits<int32_t>::min() &&
13097               Value <= std::numeric_limits<uint32_t>::max()) &&
13098              "expression value must be representable in 32 bits");
13099     }
13100     break;
13101   case MCK_rGPR:
13102     if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
13103       return Match_Success;
13104     return Match_rGPR;
13105   }
13106   return Match_InvalidOperand;
13107 }
13108 
13109 bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
13110                                            StringRef ExtraToken) {
13111   if (!hasMVE())
13112     return false;
13113 
13114   if (MS.isVPTPredicableCDEInstr(Mnemonic) ||
13115       (Mnemonic.starts_with("vldrh") && Mnemonic != "vldrhi") ||
13116       (Mnemonic.starts_with("vmov") &&
13117        !(ExtraToken == ".f16" || ExtraToken == ".32" || ExtraToken == ".16" ||
13118          ExtraToken == ".8")) ||
13119       (Mnemonic.starts_with("vrint") && Mnemonic != "vrintr") ||
13120       (Mnemonic.starts_with("vstrh") && Mnemonic != "vstrhi"))
13121     return true;
13122 
13123   const char *predicable_prefixes[] = {
13124       "vabav",      "vabd",     "vabs",      "vadc",       "vadd",
13125       "vaddlv",     "vaddv",    "vand",      "vbic",       "vbrsr",
13126       "vcadd",      "vcls",     "vclz",      "vcmla",      "vcmp",
13127       "vcmul",      "vctp",     "vcvt",      "vddup",      "vdup",
13128       "vdwdup",     "veor",     "vfma",      "vfmas",      "vfms",
13129       "vhadd",      "vhcadd",   "vhsub",     "vidup",      "viwdup",
13130       "vldrb",      "vldrd",    "vldrw",     "vmax",       "vmaxa",
13131       "vmaxav",     "vmaxnm",   "vmaxnma",   "vmaxnmav",   "vmaxnmv",
13132       "vmaxv",      "vmin",     "vminav",    "vminnm",     "vminnmav",
13133       "vminnmv",    "vminv",    "vmla",      "vmladav",    "vmlaldav",
13134       "vmlalv",     "vmlas",    "vmlav",     "vmlsdav",    "vmlsldav",
13135       "vmovlb",     "vmovlt",   "vmovnb",    "vmovnt",     "vmul",
13136       "vmvn",       "vneg",     "vorn",      "vorr",       "vpnot",
13137       "vpsel",      "vqabs",    "vqadd",     "vqdmladh",   "vqdmlah",
13138       "vqdmlash",   "vqdmlsdh", "vqdmulh",   "vqdmull",    "vqmovn",
13139       "vqmovun",    "vqneg",    "vqrdmladh", "vqrdmlah",   "vqrdmlash",
13140       "vqrdmlsdh",  "vqrdmulh", "vqrshl",    "vqrshrn",    "vqrshrun",
13141       "vqshl",      "vqshrn",   "vqshrun",   "vqsub",      "vrev16",
13142       "vrev32",     "vrev64",   "vrhadd",    "vrmlaldavh", "vrmlalvh",
13143       "vrmlsldavh", "vrmulh",   "vrshl",     "vrshr",      "vrshrn",
13144       "vsbc",       "vshl",     "vshlc",     "vshll",      "vshr",
13145       "vshrn",      "vsli",     "vsri",      "vstrb",      "vstrd",
13146       "vstrw",      "vsub"};
13147 
13148   return any_of(predicable_prefixes, [&Mnemonic](const char *prefix) {
13149     return Mnemonic.starts_with(prefix);
13150   });
13151 }
13152 
/// Factory for the implicit condition-code operand: the "always" (AL)
/// predicate, used by the matcher when no condition suffix was written.
std::unique_ptr<ARMOperand> ARMAsmParser::defaultCondCodeOp() {
  return ARMOperand::CreateCondCode(ARMCC::AL, SMLoc(), *this);
}
13156 
/// Factory for the implicit CC-out operand (register 0, i.e. no flag
/// register written), used when the mnemonic carries no 's' suffix.
std::unique_ptr<ARMOperand> ARMAsmParser::defaultCCOutOp() {
  return ARMOperand::CreateCCOut(0, SMLoc(), *this);
}
13160 
/// Factory for the implicit MVE VPT predication operand: "None", used when
/// the instruction is not inside a VPT block.
std::unique_ptr<ARMOperand> ARMAsmParser::defaultVPTPredOp() {
  return ARMOperand::CreateVPTPred(ARMVCC::None, SMLoc(), *this);
}
13164