1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMBaseInstrInfo.h"
10 #include "ARMFeatures.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "MCTargetDesc/ARMBaseInfo.h"
13 #include "MCTargetDesc/ARMInstPrinter.h"
14 #include "MCTargetDesc/ARMMCExpr.h"
15 #include "MCTargetDesc/ARMMCTargetDesc.h"
16 #include "TargetInfo/ARMTargetInfo.h"
17 #include "Utils/ARMBaseInfo.h"
18 #include "llvm/ADT/APFloat.h"
19 #include "llvm/ADT/APInt.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/StringMap.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/StringSet.h"
26 #include "llvm/ADT/StringSwitch.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/MC/MCContext.h"
29 #include "llvm/MC/MCExpr.h"
30 #include "llvm/MC/MCInst.h"
31 #include "llvm/MC/MCInstrDesc.h"
32 #include "llvm/MC/MCInstrInfo.h"
33 #include "llvm/MC/MCParser/MCAsmLexer.h"
34 #include "llvm/MC/MCParser/MCAsmParser.h"
35 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
36 #include "llvm/MC/MCParser/MCAsmParserUtils.h"
37 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
38 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
39 #include "llvm/MC/MCRegisterInfo.h"
40 #include "llvm/MC/MCSection.h"
41 #include "llvm/MC/MCStreamer.h"
42 #include "llvm/MC/MCSubtargetInfo.h"
43 #include "llvm/MC/MCSymbol.h"
44 #include "llvm/MC/TargetRegistry.h"
45 #include "llvm/Support/ARMBuildAttributes.h"
46 #include "llvm/Support/ARMEHABI.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/CommandLine.h"
49 #include "llvm/Support/Compiler.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/MathExtras.h"
52 #include "llvm/Support/SMLoc.h"
53 #include "llvm/Support/raw_ostream.h"
54 #include "llvm/TargetParser/SubtargetFeature.h"
55 #include "llvm/TargetParser/TargetParser.h"
56 #include "llvm/TargetParser/Triple.h"
57 #include <algorithm>
58 #include <cassert>
59 #include <cstddef>
60 #include <cstdint>
61 #include <iterator>
62 #include <limits>
63 #include <memory>
64 #include <string>
65 #include <utility>
66 #include <vector>
67 
68 #define DEBUG_TYPE "asm-parser"
69 
70 using namespace llvm;
71 
72 namespace llvm {
73 struct ARMInstrTable {
74   MCInstrDesc Insts[4445];
75   MCOperandInfo OperandInfo[3026];
76   MCPhysReg ImplicitOps[130];
77 };
78 extern const ARMInstrTable ARMDescs;
79 } // end namespace llvm
80 
81 namespace {
82 
83 enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
84 
85 static cl::opt<ImplicitItModeTy> ImplicitItMode(
86     "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
87     cl::desc("Allow conditional instructions outside of an IT block"),
88     cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
89                           "Accept in both ISAs, emit implicit ITs in Thumb"),
90                clEnumValN(ImplicitItModeTy::Never, "never",
91                           "Warn in ARM, reject in Thumb"),
92                clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
93                           "Accept in ARM, reject in Thumb"),
94                clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
95                           "Warn in ARM, emit implicit ITs in Thumb")));
96 
97 static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
98                                         cl::init(false));
99 
100 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
101 
102 static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
103   // Position==0 means we're not in an IT block at all. Position==1
104   // means we want the first state bit, which is always 0 (Then).
105   // Position==2 means we want the second state bit, stored at bit 3
106   // of Mask, and so on downwards. So (5 - Position) will shift the
107   // right bit down to bit 0, including the always-0 bit at bit 4 for
108   // the mandatory initial Then.
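      // Example (in this parser's mask format, where a 1 bit means "else"):
      // Mask = 0b0110 describes an ITTE block. Position 2 reads bit 3 (0,
      // Then), Position 3 reads bit 2 (1, Else), and the trailing 1 at bit 1
      // marks the end of the block (length = 4 - countr_zero(Mask) = 3).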
109   return (Mask >> (5 - Position) & 1);
110 }
111 
112 class UnwindContext {
113   using Locs = SmallVector<SMLoc, 4>;
114 
115   MCAsmParser &Parser;
116   Locs FnStartLocs;
117   Locs CantUnwindLocs;
118   Locs PersonalityLocs;
119   Locs PersonalityIndexLocs;
120   Locs HandlerDataLocs;
121   int FPReg;
122 
123 public:
124   UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
125 
126   bool hasFnStart() const { return !FnStartLocs.empty(); }
127   bool cantUnwind() const { return !CantUnwindLocs.empty(); }
128   bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
129 
130   bool hasPersonality() const {
131     return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
132   }
133 
134   void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
135   void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
136   void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
137   void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
138   void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
139 
140   void saveFPReg(int Reg) { FPReg = Reg; }
141   int getFPReg() const { return FPReg; }
142 
143   void emitFnStartLocNotes() const {
144     for (const SMLoc &Loc : FnStartLocs)
145       Parser.Note(Loc, ".fnstart was specified here");
146   }
147 
148   void emitCantUnwindLocNotes() const {
149     for (const SMLoc &Loc : CantUnwindLocs)
150       Parser.Note(Loc, ".cantunwind was specified here");
151   }
152 
153   void emitHandlerDataLocNotes() const {
154     for (const SMLoc &Loc : HandlerDataLocs)
155       Parser.Note(Loc, ".handlerdata was specified here");
156   }
157 
158   void emitPersonalityLocNotes() const {
159     for (Locs::const_iterator PI = PersonalityLocs.begin(),
160                               PE = PersonalityLocs.end(),
161                               PII = PersonalityIndexLocs.begin(),
162                               PIE = PersonalityIndexLocs.end();
163          PI != PE || PII != PIE;) {
164       if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
165         Parser.Note(*PI++, ".personality was specified here");
166       else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
167         Parser.Note(*PII++, ".personalityindex was specified here");
168       else
169         llvm_unreachable(".personality and .personalityindex cannot be "
170                          "at the same location");
171     }
172   }
173 
174   void reset() {
175     FnStartLocs = Locs();
176     CantUnwindLocs = Locs();
177     PersonalityLocs = Locs();
178     HandlerDataLocs = Locs();
179     PersonalityIndexLocs = Locs();
180     FPReg = ARM::SP;
181   }
182 };
183 
184 // Various sets of ARM instruction mnemonics which are used by the asm parser
185 class ARMMnemonicSets {
186   StringSet<> CDE;
187   StringSet<> CDEWithVPTSuffix;
188 public:
189   ARMMnemonicSets(const MCSubtargetInfo &STI);
190 
191   /// Returns true iff a given mnemonic is a CDE instruction
192   bool isCDEInstr(StringRef Mnemonic) {
193     // Quick check before searching the set
194     if (!Mnemonic.starts_with("cx") && !Mnemonic.starts_with("vcx"))
195       return false;
196     return CDE.count(Mnemonic);
197   }
198 
199   /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
200   /// (possibly with a predication suffix "e" or "t")
201   bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
202     if (!Mnemonic.starts_with("vcx"))
203       return false;
204     return CDEWithVPTSuffix.count(Mnemonic);
205   }
206 
207   /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
208   /// (possibly with a condition suffix)
209   bool isITPredicableCDEInstr(StringRef Mnemonic) {
210     if (!Mnemonic.starts_with("cx"))
211       return false;
212     return Mnemonic.starts_with("cx1a") || Mnemonic.starts_with("cx1da") ||
213            Mnemonic.starts_with("cx2a") || Mnemonic.starts_with("cx2da") ||
214            Mnemonic.starts_with("cx3a") || Mnemonic.starts_with("cx3da");
215   }
216 
217   /// Return true iff a given mnemonic is an integer CDE instruction with
218   /// dual-register destination
219   bool isCDEDualRegInstr(StringRef Mnemonic) {
220     if (!Mnemonic.starts_with("cx"))
221       return false;
222     return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
223            Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
224            Mnemonic == "cx3d" || Mnemonic == "cx3da";
225   }
226 };
227 
228 ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
229   for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
230                              "cx2", "cx2a", "cx2d", "cx2da",
231                              "cx3", "cx3a", "cx3d", "cx3da", })
232     CDE.insert(Mnemonic);
233   for (StringRef Mnemonic :
234        {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
235     CDE.insert(Mnemonic);
236     CDEWithVPTSuffix.insert(Mnemonic);
237     CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
238     CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
239   }
240 }
241 
242 class ARMAsmParser : public MCTargetAsmParser {
243   const MCRegisterInfo *MRI;
244   UnwindContext UC;
245   ARMMnemonicSets MS;
246 
247   ARMTargetStreamer &getTargetStreamer() {
248     assert(getParser().getStreamer().getTargetStreamer() &&
249            "do not have a target streamer");
250     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
251     return static_cast<ARMTargetStreamer &>(TS);
252   }
253 
254   // Map of register aliases registered via the .req directive.
255   StringMap<unsigned> RegisterReqs;
256 
257   bool NextSymbolIsThumb;
258 
259   bool useImplicitITThumb() const {
260     return ImplicitItMode == ImplicitItModeTy::Always ||
261            ImplicitItMode == ImplicitItModeTy::ThumbOnly;
262   }
263 
264   bool useImplicitITARM() const {
265     return ImplicitItMode == ImplicitItModeTy::Always ||
266            ImplicitItMode == ImplicitItModeTy::ARMOnly;
267   }
268 
269   struct {
270     ARMCC::CondCodes Cond;    // Condition for IT block.
271     unsigned Mask:4;          // Condition mask for instructions.
272                               // Starting at first 1 (from lsb).
273                               //   '1'  condition as indicated in IT.
274                               //   '0'  inverse of condition (else).
275                               // Count of instructions in IT block is
276                               // 4 - trailingzeroes(mask)
277                               // Note that this does not have the same encoding
278                               // as in the IT instruction, which also depends
279                               // on the low bit of the condition code.
280 
281     unsigned CurPosition;     // Current position in parsing of IT
282                               // block. In range [0,4], with 0 being the IT
283                               // instruction itself. Initialized according to
284                               // count of instructions in block.  ~0U if no
285                               // active IT block.
286 
287     bool IsExplicit;          // true  - The IT instruction was present in the
288                               //         input, we should not modify it.
289                               // false - The IT instruction was added
290                               //         implicitly, we can extend it if that
291                               //         would be legal.
292   } ITState;
293 
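      // Conditional instructions that have been parsed inside an implicit IT
      // block but not yet emitted; flushPendingInstructions() emits the IT
      // instruction followed by these.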
294   SmallVector<MCInst, 4> PendingConditionalInsts;
295 
296   void flushPendingInstructions(MCStreamer &Out) override {
297     if (!inImplicitITBlock()) {
298       assert(PendingConditionalInsts.size() == 0);
299       return;
300     }
301 
302     // Emit the IT instruction
303     MCInst ITInst;
304     ITInst.setOpcode(ARM::t2IT);
305     ITInst.addOperand(MCOperand::createImm(ITState.Cond));
306     ITInst.addOperand(MCOperand::createImm(ITState.Mask));
307     Out.emitInstruction(ITInst, getSTI());
308 
309     // Emit the conditional instructions
310     assert(PendingConditionalInsts.size() <= 4);
311     for (const MCInst &Inst : PendingConditionalInsts) {
312       Out.emitInstruction(Inst, getSTI());
313     }
314     PendingConditionalInsts.clear();
315 
316     // Clear the IT state
317     ITState.Mask = 0;
318     ITState.CurPosition = ~0U;
319   }
320 
321   bool inITBlock() { return ITState.CurPosition != ~0U; }
322   bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
323   bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
324 
325   bool lastInITBlock() {
326     return ITState.CurPosition == 4 - (unsigned)llvm::countr_zero(ITState.Mask);
327   }
328 
329   void forwardITPosition() {
330     if (!inITBlock()) return;
331     // Move to the next instruction in the IT block, if there is one. If not,
332     // mark the block as done, except for implicit IT blocks, which we leave
333     // open until we find an instruction that can't be added to it.
334     unsigned TZ = llvm::countr_zero(ITState.Mask);
335     if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
336       ITState.CurPosition = ~0U; // Done with the IT block after this.
337   }
338 
339   // Rewind the state of the current IT block, removing the last slot from it.
340   void rewindImplicitITPosition() {
341     assert(inImplicitITBlock());
342     assert(ITState.CurPosition > 1);
343     ITState.CurPosition--;
344     unsigned TZ = llvm::countr_zero(ITState.Mask);
345     unsigned NewMask = 0;
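        // Keep the condition bits of the slots that remain (bits TZ+2 and
        // TZ+3) and move the terminating 1 up one position, dropping the bit
        // that described the removed slot.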
346     NewMask |= ITState.Mask & (0xC << TZ);
347     NewMask |= 0x2 << TZ;
348     ITState.Mask = NewMask;
349   }
350 
351   // Rewind the state of the current IT block, removing the last slot from it.
352   // If we were at the first slot, this closes the IT block.
353   void discardImplicitITBlock() {
354     assert(inImplicitITBlock());
355     assert(ITState.CurPosition == 1);
356     ITState.CurPosition = ~0U;
357   }
358 
359   // Return the low-subreg of a given Q register.
360   unsigned getDRegFromQReg(unsigned QReg) const {
361     return MRI->getSubReg(QReg, ARM::dsub_0);
362   }
363 
364   // Get the condition code corresponding to the current IT block slot.
365   ARMCC::CondCodes currentITCond() {
366     unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
367     return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
368   }
369 
370   // Invert the condition of the current IT block slot without changing any
371   // other slots in the same block.
372   void invertCurrentITCondition() {
373     if (ITState.CurPosition == 1) {
374       ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
375     } else {
376       ITState.Mask ^= 1 << (5 - ITState.CurPosition);
377     }
378   }
379 
380   // Returns true if the current IT block is full (all 4 slots used).
381   bool isITBlockFull() {
382     return inITBlock() && (ITState.Mask & 1);
383   }
384 
385   // Extend the current implicit IT block to have one more slot with the given
386   // condition code.
387   void extendImplicitITBlock(ARMCC::CondCodes Cond) {
388     assert(inImplicitITBlock());
389     assert(!isITBlockFull());
390     assert(Cond == ITState.Cond ||
391            Cond == ARMCC::getOppositeCondition(ITState.Cond));
392     unsigned TZ = llvm::countr_zero(ITState.Mask);
393     unsigned NewMask = 0;
394     // Keep any existing condition bits.
395     NewMask |= ITState.Mask & (0xE << TZ);
396     // Insert the new condition bit.
397     NewMask |= (Cond != ITState.Cond) << TZ;
398     // Move the trailing 1 down one bit.
399     NewMask |= 1 << (TZ - 1);
400     ITState.Mask = NewMask;
401   }
402 
403   // Create a new implicit IT block with a dummy condition code.
404   void startImplicitITBlock() {
405     assert(!inITBlock());
406     ITState.Cond = ARMCC::AL;
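        // Mask 0b1000 describes a block with a single (implicit) Then slot;
        // extendImplicitITBlock() grows it as further conditional
        // instructions are parsed.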
407     ITState.Mask = 8;
408     ITState.CurPosition = 1;
409     ITState.IsExplicit = false;
410   }
411 
412   // Create a new explicit IT block with the given condition and mask.
413   // The mask should be in the format used in ARMOperand and
414   // MCOperand, with a 1 implying 'e', regardless of the low bit of
415   // the condition.
416   void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
417     assert(!inITBlock());
418     ITState.Cond = Cond;
419     ITState.Mask = Mask;
420     ITState.CurPosition = 0;
421     ITState.IsExplicit = true;
422   }
423 
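      // State of the current VPT block, tracked analogously to ITState above.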
424   struct {
425     unsigned Mask : 4;
426     unsigned CurPosition;
427   } VPTState;
428   bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
429   void forwardVPTPosition() {
430     if (!inVPTBlock()) return;
431     unsigned TZ = llvm::countr_zero(VPTState.Mask);
432     if (++VPTState.CurPosition == 5 - TZ)
433       VPTState.CurPosition = ~0U;
434   }
435 
436   void Note(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
437     return getParser().Note(L, Msg, Range);
438   }
439 
440   bool Warning(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
441     return getParser().Warning(L, Msg, Range);
442   }
443 
444   bool Error(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
445     return getParser().Error(L, Msg, Range);
446   }
447 
448   bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
449                            unsigned ListNo, bool IsARPop = false);
450   bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
451                            unsigned ListNo);
452 
453   int tryParseRegister();
454   bool tryParseRegisterWithWriteBack(OperandVector &);
455   int tryParseShiftRegister(OperandVector &);
456   bool parseRegisterList(OperandVector &, bool EnforceOrder = true,
457                          bool AllowRAAC = false);
458   bool parseMemory(OperandVector &);
459   bool parseOperand(OperandVector &, StringRef Mnemonic);
460   bool parseImmExpr(int64_t &Out);
461   bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
462   bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
463                               unsigned &ShiftAmount);
464   bool parseLiteralValues(unsigned Size, SMLoc L);
465   bool parseDirectiveThumb(SMLoc L);
466   bool parseDirectiveARM(SMLoc L);
467   bool parseDirectiveThumbFunc(SMLoc L);
468   bool parseDirectiveCode(SMLoc L);
469   bool parseDirectiveSyntax(SMLoc L);
470   bool parseDirectiveReq(StringRef Name, SMLoc L);
471   bool parseDirectiveUnreq(SMLoc L);
472   bool parseDirectiveArch(SMLoc L);
473   bool parseDirectiveEabiAttr(SMLoc L);
474   bool parseDirectiveCPU(SMLoc L);
475   bool parseDirectiveFPU(SMLoc L);
476   bool parseDirectiveFnStart(SMLoc L);
477   bool parseDirectiveFnEnd(SMLoc L);
478   bool parseDirectiveCantUnwind(SMLoc L);
479   bool parseDirectivePersonality(SMLoc L);
480   bool parseDirectiveHandlerData(SMLoc L);
481   bool parseDirectiveSetFP(SMLoc L);
482   bool parseDirectivePad(SMLoc L);
483   bool parseDirectiveRegSave(SMLoc L, bool IsVector);
484   bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
485   bool parseDirectiveLtorg(SMLoc L);
486   bool parseDirectiveEven(SMLoc L);
487   bool parseDirectivePersonalityIndex(SMLoc L);
488   bool parseDirectiveUnwindRaw(SMLoc L);
489   bool parseDirectiveTLSDescSeq(SMLoc L);
490   bool parseDirectiveMovSP(SMLoc L);
491   bool parseDirectiveObjectArch(SMLoc L);
492   bool parseDirectiveArchExtension(SMLoc L);
493   bool parseDirectiveAlign(SMLoc L);
494   bool parseDirectiveThumbSet(SMLoc L);
495 
496   bool parseDirectiveSEHAllocStack(SMLoc L, bool Wide);
497   bool parseDirectiveSEHSaveRegs(SMLoc L, bool Wide);
498   bool parseDirectiveSEHSaveSP(SMLoc L);
499   bool parseDirectiveSEHSaveFRegs(SMLoc L);
500   bool parseDirectiveSEHSaveLR(SMLoc L);
501   bool parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment);
502   bool parseDirectiveSEHNop(SMLoc L, bool Wide);
503   bool parseDirectiveSEHEpilogStart(SMLoc L, bool Condition);
504   bool parseDirectiveSEHEpilogEnd(SMLoc L);
505   bool parseDirectiveSEHCustom(SMLoc L);
506 
507   bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
508   StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
509                           unsigned &PredicationCode,
510                           unsigned &VPTPredicationCode, bool &CarrySetting,
511                           unsigned &ProcessorIMod, StringRef &ITMask);
512   void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
513                              StringRef FullInst, bool &CanAcceptCarrySet,
514                              bool &CanAcceptPredicationCode,
515                              bool &CanAcceptVPTPredicationCode);
516   bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);
517 
518   void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
519                                      OperandVector &Operands);
520   bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands);
521 
522   bool isThumb() const {
523     // FIXME: Can tablegen auto-generate this?
524     return getSTI().hasFeature(ARM::ModeThumb);
525   }
526 
527   bool isThumbOne() const {
528     return isThumb() && !getSTI().hasFeature(ARM::FeatureThumb2);
529   }
530 
531   bool isThumbTwo() const {
532     return isThumb() && getSTI().hasFeature(ARM::FeatureThumb2);
533   }
534 
535   bool hasThumb() const {
536     return getSTI().hasFeature(ARM::HasV4TOps);
537   }
538 
539   bool hasThumb2() const {
540     return getSTI().hasFeature(ARM::FeatureThumb2);
541   }
542 
543   bool hasV6Ops() const {
544     return getSTI().hasFeature(ARM::HasV6Ops);
545   }
546 
547   bool hasV6T2Ops() const {
548     return getSTI().hasFeature(ARM::HasV6T2Ops);
549   }
550 
551   bool hasV6MOps() const {
552     return getSTI().hasFeature(ARM::HasV6MOps);
553   }
554 
555   bool hasV7Ops() const {
556     return getSTI().hasFeature(ARM::HasV7Ops);
557   }
558 
559   bool hasV8Ops() const {
560     return getSTI().hasFeature(ARM::HasV8Ops);
561   }
562 
563   bool hasV8MBaseline() const {
564     return getSTI().hasFeature(ARM::HasV8MBaselineOps);
565   }
566 
567   bool hasV8MMainline() const {
568     return getSTI().hasFeature(ARM::HasV8MMainlineOps);
569   }
570   bool hasV8_1MMainline() const {
571     return getSTI().hasFeature(ARM::HasV8_1MMainlineOps);
572   }
573   bool hasMVE() const {
574     return getSTI().hasFeature(ARM::HasMVEIntegerOps);
575   }
576   bool hasMVEFloat() const {
577     return getSTI().hasFeature(ARM::HasMVEFloatOps);
578   }
579   bool hasCDE() const {
580     return getSTI().hasFeature(ARM::HasCDEOps);
581   }
582   bool has8MSecExt() const {
583     return getSTI().hasFeature(ARM::Feature8MSecExt);
584   }
585 
586   bool hasARM() const {
587     return !getSTI().hasFeature(ARM::FeatureNoARM);
588   }
589 
590   bool hasDSP() const {
591     return getSTI().hasFeature(ARM::FeatureDSP);
592   }
593 
594   bool hasD32() const {
595     return getSTI().hasFeature(ARM::FeatureD32);
596   }
597 
598   bool hasV8_1aOps() const {
599     return getSTI().hasFeature(ARM::HasV8_1aOps);
600   }
601 
602   bool hasRAS() const {
603     return getSTI().hasFeature(ARM::FeatureRAS);
604   }
605 
606   void SwitchMode() {
607     MCSubtargetInfo &STI = copySTI();
608     auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
609     setAvailableFeatures(FB);
610   }
611 
612   void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
613 
614   bool isMClass() const {
615     return getSTI().hasFeature(ARM::FeatureMClass);
616   }
617 
618   /// @name Auto-generated Match Functions
619   /// {
620 
621 #define GET_ASSEMBLER_HEADER
622 #include "ARMGenAsmMatcher.inc"
623 
624   /// }
625 
626   ParseStatus parseITCondCode(OperandVector &);
627   ParseStatus parseCoprocNumOperand(OperandVector &);
628   ParseStatus parseCoprocRegOperand(OperandVector &);
629   ParseStatus parseCoprocOptionOperand(OperandVector &);
630   ParseStatus parseMemBarrierOptOperand(OperandVector &);
631   ParseStatus parseTraceSyncBarrierOptOperand(OperandVector &);
632   ParseStatus parseInstSyncBarrierOptOperand(OperandVector &);
633   ParseStatus parseProcIFlagsOperand(OperandVector &);
634   ParseStatus parseMSRMaskOperand(OperandVector &);
635   ParseStatus parseBankedRegOperand(OperandVector &);
636   ParseStatus parsePKHImm(OperandVector &O, StringRef Op, int Low, int High);
637   ParseStatus parsePKHLSLImm(OperandVector &O) {
638     return parsePKHImm(O, "lsl", 0, 31);
639   }
640   ParseStatus parsePKHASRImm(OperandVector &O) {
641     return parsePKHImm(O, "asr", 1, 32);
642   }
643   ParseStatus parseSetEndImm(OperandVector &);
644   ParseStatus parseShifterImm(OperandVector &);
645   ParseStatus parseRotImm(OperandVector &);
646   ParseStatus parseModImm(OperandVector &);
647   ParseStatus parseBitfield(OperandVector &);
648   ParseStatus parsePostIdxReg(OperandVector &);
649   ParseStatus parseAM3Offset(OperandVector &);
650   ParseStatus parseFPImm(OperandVector &);
651   ParseStatus parseVectorList(OperandVector &);
652   ParseStatus parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
653                               SMLoc &EndLoc);
654 
655   // Asm Match Converter Methods
656   void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
657   void cvtThumbBranches(MCInst &Inst, const OperandVector &);
658   void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
659 
660   bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
661   bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
662   bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
663   bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
664   bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
665   bool isITBlockTerminator(MCInst &Inst) const;
666   void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
667   bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
668                         bool Load, bool ARMMode, bool Writeback);
669 
670 public:
671   enum ARMMatchResultTy {
672     Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
673     Match_RequiresNotITBlock,
674     Match_RequiresV6,
675     Match_RequiresThumb2,
676     Match_RequiresV8,
677     Match_RequiresFlagSetting,
678 #define GET_OPERAND_DIAGNOSTIC_TYPES
679 #include "ARMGenAsmMatcher.inc"
680 
681   };
682 
683   ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
684                const MCInstrInfo &MII, const MCTargetOptions &Options)
685     : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
686     MCAsmParserExtension::Initialize(Parser);
687 
688     // Cache the MCRegisterInfo.
689     MRI = getContext().getRegisterInfo();
690 
691     // Initialize the set of available features.
692     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
693 
694     // Add build attributes based on the selected target.
695     if (AddBuildAttributes)
696       getTargetStreamer().emitTargetAttributes(STI);
697 
698     // Not in an ITBlock to start with.
699     ITState.CurPosition = ~0U;
700 
701     VPTState.CurPosition = ~0U;
702 
703     NextSymbolIsThumb = false;
704   }
705 
706   // Implementation of the MCTargetAsmParser interface:
707   bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
708   ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
709                                SMLoc &EndLoc) override;
710   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
711                         SMLoc NameLoc, OperandVector &Operands) override;
712   bool ParseDirective(AsmToken DirectiveID) override;
713 
714   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
715                                       unsigned Kind) override;
716   unsigned checkTargetMatchPredicate(MCInst &Inst) override;
717 
718   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
719                                OperandVector &Operands, MCStreamer &Out,
720                                uint64_t &ErrorInfo,
721                                bool MatchingInlineAsm) override;
722   unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
723                             SmallVectorImpl<NearMissInfo> &NearMisses,
724                             bool MatchingInlineAsm, bool &EmitInITBlock,
725                             MCStreamer &Out);
726 
727   struct NearMissMessage {
728     SMLoc Loc;
729     SmallString<128> Message;
730   };
731 
732   const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
733 
734   void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
735                         SmallVectorImpl<NearMissMessage> &NearMissesOut,
736                         SMLoc IDLoc, OperandVector &Operands);
737   void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
738                         OperandVector &Operands);
739 
740   void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override;
741 
742   void onLabelParsed(MCSymbol *Symbol) override;
743 };
744 
745 /// ARMOperand - Instances of this class represent a parsed ARM machine
746 /// operand.
747 class ARMOperand : public MCParsedAsmOperand {
748   enum KindTy {
749     k_CondCode,
750     k_VPTPred,
751     k_CCOut,
752     k_ITCondMask,
753     k_CoprocNum,
754     k_CoprocReg,
755     k_CoprocOption,
756     k_Immediate,
757     k_MemBarrierOpt,
758     k_InstSyncBarrierOpt,
759     k_TraceSyncBarrierOpt,
760     k_Memory,
761     k_PostIndexRegister,
762     k_MSRMask,
763     k_BankedReg,
764     k_ProcIFlags,
765     k_VectorIndex,
766     k_Register,
767     k_RegisterList,
768     k_RegisterListWithAPSR,
769     k_DPRRegisterList,
770     k_SPRRegisterList,
771     k_FPSRegisterListWithVPR,
772     k_FPDRegisterListWithVPR,
773     k_VectorList,
774     k_VectorListAllLanes,
775     k_VectorListIndexed,
776     k_ShiftedRegister,
777     k_ShiftedImmediate,
778     k_ShifterImmediate,
779     k_RotateImmediate,
780     k_ModifiedImmediate,
781     k_ConstantPoolImmediate,
782     k_BitfieldDescriptor,
783     k_Token,
784   } Kind;
785 
786   SMLoc StartLoc, EndLoc, AlignmentLoc;
787   SmallVector<unsigned, 8> Registers;
788 
789   struct CCOp {
790     ARMCC::CondCodes Val;
791   };
792 
793   struct VCCOp {
794     ARMVCC::VPTCodes Val;
795   };
796 
797   struct CopOp {
798     unsigned Val;
799   };
800 
801   struct CoprocOptionOp {
802     unsigned Val;
803   };
804 
805   struct ITMaskOp {
806     unsigned Mask:4;
807   };
808 
809   struct MBOptOp {
810     ARM_MB::MemBOpt Val;
811   };
812 
813   struct ISBOptOp {
814     ARM_ISB::InstSyncBOpt Val;
815   };
816 
817   struct TSBOptOp {
818     ARM_TSB::TraceSyncBOpt Val;
819   };
820 
821   struct IFlagsOp {
822     ARM_PROC::IFlags Val;
823   };
824 
825   struct MMaskOp {
826     unsigned Val;
827   };
828 
829   struct BankedRegOp {
830     unsigned Val;
831   };
832 
833   struct TokOp {
834     const char *Data;
835     unsigned Length;
836   };
837 
838   struct RegOp {
839     unsigned RegNum;
840   };
841 
842   // A vector register list is a sequential list of 1 to 4 registers.
843   struct VectorListOp {
844     unsigned RegNum;
845     unsigned Count;
846     unsigned LaneIndex;
847     bool isDoubleSpaced;
848   };
849 
850   struct VectorIndexOp {
851     unsigned Val;
852   };
853 
854   struct ImmOp {
855     const MCExpr *Val;
856   };
857 
858   /// Combined record for all forms of ARM address expressions.
859   struct MemoryOp {
860     unsigned BaseRegNum;
861     // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
862     // was specified.
863     const MCExpr *OffsetImm;  // Offset immediate value
864     unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
865     ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
866     unsigned ShiftImm;        // shift for OffsetReg.
867     unsigned Alignment;       // 0 = no alignment specified
868     // n = alignment in bytes (2, 4, 8, 16, or 32)
869     unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
870   };
871 
872   struct PostIdxRegOp {
873     unsigned RegNum;
874     bool isAdd;
875     ARM_AM::ShiftOpc ShiftTy;
876     unsigned ShiftImm;
877   };
878 
879   struct ShifterImmOp {
880     bool isASR;
881     unsigned Imm;
882   };
883 
884   struct RegShiftedRegOp {
885     ARM_AM::ShiftOpc ShiftTy;
886     unsigned SrcReg;
887     unsigned ShiftReg;
888     unsigned ShiftImm;
889   };
890 
891   struct RegShiftedImmOp {
892     ARM_AM::ShiftOpc ShiftTy;
893     unsigned SrcReg;
894     unsigned ShiftImm;
895   };
896 
897   struct RotImmOp {
898     unsigned Imm;
899   };
900 
901   struct ModImmOp {
902     unsigned Bits;
903     unsigned Rot;
904   };
905 
906   struct BitfieldOp {
907     unsigned LSB;
908     unsigned Width;
909   };
910 
911   union {
912     struct CCOp CC;
913     struct VCCOp VCC;
914     struct CopOp Cop;
915     struct CoprocOptionOp CoprocOption;
916     struct MBOptOp MBOpt;
917     struct ISBOptOp ISBOpt;
918     struct TSBOptOp TSBOpt;
919     struct ITMaskOp ITMask;
920     struct IFlagsOp IFlags;
921     struct MMaskOp MMask;
922     struct BankedRegOp BankedReg;
923     struct TokOp Tok;
924     struct RegOp Reg;
925     struct VectorListOp VectorList;
926     struct VectorIndexOp VectorIndex;
927     struct ImmOp Imm;
928     struct MemoryOp Memory;
929     struct PostIdxRegOp PostIdxReg;
930     struct ShifterImmOp ShifterImm;
931     struct RegShiftedRegOp RegShiftedReg;
932     struct RegShiftedImmOp RegShiftedImm;
933     struct RotImmOp RotImm;
934     struct ModImmOp ModImm;
935     struct BitfieldOp Bitfield;
936   };
937 
938 public:
939   ARMOperand(KindTy K) : Kind(K) {}
940 
941   /// getStartLoc - Get the location of the first token of this operand.
942   SMLoc getStartLoc() const override { return StartLoc; }
943 
944   /// getEndLoc - Get the location of the last token of this operand.
945   SMLoc getEndLoc() const override { return EndLoc; }
946 
947   /// getLocRange - Get the range between the first and last token of this
948   /// operand.
949   SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
950 
951   /// getAlignmentLoc - Get the location of the Alignment token of this operand.
952   SMLoc getAlignmentLoc() const {
953     assert(Kind == k_Memory && "Invalid access!");
954     return AlignmentLoc;
955   }
956 
957   ARMCC::CondCodes getCondCode() const {
958     assert(Kind == k_CondCode && "Invalid access!");
959     return CC.Val;
960   }
961 
962   ARMVCC::VPTCodes getVPTPred() const {
963     assert(isVPTPred() && "Invalid access!");
964     return VCC.Val;
965   }
966 
967   unsigned getCoproc() const {
968     assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
969     return Cop.Val;
970   }
971 
972   StringRef getToken() const {
973     assert(Kind == k_Token && "Invalid access!");
974     return StringRef(Tok.Data, Tok.Length);
975   }
976 
977   unsigned getReg() const override {
978     assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
979     return Reg.RegNum;
980   }
981 
982   const SmallVectorImpl<unsigned> &getRegList() const {
983     assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
984             Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
985             Kind == k_FPSRegisterListWithVPR ||
986             Kind == k_FPDRegisterListWithVPR) &&
987            "Invalid access!");
988     return Registers;
989   }
990 
991   const MCExpr *getImm() const {
992     assert(isImm() && "Invalid access!");
993     return Imm.Val;
994   }
995 
996   const MCExpr *getConstantPoolImm() const {
997     assert(isConstantPoolImm() && "Invalid access!");
998     return Imm.Val;
999   }
1000 
1001   unsigned getVectorIndex() const {
1002     assert(Kind == k_VectorIndex && "Invalid access!");
1003     return VectorIndex.Val;
1004   }
1005 
1006   ARM_MB::MemBOpt getMemBarrierOpt() const {
1007     assert(Kind == k_MemBarrierOpt && "Invalid access!");
1008     return MBOpt.Val;
1009   }
1010 
1011   ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
1012     assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
1013     return ISBOpt.Val;
1014   }
1015 
1016   ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
1017     assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
1018     return TSBOpt.Val;
1019   }
1020 
1021   ARM_PROC::IFlags getProcIFlags() const {
1022     assert(Kind == k_ProcIFlags && "Invalid access!");
1023     return IFlags.Val;
1024   }
1025 
1026   unsigned getMSRMask() const {
1027     assert(Kind == k_MSRMask && "Invalid access!");
1028     return MMask.Val;
1029   }
1030 
1031   unsigned getBankedReg() const {
1032     assert(Kind == k_BankedReg && "Invalid access!");
1033     return BankedReg.Val;
1034   }
1035 
1036   bool isCoprocNum() const { return Kind == k_CoprocNum; }
1037   bool isCoprocReg() const { return Kind == k_CoprocReg; }
1038   bool isCoprocOption() const { return Kind == k_CoprocOption; }
1039   bool isCondCode() const { return Kind == k_CondCode; }
1040   bool isVPTPred() const { return Kind == k_VPTPred; }
1041   bool isCCOut() const { return Kind == k_CCOut; }
1042   bool isITMask() const { return Kind == k_ITCondMask; }
1043   bool isITCondCode() const { return Kind == k_CondCode; }
1044   bool isImm() const override {
1045     return Kind == k_Immediate;
1046   }
1047 
1048   bool isARMBranchTarget() const {
1049     if (!isImm()) return false;
1050 
1051     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1052       return CE->getValue() % 4 == 0;
1053     return true;
1054   }
1055 
1056 
1057   bool isThumbBranchTarget() const {
1058     if (!isImm()) return false;
1059 
1060     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1061       return CE->getValue() % 2 == 0;
1062     return true;
1063   }
1064 
1065   // checks whether this operand is an unsigned offset which fits in a field
1066   // of the specified width, scaled by a specific number of bits
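       // e.g. isUnsignedOffset<8, 2>() accepts multiples of 4 in [0, 1020]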
1067   template<unsigned width, unsigned scale>
1068   bool isUnsignedOffset() const {
1069     if (!isImm()) return false;
1070     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1071     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1072       int64_t Val = CE->getValue();
1073       int64_t Align = 1LL << scale;
1074       int64_t Max = Align * ((1LL << width) - 1);
1075       return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
1076     }
1077     return false;
1078   }
1079 
1080   // checks whether this operand is a signed offset which fits in a field
1081   // of the specified width, scaled by a specific number of bits
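       // e.g. isSignedOffset<11, 1>() accepts even values in [-2048, 2046]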
1082   template<unsigned width, unsigned scale>
1083   bool isSignedOffset() const {
1084     if (!isImm()) return false;
1085     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1086     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1087       int64_t Val = CE->getValue();
1088       int64_t Align = 1LL << scale;
1089       int64_t Max = Align * ((1LL << (width-1)) - 1);
1090       int64_t Min = -Align * (1LL << (width-1));
1091       return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
1092     }
1093     return false;
1094   }
1095 
1096   // checks whether this operand is an offset suitable for the LE /
1097   // LETP instructions in Arm v8.1M
1098   bool isLEOffset() const {
1099     if (!isImm()) return false;
1100     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1101     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1102       int64_t Val = CE->getValue();
1103       return Val < 0 && Val >= -4094 && (Val & 1) == 0;
1104     }
1105     return false;
1106   }
1107 
1108   // checks whether this operand is a memory operand computed as an offset
1109   // applied to PC. The offset may have 8 bits of magnitude and is represented
1110   // with two bits of shift. Textually it may be either [pc, #imm], #imm or a
1111   // relocatable expression...
1112   bool isThumbMemPC() const {
1113     int64_t Val = 0;
1114     if (isImm()) {
1115       if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1116       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
1117       if (!CE) return false;
1118       Val = CE->getValue();
1119     }
1120     else if (isGPRMem()) {
1121       if (!Memory.OffsetImm || Memory.OffsetRegNum) return false;
1122       if (Memory.BaseRegNum != ARM::PC) return false;
1123       if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
1124         Val = CE->getValue();
1125       else
1126         return false;
1127     }
1128     else return false;
1129     return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
1130   }
1131 
1132   bool isFPImm() const {
1133     if (!isImm()) return false;
1134     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1135     if (!CE) return false;
1136     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1137     return Val != -1;
1138   }
1139 
1140   template<int64_t N, int64_t M>
1141   bool isImmediate() const {
1142     if (!isImm()) return false;
1143     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1144     if (!CE) return false;
1145     int64_t Value = CE->getValue();
1146     return Value >= N && Value <= M;
1147   }
1148 
1149   template<int64_t N, int64_t M>
1150   bool isImmediateS4() const {
1151     if (!isImm()) return false;
1152     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1153     if (!CE) return false;
1154     int64_t Value = CE->getValue();
1155     return ((Value & 3) == 0) && Value >= N && Value <= M;
1156   }
1157   template<int64_t N, int64_t M>
1158   bool isImmediateS2() const {
1159     if (!isImm()) return false;
1160     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1161     if (!CE) return false;
1162     int64_t Value = CE->getValue();
1163     return ((Value & 1) == 0) && Value >= N && Value <= M;
1164   }
1165   bool isFBits16() const {
1166     return isImmediate<0, 17>();
1167   }
1168   bool isFBits32() const {
1169     return isImmediate<1, 33>();
1170   }
1171   bool isImm8s4() const {
1172     return isImmediateS4<-1020, 1020>();
1173   }
1174   bool isImm7s4() const {
1175     return isImmediateS4<-508, 508>();
1176   }
1177   bool isImm7Shift0() const {
1178     return isImmediate<-127, 127>();
1179   }
1180   bool isImm7Shift1() const {
1181     return isImmediateS2<-255, 255>();
1182   }
1183   bool isImm7Shift2() const {
1184     return isImmediateS4<-511, 511>();
1185   }
1186   bool isImm7() const {
1187     return isImmediate<-127, 127>();
1188   }
1189   bool isImm0_1020s4() const {
1190     return isImmediateS4<0, 1020>();
1191   }
1192   bool isImm0_508s4() const {
1193     return isImmediateS4<0, 508>();
1194   }
1195   bool isImm0_508s4Neg() const {
1196     if (!isImm()) return false;
1197     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1198     if (!CE) return false;
1199     int64_t Value = -CE->getValue();
1200     // Explicitly exclude zero. We want that to use the normal 0_508 version.
1201     return ((Value & 3) == 0) && Value > 0 && Value <= 508;
1202   }
1203 
1204   bool isImm0_4095Neg() const {
1205     if (!isImm()) return false;
1206     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1207     if (!CE) return false;
1208     // isImm0_4095Neg is used with 32-bit immediates only.
1209     // 32-bit immediates are zero extended to 64-bit when parsed,
1210     // thus simple -CE->getValue() results in a big negative number,
1211     // not a small positive number as intended
1212     if ((CE->getValue() >> 32) > 0) return false;
1213     uint32_t Value = -static_cast<uint32_t>(CE->getValue());
1214     return Value > 0 && Value < 4096;
1215   }
1216 
1217   bool isImm0_7() const {
1218     return isImmediate<0, 7>();
1219   }
1220 
1221   bool isImm1_16() const {
1222     return isImmediate<1, 16>();
1223   }
1224 
1225   bool isImm1_32() const {
1226     return isImmediate<1, 32>();
1227   }
1228 
1229   bool isImm8_255() const {
1230     return isImmediate<8, 255>();
1231   }
1232 
1233   bool isImm0_255Expr() const {
1234     if (!isImm())
1235       return false;
1236     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1237     // If it's not a constant expression, it'll generate a fixup and be
1238     // handled later.
1239     if (!CE)
1240       return true;
1241     int64_t Value = CE->getValue();
1242     return isUInt<8>(Value);
1243   }
1244 
1245   bool isImm256_65535Expr() const {
1246     if (!isImm()) return false;
1247     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1248     // If it's not a constant expression, it'll generate a fixup and be
1249     // handled later.
1250     if (!CE) return true;
1251     int64_t Value = CE->getValue();
1252     return Value >= 256 && Value < 65536;
1253   }
1254 
1255   bool isImm0_65535Expr() const {
1256     if (!isImm()) return false;
1257     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1258     // If it's not a constant expression, it'll generate a fixup and be
1259     // handled later.
1260     if (!CE) return true;
1261     int64_t Value = CE->getValue();
1262     return Value >= 0 && Value < 65536;
1263   }
1264 
1265   bool isImm24bit() const {
1266     return isImmediate<0, 0xffffff + 1>();
1267   }
1268 
1269   bool isImmThumbSR() const {
1270     return isImmediate<1, 33>();
1271   }
1272 
1273   bool isPKHLSLImm() const {
1274     return isImmediate<0, 32>();
1275   }
1276 
1277   bool isPKHASRImm() const {
1278     return isImmediate<0, 33>();
1279   }
1280 
1281   bool isAdrLabel() const {
1282     // If we have an immediate that's not a constant, treat it as a label
1283     // reference needing a fixup.
1284     if (isImm() && !isa<MCConstantExpr>(getImm()))
1285       return true;
1286 
1287     // If it is a constant, it must fit into a modified immediate encoding.
1288     if (!isImm()) return false;
1289     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1290     if (!CE) return false;
1291     int64_t Value = CE->getValue();
1292     return (ARM_AM::getSOImmVal(Value) != -1 ||
1293             ARM_AM::getSOImmVal(-Value) != -1);
1294   }
1295 
1296   bool isT2SOImm() const {
1297     // If we have an immediate that's not a constant, treat it as an expression
1298     // needing a fixup.
1299     if (isImm() && !isa<MCConstantExpr>(getImm())) {
1300       // We want to avoid matching :upper16: and :lower16: as we want these
1301       // expressions to match in isImm0_65535Expr()
1302       const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1303       return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
1304                              ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
1305     }
1306     if (!isImm()) return false;
1307     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1308     if (!CE) return false;
1309     int64_t Value = CE->getValue();
1310     return ARM_AM::getT2SOImmVal(Value) != -1;
1311   }
1312 
1313   bool isT2SOImmNot() const {
1314     if (!isImm()) return false;
1315     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1316     if (!CE) return false;
1317     int64_t Value = CE->getValue();
1318     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1319       ARM_AM::getT2SOImmVal(~Value) != -1;
1320   }
1321 
1322   bool isT2SOImmNeg() const {
1323     if (!isImm()) return false;
1324     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1325     if (!CE) return false;
1326     int64_t Value = CE->getValue();
1327     // Only use this when not representable as a plain so_imm.
1328     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1329       ARM_AM::getT2SOImmVal(-Value) != -1;
1330   }
1331 
1332   bool isSetEndImm() const {
1333     if (!isImm()) return false;
1334     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1335     if (!CE) return false;
1336     int64_t Value = CE->getValue();
1337     return Value == 1 || Value == 0;
1338   }
1339 
1340   bool isReg() const override { return Kind == k_Register; }
1341   bool isRegList() const { return Kind == k_RegisterList; }
1342   bool isRegListWithAPSR() const {
1343     return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
1344   }
1345   bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
1346   bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
1347   bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
1348   bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
1349   bool isToken() const override { return Kind == k_Token; }
1350   bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
1351   bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
1352   bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
1353   bool isMem() const override {
1354       return isGPRMem() || isMVEMem();
1355   }
1356   bool isMVEMem() const {
1357     if (Kind != k_Memory)
1358       return false;
1359     if (Memory.BaseRegNum &&
1360         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
1361         !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
1362       return false;
1363     if (Memory.OffsetRegNum &&
1364         !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1365             Memory.OffsetRegNum))
1366       return false;
1367     return true;
1368   }
1369   bool isGPRMem() const {
1370     if (Kind != k_Memory)
1371       return false;
1372     if (Memory.BaseRegNum &&
1373         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
1374       return false;
1375     if (Memory.OffsetRegNum &&
1376         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
1377       return false;
1378     return true;
1379   }
1380   bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1381   bool isRegShiftedReg() const {
1382     return Kind == k_ShiftedRegister &&
1383            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1384                RegShiftedReg.SrcReg) &&
1385            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1386                RegShiftedReg.ShiftReg);
1387   }
1388   bool isRegShiftedImm() const {
1389     return Kind == k_ShiftedImmediate &&
1390            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1391                RegShiftedImm.SrcReg);
1392   }
1393   bool isRotImm() const { return Kind == k_RotateImmediate; }
1394 
1395   template<unsigned Min, unsigned Max>
1396   bool isPowerTwoInRange() const {
1397     if (!isImm()) return false;
1398     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1399     if (!CE) return false;
1400     int64_t Value = CE->getValue();
1401     return Value > 0 && llvm::popcount((uint64_t)Value) == 1 && Value >= Min &&
1402            Value <= Max;
1403   }
1404   bool isModImm() const { return Kind == k_ModifiedImmediate; }
1405 
1406   bool isModImmNot() const {
1407     if (!isImm()) return false;
1408     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1409     if (!CE) return false;
1410     int64_t Value = CE->getValue();
1411     return ARM_AM::getSOImmVal(~Value) != -1;
1412   }
1413 
1414   bool isModImmNeg() const {
1415     if (!isImm()) return false;
1416     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1417     if (!CE) return false;
1418     int64_t Value = CE->getValue();
1419     return ARM_AM::getSOImmVal(Value) == -1 &&
1420       ARM_AM::getSOImmVal(-Value) != -1;
1421   }
1422 
1423   bool isThumbModImmNeg1_7() const {
1424     if (!isImm()) return false;
1425     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1426     if (!CE) return false;
1427     int32_t Value = -(int32_t)CE->getValue();
1428     return 0 < Value && Value < 8;
1429   }
1430 
1431   bool isThumbModImmNeg8_255() const {
1432     if (!isImm()) return false;
1433     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1434     if (!CE) return false;
1435     int32_t Value = -(int32_t)CE->getValue();
1436     return 7 < Value && Value < 256;
1437   }
1438 
1439   bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
1440   bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1441   bool isPostIdxRegShifted() const {
1442     return Kind == k_PostIndexRegister &&
1443            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1444   }
1445   bool isPostIdxReg() const {
1446     return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
1447   }
1448   bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1449     if (!isGPRMem())
1450       return false;
1451     // No offset of any kind.
1452     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1453      (alignOK || Memory.Alignment == Alignment);
1454   }
1455   bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
1456     if (!isGPRMem())
1457       return false;
1458 
1459     if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1460             Memory.BaseRegNum))
1461       return false;
1462 
1463     // No offset of any kind.
1464     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1465      (alignOK || Memory.Alignment == Alignment);
1466   }
1467   bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
1468     if (!isGPRMem())
1469       return false;
1470 
1471     if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
1472             Memory.BaseRegNum))
1473       return false;
1474 
1475     // No offset of any kind.
1476     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1477      (alignOK || Memory.Alignment == Alignment);
1478   }
1479   bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
1480     if (!isGPRMem())
1481       return false;
1482 
1483     if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
1484             Memory.BaseRegNum))
1485       return false;
1486 
1487     // No offset of any kind.
1488     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1489      (alignOK || Memory.Alignment == Alignment);
1490   }
1491   bool isMemPCRelImm12() const {
1492     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1493       return false;
1494     // Base register must be PC.
1495     if (Memory.BaseRegNum != ARM::PC)
1496       return false;
1497     // Immediate offset in range [-4095, 4095].
1498     if (!Memory.OffsetImm) return true;
1499     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1500       int64_t Val = CE->getValue();
1501       return (Val > -4096 && Val < 4096) ||
1502              (Val == std::numeric_limits<int32_t>::min());
1503     }
1504     return false;
1505   }
1506 
1507   bool isAlignedMemory() const {
1508     return isMemNoOffset(true);
1509   }
1510 
1511   bool isAlignedMemoryNone() const {
1512     return isMemNoOffset(false, 0);
1513   }
1514 
1515   bool isDupAlignedMemoryNone() const {
1516     return isMemNoOffset(false, 0);
1517   }
1518 
1519   bool isAlignedMemory16() const {
1520     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1521       return true;
1522     return isMemNoOffset(false, 0);
1523   }
1524 
1525   bool isDupAlignedMemory16() const {
1526     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1527       return true;
1528     return isMemNoOffset(false, 0);
1529   }
1530 
1531   bool isAlignedMemory32() const {
1532     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1533       return true;
1534     return isMemNoOffset(false, 0);
1535   }
1536 
1537   bool isDupAlignedMemory32() const {
1538     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1539       return true;
1540     return isMemNoOffset(false, 0);
1541   }
1542 
1543   bool isAlignedMemory64() const {
1544     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1545       return true;
1546     return isMemNoOffset(false, 0);
1547   }
1548 
1549   bool isDupAlignedMemory64() const {
1550     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1551       return true;
1552     return isMemNoOffset(false, 0);
1553   }
1554 
1555   bool isAlignedMemory64or128() const {
1556     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1557       return true;
1558     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1559       return true;
1560     return isMemNoOffset(false, 0);
1561   }
1562 
1563   bool isDupAlignedMemory64or128() const {
1564     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1565       return true;
1566     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1567       return true;
1568     return isMemNoOffset(false, 0);
1569   }
1570 
1571   bool isAlignedMemory64or128or256() const {
1572     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1573       return true;
1574     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1575       return true;
1576     if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1577       return true;
1578     return isMemNoOffset(false, 0);
1579   }
1580 
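  // ARM addressing mode 2 (as used by LDR/STR): either a register offset,
  // e.g. "[r0, r1, lsl #2]", or an immediate offset in [-4095, 4095],
  // e.g. "[r0, #-4]".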
1581   bool isAddrMode2() const {
1582     if (!isGPRMem() || Memory.Alignment != 0) return false;
1583     // Check for register offset.
1584     if (Memory.OffsetRegNum) return true;
1585     // Immediate offset in range [-4095, 4095].
1586     if (!Memory.OffsetImm) return true;
1587     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1588       int64_t Val = CE->getValue();
1589       return Val > -4096 && Val < 4096;
1590     }
1591     return false;
1592   }
1593 
1594   bool isAM2OffsetImm() const {
1595     if (!isImm()) return false;
1596     // Immediate offset in range [-4095, 4095].
1597     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1598     if (!CE) return false;
1599     int64_t Val = CE->getValue();
1600     return (Val == std::numeric_limits<int32_t>::min()) ||
1601            (Val > -4096 && Val < 4096);
1602   }
1603 
1604   bool isAddrMode3() const {
1605     // If we have an immediate that's not a constant, treat it as a label
1606     // reference needing a fixup. If it is a constant, it's something else
1607     // and we reject it.
1608     if (isImm() && !isa<MCConstantExpr>(getImm()))
1609       return true;
1610     if (!isGPRMem() || Memory.Alignment != 0) return false;
1611     // No shifts are legal for AM3.
1612     if (Memory.ShiftType != ARM_AM::no_shift) return false;
1613     // Check for register offset.
1614     if (Memory.OffsetRegNum) return true;
1615     // Immediate offset in range [-255, 255].
1616     if (!Memory.OffsetImm) return true;
1617     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1618       int64_t Val = CE->getValue();
1619       // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
1620       // we have to check for this too.
1621       return (Val > -256 && Val < 256) ||
1622              Val == std::numeric_limits<int32_t>::min();
1623     }
1624     return false;
1625   }
1626 
1627   bool isAM3Offset() const {
1628     if (isPostIdxReg())
1629       return true;
1630     if (!isImm())
1631       return false;
1632     // Immediate offset in range [-255, 255].
1633     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1634     if (!CE) return false;
1635     int64_t Val = CE->getValue();
1636     // Special case, #-0 is std::numeric_limits<int32_t>::min().
1637     return (Val > -256 && Val < 256) ||
1638            Val == std::numeric_limits<int32_t>::min();
1639   }
1640 
1641   bool isAddrMode5() const {
1642     // If we have an immediate that's not a constant, treat it as a label
1643     // reference needing a fixup. If it is a constant, it's something else
1644     // and we reject it.
1645     if (isImm() && !isa<MCConstantExpr>(getImm()))
1646       return true;
1647     if (!isGPRMem() || Memory.Alignment != 0) return false;
1648     // Check for register offset.
1649     if (Memory.OffsetRegNum) return false;
1650     // Immediate offset in range [-1020, 1020] and a multiple of 4.
1651     if (!Memory.OffsetImm) return true;
1652     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1653       int64_t Val = CE->getValue();
1654       return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1655              Val == std::numeric_limits<int32_t>::min();
1656     }
1657     return false;
1658   }
1659 
1660   bool isAddrMode5FP16() const {
1661     // If we have an immediate that's not a constant, treat it as a label
1662     // reference needing a fixup. If it is a constant, it's something else
1663     // and we reject it.
1664     if (isImm() && !isa<MCConstantExpr>(getImm()))
1665       return true;
1666     if (!isGPRMem() || Memory.Alignment != 0) return false;
1667     // Check for register offset.
1668     if (Memory.OffsetRegNum) return false;
1669     // Immediate offset in range [-510, 510] and a multiple of 2.
1670     if (!Memory.OffsetImm) return true;
1671     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1672       int64_t Val = CE->getValue();
1673       return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1674              Val == std::numeric_limits<int32_t>::min();
1675     }
1676     return false;
1677   }
1678 
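  // TBB takes a plain "[rn, rm]" operand, while TBH requires the halfword
  // scaling to be written explicitly, i.e. "[rn, rm, lsl #1]".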
1679   bool isMemTBB() const {
1680     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1681         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1682       return false;
1683     return true;
1684   }
1685 
1686   bool isMemTBH() const {
1687     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1688         Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1689         Memory.Alignment != 0)
1690       return false;
1691     return true;
1692   }
1693 
1694   bool isMemRegOffset() const {
1695     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1696       return false;
1697     return true;
1698   }
1699 
1700   bool isT2MemRegOffset() const {
1701     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1702         Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1703       return false;
1704     // Only lsl #{0, 1, 2, 3} allowed.
1705     if (Memory.ShiftType == ARM_AM::no_shift)
1706       return true;
1707     if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1708       return false;
1709     return true;
1710   }
1711 
1712   bool isMemThumbRR() const {
1713     // Thumb reg+reg addressing is simple. Just two registers, a base and
1714     // an offset. No shifts, negations or any other complicating factors.
1715     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1716         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1717       return false;
1718     return isARMLowRegister(Memory.BaseRegNum) &&
1719       (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1720   }
1721 
1722   bool isMemThumbRIs4() const {
1723     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1724         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1725       return false;
1726     // Immediate offset, multiple of 4 in range [0, 124].
1727     if (!Memory.OffsetImm) return true;
1728     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1729       int64_t Val = CE->getValue();
1730       return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1731     }
1732     return false;
1733   }
1734 
1735   bool isMemThumbRIs2() const {
1736     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1737         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1738       return false;
1739     // Immediate offset, multiple of 2 in range [0, 62].
1740     if (!Memory.OffsetImm) return true;
1741     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1742       int64_t Val = CE->getValue();
1743       return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1744     }
1745     return false;
1746   }
1747 
1748   bool isMemThumbRIs1() const {
1749     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1750         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1751       return false;
1752     // Immediate offset in range [0, 31].
1753     if (!Memory.OffsetImm) return true;
1754     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1755       int64_t Val = CE->getValue();
1756       return Val >= 0 && Val <= 31;
1757     }
1758     return false;
1759   }
1760 
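  // SP-relative Thumb load/store operand, e.g. "[sp, #1020]": the base must
  // be SP and the immediate a multiple of 4 in [0, 1020].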
1761   bool isMemThumbSPI() const {
1762     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1763         Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1764       return false;
1765     // Immediate offset, multiple of 4 in range [0, 1020].
1766     if (!Memory.OffsetImm) return true;
1767     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1768       int64_t Val = CE->getValue();
1769       return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1770     }
1771     return false;
1772   }
1773 
1774   bool isMemImm8s4Offset() const {
1775     // If we have an immediate that's not a constant, treat it as a label
1776     // reference needing a fixup. If it is a constant, it's something else
1777     // and we reject it.
1778     if (isImm() && !isa<MCConstantExpr>(getImm()))
1779       return true;
1780     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1781       return false;
1782     // Immediate offset a multiple of 4 in range [-1020, 1020].
1783     if (!Memory.OffsetImm) return true;
1784     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1785       int64_t Val = CE->getValue();
1786       // Special case, #-0 is std::numeric_limits<int32_t>::min().
1787       return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1788              Val == std::numeric_limits<int32_t>::min();
1789     }
1790     return false;
1791   }
1792 
1793   bool isMemImm7s4Offset() const {
1794     // If we have an immediate that's not a constant, treat it as a label
1795     // reference needing a fixup. If it is a constant, it's something else
1796     // and we reject it.
1797     if (isImm() && !isa<MCConstantExpr>(getImm()))
1798       return true;
1799     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1800         !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1801             Memory.BaseRegNum))
1802       return false;
1803     // Immediate offset a multiple of 4 in range [-508, 508].
1804     if (!Memory.OffsetImm) return true;
1805     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1806       int64_t Val = CE->getValue();
1807       // Special case, #-0 is INT32_MIN.
1808       return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
1809     }
1810     return false;
1811   }
1812 
1813   bool isMemImm0_1020s4Offset() const {
1814     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1815       return false;
1816     // Immediate offset a multiple of 4 in range [0, 1020].
1817     if (!Memory.OffsetImm) return true;
1818     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1819       int64_t Val = CE->getValue();
1820       return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1821     }
1822     return false;
1823   }
1824 
1825   bool isMemImm8Offset() const {
1826     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1827       return false;
1828     // Base reg of PC isn't allowed for these encodings.
1829     if (Memory.BaseRegNum == ARM::PC) return false;
1830     // Immediate offset in range [-255, 255].
1831     if (!Memory.OffsetImm) return true;
1832     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1833       int64_t Val = CE->getValue();
1834       return (Val == std::numeric_limits<int32_t>::min()) ||
1835              (Val > -256 && Val < 256);
1836     }
1837     return false;
1838   }
1839 
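  // A 7-bit immediate offset scaled by (1 << Bits): the written offset must
  // be a multiple of the scale and, once divided by it, lie in [-127, 127].
  // For example, with Bits == 2 the offsets -508, 0 and 508 are all accepted.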
1840   template<unsigned Bits, unsigned RegClassID>
1841   bool isMemImm7ShiftedOffset() const {
1842     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1843         !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
1844       return false;
1845 
1846     // Expect an immediate offset equal to an element of the range
1847     // [-127, 127], shifted left by Bits.
1848 
1849     if (!Memory.OffsetImm) return true;
1850     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1851       int64_t Val = CE->getValue();
1852 
1853       // INT32_MIN is a special-case value (indicating the encoding with
1854       // zero offset and the subtract bit set)
1855       if (Val == INT32_MIN)
1856         return true;
1857 
1858       unsigned Divisor = 1U << Bits;
1859 
1860       // Check that the low bits are zero
1861       if (Val % Divisor != 0)
1862         return false;
1863 
1864       // Check that the remaining offset is within range.
1865       Val /= Divisor;
1866       return (Val >= -127 && Val <= 127);
1867     }
1868     return false;
1869   }
1870 
1871   template <int shift> bool isMemRegRQOffset() const {
1872     if (!isMVEMem() || Memory.OffsetImm != nullptr || Memory.Alignment != 0)
1873       return false;
1874 
1875     if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1876             Memory.BaseRegNum))
1877       return false;
1878     if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1879             Memory.OffsetRegNum))
1880       return false;
1881 
1882     if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
1883       return false;
1884 
1885     if (shift > 0 &&
1886         (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
1887       return false;
1888 
1889     return true;
1890   }
1891 
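  // Q-register base with a scaled immediate offset (the MVE "[qN, #imm]"
  // forms): e.g. with shift == 3 the offset must be a multiple of 8 no
  // larger in magnitude than 1016.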
1892   template <int shift> bool isMemRegQOffset() const {
1893     if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1894       return false;
1895 
1896     if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1897             Memory.BaseRegNum))
1898       return false;
1899 
1900     if (!Memory.OffsetImm)
1901       return true;
1902     static_assert(shift < 56,
1903                   "Such that we don't shift by a value higher than 62");
1904     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1905       int64_t Val = CE->getValue();
1906 
1907       // The value must be a multiple of (1 << shift)
1908       if ((Val & ((1U << shift) - 1)) != 0)
1909         return false;
1910 
1911       // And be in the right range, depending on the amount that it is shifted
1912       // by: with a shift of 0 the offset fits in 7 unsigned bits, and the sign
1913       // bit is encoded separately.
1914       int64_t Range = (1U << (7 + shift)) - 1;
1915       return (Val == INT32_MIN) || (Val > -Range && Val < Range);
1916     }
1917     return false;
1918   }
1919 
1920   bool isMemPosImm8Offset() const {
1921     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1922       return false;
1923     // Immediate offset in range [0, 255].
1924     if (!Memory.OffsetImm) return true;
1925     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1926       int64_t Val = CE->getValue();
1927       return Val >= 0 && Val < 256;
1928     }
1929     return false;
1930   }
1931 
1932   bool isMemNegImm8Offset() const {
1933     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1934       return false;
1935     // Base reg of PC isn't allowed for these encodings.
1936     if (Memory.BaseRegNum == ARM::PC) return false;
1937     // Immediate offset in range [-255, -1].
1938     if (!Memory.OffsetImm) return false;
1939     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1940       int64_t Val = CE->getValue();
1941       return (Val == std::numeric_limits<int32_t>::min()) ||
1942              (Val > -256 && Val < 0);
1943     }
1944     return false;
1945   }
1946 
1947   bool isMemUImm12Offset() const {
1948     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1949       return false;
1950     // Immediate offset in range [0, 4095].
1951     if (!Memory.OffsetImm) return true;
1952     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1953       int64_t Val = CE->getValue();
1954       return (Val >= 0 && Val < 4096);
1955     }
1956     return false;
1957   }
1958 
1959   bool isMemImm12Offset() const {
1960     // If we have an immediate that's not a constant, treat it as a label
1961     // reference needing a fixup. If it is a constant, it's something else
1962     // and we reject it.
1963 
1964     if (isImm() && !isa<MCConstantExpr>(getImm()))
1965       return true;
1966 
1967     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1968       return false;
1969     // Immediate offset in range [-4095, 4095].
1970     if (!Memory.OffsetImm) return true;
1971     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1972       int64_t Val = CE->getValue();
1973       return (Val > -4096 && Val < 4096) ||
1974              (Val == std::numeric_limits<int32_t>::min());
1975     }
1976     // If we have an immediate that's not a constant, treat it as a
1977     // symbolic expression needing a fixup.
1978     return true;
1979   }
1980 
1981   bool isConstPoolAsmImm() const {
1982     // Delay processing of Constant Pool Immediate, this will turn into
1983     // Delay processing of a constant pool immediate; it will turn into
1984     // a constant. Match no other operand.
1985   }
1986 
1987   bool isPostIdxImm8() const {
1988     if (!isImm()) return false;
1989     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1990     if (!CE) return false;
1991     int64_t Val = CE->getValue();
1992     return (Val > -256 && Val < 256) ||
1993            (Val == std::numeric_limits<int32_t>::min());
1994   }
1995 
1996   bool isPostIdxImm8s4() const {
1997     if (!isImm()) return false;
1998     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1999     if (!CE) return false;
2000     int64_t Val = CE->getValue();
2001     return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2002            (Val == std::numeric_limits<int32_t>::min());
2003   }
2004 
2005   bool isMSRMask() const { return Kind == k_MSRMask; }
2006   bool isBankedReg() const { return Kind == k_BankedReg; }
2007   bool isProcIFlags() const { return Kind == k_ProcIFlags; }
2008 
2009   // NEON operands.
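  // A "single spaced" vector list uses consecutive D registers, e.g.
  // {d0, d1, d2}, while a "double spaced" list skips every other register,
  // e.g. {d0, d2, d4}.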
2010   bool isSingleSpacedVectorList() const {
2011     return Kind == k_VectorList && !VectorList.isDoubleSpaced;
2012   }
2013 
2014   bool isDoubleSpacedVectorList() const {
2015     return Kind == k_VectorList && VectorList.isDoubleSpaced;
2016   }
2017 
2018   bool isVecListOneD() const {
2019     if (!isSingleSpacedVectorList()) return false;
2020     return VectorList.Count == 1;
2021   }
2022 
2023   bool isVecListTwoMQ() const {
2024     return isSingleSpacedVectorList() && VectorList.Count == 2 &&
2025            ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2026                VectorList.RegNum);
2027   }
2028 
2029   bool isVecListDPair() const {
2030     if (!isSingleSpacedVectorList()) return false;
2031     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2032               .contains(VectorList.RegNum));
2033   }
2034 
2035   bool isVecListThreeD() const {
2036     if (!isSingleSpacedVectorList()) return false;
2037     return VectorList.Count == 3;
2038   }
2039 
2040   bool isVecListFourD() const {
2041     if (!isSingleSpacedVectorList()) return false;
2042     return VectorList.Count == 4;
2043   }
2044 
2045   bool isVecListDPairSpaced() const {
2046     if (Kind != k_VectorList) return false;
2047     if (isSingleSpacedVectorList()) return false;
2048     return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
2049               .contains(VectorList.RegNum));
2050   }
2051 
2052   bool isVecListThreeQ() const {
2053     if (!isDoubleSpacedVectorList()) return false;
2054     return VectorList.Count == 3;
2055   }
2056 
2057   bool isVecListFourQ() const {
2058     if (!isDoubleSpacedVectorList()) return false;
2059     return VectorList.Count == 4;
2060   }
2061 
2062   bool isVecListFourMQ() const {
2063     return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2064            ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2065                VectorList.RegNum);
2066   }
2067 
2068   bool isSingleSpacedVectorAllLanes() const {
2069     return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
2070   }
2071 
2072   bool isDoubleSpacedVectorAllLanes() const {
2073     return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
2074   }
2075 
2076   bool isVecListOneDAllLanes() const {
2077     if (!isSingleSpacedVectorAllLanes()) return false;
2078     return VectorList.Count == 1;
2079   }
2080 
2081   bool isVecListDPairAllLanes() const {
2082     if (!isSingleSpacedVectorAllLanes()) return false;
2083     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2084               .contains(VectorList.RegNum));
2085   }
2086 
2087   bool isVecListDPairSpacedAllLanes() const {
2088     if (!isDoubleSpacedVectorAllLanes()) return false;
2089     return VectorList.Count == 2;
2090   }
2091 
2092   bool isVecListThreeDAllLanes() const {
2093     if (!isSingleSpacedVectorAllLanes()) return false;
2094     return VectorList.Count == 3;
2095   }
2096 
2097   bool isVecListThreeQAllLanes() const {
2098     if (!isDoubleSpacedVectorAllLanes()) return false;
2099     return VectorList.Count == 3;
2100   }
2101 
2102   bool isVecListFourDAllLanes() const {
2103     if (!isSingleSpacedVectorAllLanes()) return false;
2104     return VectorList.Count == 4;
2105   }
2106 
2107   bool isVecListFourQAllLanes() const {
2108     if (!isDoubleSpacedVectorAllLanes()) return false;
2109     return VectorList.Count == 4;
2110   }
2111 
2112   bool isSingleSpacedVectorIndexed() const {
2113     return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
2114   }
2115 
2116   bool isDoubleSpacedVectorIndexed() const {
2117     return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
2118   }
2119 
2120   bool isVecListOneDByteIndexed() const {
2121     if (!isSingleSpacedVectorIndexed()) return false;
2122     return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2123   }
2124 
2125   bool isVecListOneDHWordIndexed() const {
2126     if (!isSingleSpacedVectorIndexed()) return false;
2127     return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2128   }
2129 
2130   bool isVecListOneDWordIndexed() const {
2131     if (!isSingleSpacedVectorIndexed()) return false;
2132     return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2133   }
2134 
2135   bool isVecListTwoDByteIndexed() const {
2136     if (!isSingleSpacedVectorIndexed()) return false;
2137     return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2138   }
2139 
2140   bool isVecListTwoDHWordIndexed() const {
2141     if (!isSingleSpacedVectorIndexed()) return false;
2142     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2143   }
2144 
2145   bool isVecListTwoQWordIndexed() const {
2146     if (!isDoubleSpacedVectorIndexed()) return false;
2147     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2148   }
2149 
2150   bool isVecListTwoQHWordIndexed() const {
2151     if (!isDoubleSpacedVectorIndexed()) return false;
2152     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2153   }
2154 
2155   bool isVecListTwoDWordIndexed() const {
2156     if (!isSingleSpacedVectorIndexed()) return false;
2157     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2158   }
2159 
2160   bool isVecListThreeDByteIndexed() const {
2161     if (!isSingleSpacedVectorIndexed()) return false;
2162     return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2163   }
2164 
2165   bool isVecListThreeDHWordIndexed() const {
2166     if (!isSingleSpacedVectorIndexed()) return false;
2167     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2168   }
2169 
2170   bool isVecListThreeQWordIndexed() const {
2171     if (!isDoubleSpacedVectorIndexed()) return false;
2172     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2173   }
2174 
2175   bool isVecListThreeQHWordIndexed() const {
2176     if (!isDoubleSpacedVectorIndexed()) return false;
2177     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2178   }
2179 
2180   bool isVecListThreeDWordIndexed() const {
2181     if (!isSingleSpacedVectorIndexed()) return false;
2182     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2183   }
2184 
2185   bool isVecListFourDByteIndexed() const {
2186     if (!isSingleSpacedVectorIndexed()) return false;
2187     return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2188   }
2189 
2190   bool isVecListFourDHWordIndexed() const {
2191     if (!isSingleSpacedVectorIndexed()) return false;
2192     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2193   }
2194 
2195   bool isVecListFourQWordIndexed() const {
2196     if (!isDoubleSpacedVectorIndexed()) return false;
2197     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2198   }
2199 
2200   bool isVecListFourQHWordIndexed() const {
2201     if (!isDoubleSpacedVectorIndexed()) return false;
2202     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2203   }
2204 
2205   bool isVecListFourDWordIndexed() const {
2206     if (!isSingleSpacedVectorIndexed()) return false;
2207     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2208   }
2209 
2210   bool isVectorIndex() const { return Kind == k_VectorIndex; }
2211 
2212   template <unsigned NumLanes>
2213   bool isVectorIndexInRange() const {
2214     if (Kind != k_VectorIndex) return false;
2215     return VectorIndex.Val < NumLanes;
2216   }
2217 
2218   bool isVectorIndex8()  const { return isVectorIndexInRange<8>(); }
2219   bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
2220   bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
2221   bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }
2222 
2223   template<int PermittedValue, int OtherPermittedValue>
2224   bool isMVEPairVectorIndex() const {
2225     if (Kind != k_VectorIndex) return false;
2226     return VectorIndex.Val == PermittedValue ||
2227            VectorIndex.Val == OtherPermittedValue;
2228   }
2229 
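  // The isNEON* predicates below check whether the parsed constant can be
  // encoded as a NEON modified immediate of the corresponding width, e.g.
  // isNEONi16splat() accepts values with at most one nonzero byte such as
  // 0x00ab or 0xab00.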
2230   bool isNEONi8splat() const {
2231     if (!isImm()) return false;
2232     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2233     // Must be a constant.
2234     if (!CE) return false;
2235     int64_t Value = CE->getValue();
2236     // i8 value splatted across 8 bytes. The immediate is just the 8-bit
2237     // value.
2238     return Value >= 0 && Value < 256;
2239   }
2240 
2241   bool isNEONi16splat() const {
2242     if (isNEONByteReplicate(2))
2243       return false; // Leave that for byte replication and forbid by default.
2244     if (!isImm())
2245       return false;
2246     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2247     // Must be a constant.
2248     if (!CE) return false;
2249     unsigned Value = CE->getValue();
2250     return ARM_AM::isNEONi16splat(Value);
2251   }
2252 
2253   bool isNEONi16splatNot() const {
2254     if (!isImm())
2255       return false;
2256     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2257     // Must be a constant.
2258     if (!CE) return false;
2259     unsigned Value = CE->getValue();
2260     return ARM_AM::isNEONi16splat(~Value & 0xffff);
2261   }
2262 
2263   bool isNEONi32splat() const {
2264     if (isNEONByteReplicate(4))
2265       return false; // Leave that for byte replication and forbid by default.
2266     if (!isImm())
2267       return false;
2268     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2269     // Must be a constant.
2270     if (!CE) return false;
2271     unsigned Value = CE->getValue();
2272     return ARM_AM::isNEONi32splat(Value);
2273   }
2274 
2275   bool isNEONi32splatNot() const {
2276     if (!isImm())
2277       return false;
2278     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2279     // Must be a constant.
2280     if (!CE) return false;
2281     unsigned Value = CE->getValue();
2282     return ARM_AM::isNEONi32splat(~Value);
2283   }
2284 
2285   static bool isValidNEONi32vmovImm(int64_t Value) {
2286     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
2287     // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
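    // e.g. 0x000000ab, 0x0000ab00, 0x00ab0000, 0xab000000, 0x0000abff and
    // 0x00abffff are all valid, while 0x00ab00ab is not.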
2288     return ((Value & 0xffffffffffffff00) == 0) ||
2289            ((Value & 0xffffffffffff00ff) == 0) ||
2290            ((Value & 0xffffffffff00ffff) == 0) ||
2291            ((Value & 0xffffffff00ffffff) == 0) ||
2292            ((Value & 0xffffffffffff00ff) == 0xff) ||
2293            ((Value & 0xffffffffff00ffff) == 0xffff);
2294   }
2295 
2296   bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
2297     assert((Width == 8 || Width == 16 || Width == 32) &&
2298            "Invalid element width");
2299     assert(NumElems * Width <= 64 && "Invalid result width");
2300 
2301     if (!isImm())
2302       return false;
2303     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2304     // Must be a constant.
2305     if (!CE)
2306       return false;
2307     int64_t Value = CE->getValue();
2308     if (!Value)
2309       return false; // Don't bother with zero.
2310     if (Inv)
2311       Value = ~Value;
2312 
2313     uint64_t Mask = (1ull << Width) - 1;
2314     uint64_t Elem = Value & Mask;
2315     if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2316       return false;
2317     if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2318       return false;
2319 
2320     for (unsigned i = 1; i < NumElems; ++i) {
2321       Value >>= Width;
2322       if ((Value & Mask) != Elem)
2323         return false;
2324     }
2325     return true;
2326   }
2327 
2328   bool isNEONByteReplicate(unsigned NumBytes) const {
2329     return isNEONReplicate(8, NumBytes, false);
2330   }
2331 
2332   static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
2333     assert((FromW == 8 || FromW == 16 || FromW == 32) &&
2334            "Invalid source width");
2335     assert((ToW == 16 || ToW == 32 || ToW == 64) &&
2336            "Invalid destination width");
2337     assert(FromW < ToW && "ToW is not less than FromW");
2338   }
2339 
2340   template<unsigned FromW, unsigned ToW>
2341   bool isNEONmovReplicate() const {
2342     checkNeonReplicateArgs(FromW, ToW);
2343     if (ToW == 64 && isNEONi64splat())
2344       return false;
2345     return isNEONReplicate(FromW, ToW / FromW, false);
2346   }
2347 
2348   template<unsigned FromW, unsigned ToW>
2349   bool isNEONinvReplicate() const {
2350     checkNeonReplicateArgs(FromW, ToW);
2351     return isNEONReplicate(FromW, ToW / FromW, true);
2352   }
2353 
2354   bool isNEONi32vmov() const {
2355     if (isNEONByteReplicate(4))
2356       return false; // Let it be classified as a byte-replicate case.
2357     if (!isImm())
2358       return false;
2359     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2360     // Must be a constant.
2361     if (!CE)
2362       return false;
2363     return isValidNEONi32vmovImm(CE->getValue());
2364   }
2365 
2366   bool isNEONi32vmovNeg() const {
2367     if (!isImm()) return false;
2368     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2369     // Must be a constant.
2370     if (!CE) return false;
2371     return isValidNEONi32vmovImm(~CE->getValue());
2372   }
2373 
2374   bool isNEONi64splat() const {
2375     if (!isImm()) return false;
2376     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2377     // Must be a constant.
2378     if (!CE) return false;
2379     uint64_t Value = CE->getValue();
2380     // i64 value with each byte being either 0 or 0xff.
2381     for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2382       if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2383     return true;
2384   }
2385 
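  // Complex rotation immediates: with Angle/Remainder of 90/0 this accepts
  // 0, 90, 180 and 270, and with 180/90 it accepts 90 and 270 (these
  // correspond to the VCMLA- and VCADD-style rotations, respectively).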
2386   template<int64_t Angle, int64_t Remainder>
2387   bool isComplexRotation() const {
2388     if (!isImm()) return false;
2389 
2390     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2391     if (!CE) return false;
2392     uint64_t Value = CE->getValue();
2393 
2394     return (Value % Angle == Remainder && Value <= 270);
2395   }
2396 
2397   bool isMVELongShift() const {
2398     if (!isImm()) return false;
2399     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2400     // Must be a constant.
2401     if (!CE) return false;
2402     uint64_t Value = CE->getValue();
2403     return Value >= 1 && Value <= 32;
2404   }
2405 
2406   bool isMveSaturateOp() const {
2407     if (!isImm()) return false;
2408     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2409     if (!CE) return false;
2410     uint64_t Value = CE->getValue();
2411     return Value == 48 || Value == 64;
2412   }
2413 
2414   bool isITCondCodeNoAL() const {
2415     if (!isITCondCode()) return false;
2416     ARMCC::CondCodes CC = getCondCode();
2417     return CC != ARMCC::AL;
2418   }
2419 
2420   bool isITCondCodeRestrictedI() const {
2421     if (!isITCondCode())
2422       return false;
2423     ARMCC::CondCodes CC = getCondCode();
2424     return CC == ARMCC::EQ || CC == ARMCC::NE;
2425   }
2426 
2427   bool isITCondCodeRestrictedS() const {
2428     if (!isITCondCode())
2429       return false;
2430     ARMCC::CondCodes CC = getCondCode();
2431     return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2432            CC == ARMCC::GE;
2433   }
2434 
2435   bool isITCondCodeRestrictedU() const {
2436     if (!isITCondCode())
2437       return false;
2438     ARMCC::CondCodes CC = getCondCode();
2439     return CC == ARMCC::HS || CC == ARMCC::HI;
2440   }
2441 
2442   bool isITCondCodeRestrictedFP() const {
2443     if (!isITCondCode())
2444       return false;
2445     ARMCC::CondCodes CC = getCondCode();
2446     return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2447            CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2448   }
2449 
2450   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2451     // Add as immediates when possible.  Null MCExpr = 0.
2452     if (!Expr)
2453       Inst.addOperand(MCOperand::createImm(0));
2454     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2455       Inst.addOperand(MCOperand::createImm(CE->getValue()));
2456     else
2457       Inst.addOperand(MCOperand::createExpr(Expr));
2458   }
2459 
2460   void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
2461     assert(N == 1 && "Invalid number of operands!");
2462     addExpr(Inst, getImm());
2463   }
2464 
2465   void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
2466     assert(N == 1 && "Invalid number of operands!");
2467     addExpr(Inst, getImm());
2468   }
2469 
2470   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2471     assert(N == 2 && "Invalid number of operands!");
2472     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2473     unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
2474     Inst.addOperand(MCOperand::createReg(RegNum));
2475   }
2476 
2477   void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2478     assert(N == 3 && "Invalid number of operands!");
2479     Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2480     unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0;
2481     Inst.addOperand(MCOperand::createReg(RegNum));
2482     Inst.addOperand(MCOperand::createReg(0));
2483   }
2484 
2485   void addVPTPredROperands(MCInst &Inst, unsigned N) const {
2486     assert(N == 4 && "Invalid number of operands!");
2487     addVPTPredNOperands(Inst, N-1);
2488     unsigned RegNum;
2489     if (getVPTPred() == ARMVCC::None) {
2490       RegNum = 0;
2491     } else {
2492       unsigned NextOpIndex = Inst.getNumOperands();
2493       const MCInstrDesc &MCID =
2494           ARMDescs.Insts[ARM::INSTRUCTION_LIST_END - 1 - Inst.getOpcode()];
2495       int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
2496       assert(TiedOp >= 0 &&
2497              "Inactive register in vpred_r is not tied to an output!");
2498       RegNum = Inst.getOperand(TiedOp).getReg();
2499     }
2500     Inst.addOperand(MCOperand::createReg(RegNum));
2501   }
2502 
2503   void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
2504     assert(N == 1 && "Invalid number of operands!");
2505     Inst.addOperand(MCOperand::createImm(getCoproc()));
2506   }
2507 
2508   void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
2509     assert(N == 1 && "Invalid number of operands!");
2510     Inst.addOperand(MCOperand::createImm(getCoproc()));
2511   }
2512 
2513   void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
2514     assert(N == 1 && "Invalid number of operands!");
2515     Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
2516   }
2517 
2518   void addITMaskOperands(MCInst &Inst, unsigned N) const {
2519     assert(N == 1 && "Invalid number of operands!");
2520     Inst.addOperand(MCOperand::createImm(ITMask.Mask));
2521   }
2522 
2523   void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
2524     assert(N == 1 && "Invalid number of operands!");
2525     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2526   }
2527 
2528   void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2529     assert(N == 1 && "Invalid number of operands!");
2530     Inst.addOperand(MCOperand::createImm(unsigned(ARMCC::getOppositeCondition(getCondCode()))));
2531   }
2532 
2533   void addCCOutOperands(MCInst &Inst, unsigned N) const {
2534     assert(N == 1 && "Invalid number of operands!");
2535     Inst.addOperand(MCOperand::createReg(getReg()));
2536   }
2537 
2538   void addRegOperands(MCInst &Inst, unsigned N) const {
2539     assert(N == 1 && "Invalid number of operands!");
2540     Inst.addOperand(MCOperand::createReg(getReg()));
2541   }
2542 
2543   void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2544     assert(N == 3 && "Invalid number of operands!");
2545     assert(isRegShiftedReg() &&
2546            "addRegShiftedRegOperands() on non-RegShiftedReg!");
2547     Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2548     Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2549     Inst.addOperand(MCOperand::createImm(
2550       ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2551   }
2552 
2553   void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2554     assert(N == 2 && "Invalid number of operands!");
2555     assert(isRegShiftedImm() &&
2556            "addRegShiftedImmOperands() on non-RegShiftedImm!");
2557     Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2558     // Shift of #32 is encoded as 0 where permitted
2559     unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2560     Inst.addOperand(MCOperand::createImm(
2561       ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2562   }
2563 
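  // The shifter immediate is encoded with the ASR flag in bit 5, e.g.
  // "asr #16" becomes (1 << 5) | 16 = 0x30, while a non-ASR (lsl) shift
  // leaves bit 5 clear.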
2564   void addShifterImmOperands(MCInst &Inst, unsigned N) const {
2565     assert(N == 1 && "Invalid number of operands!");
2566     Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
2567                                          ShifterImm.Imm));
2568   }
2569 
2570   void addRegListOperands(MCInst &Inst, unsigned N) const {
2571     assert(N == 1 && "Invalid number of operands!");
2572     const SmallVectorImpl<unsigned> &RegList = getRegList();
2573     for (unsigned Reg : RegList)
2574       Inst.addOperand(MCOperand::createReg(Reg));
2575   }
2576 
2577   void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2578     assert(N == 1 && "Invalid number of operands!");
2579     const SmallVectorImpl<unsigned> &RegList = getRegList();
2580     for (unsigned Reg : RegList)
2581       Inst.addOperand(MCOperand::createReg(Reg));
2582   }
2583 
2584   void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
2585     addRegListOperands(Inst, N);
2586   }
2587 
2588   void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
2589     addRegListOperands(Inst, N);
2590   }
2591 
2592   void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2593     addRegListOperands(Inst, N);
2594   }
2595 
2596   void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2597     addRegListOperands(Inst, N);
2598   }
2599 
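  // e.g. "ror #16" is stored as 16 >> 3 == 2 in the MCInst.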
2600   void addRotImmOperands(MCInst &Inst, unsigned N) const {
2601     assert(N == 1 && "Invalid number of operands!");
2602     // Encoded as val>>3. The printer handles display as 8, 16, 24.
2603     Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2604   }
2605 
2606   void addModImmOperands(MCInst &Inst, unsigned N) const {
2607     assert(N == 1 && "Invalid number of operands!");
2608 
2609     // Support for fixups (MCFixup)
2610     if (isImm())
2611       return addImmOperands(Inst, N);
2612 
2613     Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2614   }
2615 
2616   void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2617     assert(N == 1 && "Invalid number of operands!");
2618     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2619     uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2620     Inst.addOperand(MCOperand::createImm(Enc));
2621   }
2622 
2623   void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2624     assert(N == 1 && "Invalid number of operands!");
2625     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2626     uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2627     Inst.addOperand(MCOperand::createImm(Enc));
2628   }
2629 
2630   void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2631     assert(N == 1 && "Invalid number of operands!");
2632     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2633     uint32_t Val = -CE->getValue();
2634     Inst.addOperand(MCOperand::createImm(Val));
2635   }
2636 
2637   void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2638     assert(N == 1 && "Invalid number of operands!");
2639     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2640     uint32_t Val = -CE->getValue();
2641     Inst.addOperand(MCOperand::createImm(Val));
2642   }
2643 
2644   void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2645     assert(N == 1 && "Invalid number of operands!");
2646     // Munge the lsb/width into a bitfield mask.
2647     unsigned lsb = Bitfield.LSB;
2648     unsigned width = Bitfield.Width;
2649     // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
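    // e.g. lsb = 8, width = 8 yields 0xffff00ff (bits [15:8] clear).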
2650     uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2651                       (32 - (lsb + width)));
2652     Inst.addOperand(MCOperand::createImm(Mask));
2653   }
2654 
2655   void addImmOperands(MCInst &Inst, unsigned N) const {
2656     assert(N == 1 && "Invalid number of operands!");
2657     addExpr(Inst, getImm());
2658   }
2659 
2660   void addFBits16Operands(MCInst &Inst, unsigned N) const {
2661     assert(N == 1 && "Invalid number of operands!");
2662     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2663     Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2664   }
2665 
2666   void addFBits32Operands(MCInst &Inst, unsigned N) const {
2667     assert(N == 1 && "Invalid number of operands!");
2668     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2669     Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2670   }
2671 
2672   void addFPImmOperands(MCInst &Inst, unsigned N) const {
2673     assert(N == 1 && "Invalid number of operands!");
2674     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2675     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2676     Inst.addOperand(MCOperand::createImm(Val));
2677   }
2678 
2679   void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2680     assert(N == 1 && "Invalid number of operands!");
2681     // FIXME: We really want to scale the value here, but the LDRD/STRD
2682     // instructions don't encode operands that way yet.
2683     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2684     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2685   }
2686 
2687   void addImm7s4Operands(MCInst &Inst, unsigned N) const {
2688     assert(N == 1 && "Invalid number of operands!");
2689     // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
2690     // instructions don't encode operands that way yet.
2691     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2692     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2693   }
2694 
2695   void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
2696     assert(N == 1 && "Invalid number of operands!");
2697     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2698     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2699   }
2700 
2701   void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
2702     assert(N == 1 && "Invalid number of operands!");
2703     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2704     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2705   }
2706 
2707   void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
2708     assert(N == 1 && "Invalid number of operands!");
2709     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2710     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2711   }
2712 
2713   void addImm7Operands(MCInst &Inst, unsigned N) const {
2714     assert(N == 1 && "Invalid number of operands!");
2715     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2716     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2717   }
2718 
2719   void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2720     assert(N == 1 && "Invalid number of operands!");
2721     // The immediate is scaled by four in the encoding and is stored
2722     // in the MCInst as such. Lop off the low two bits here.
2723     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2724     Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2725   }
2726 
2727   void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2728     assert(N == 1 && "Invalid number of operands!");
2729     // The immediate is scaled by four in the encoding and is stored
2730     // in the MCInst as such. Lop off the low two bits here.
2731     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2732     Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2733   }
2734 
2735   void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2736     assert(N == 1 && "Invalid number of operands!");
2737     // The immediate is scaled by four in the encoding and is stored
2738     // in the MCInst as such. Lop off the low two bits here.
2739     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2740     Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2741   }
2742 
2743   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2744     assert(N == 1 && "Invalid number of operands!");
2745     // The constant encodes as the immediate-1, and we store in the instruction
2746     // the bits as encoded, so subtract off one here.
2747     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2748     Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2749   }
2750 
2751   void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2752     assert(N == 1 && "Invalid number of operands!");
2753     // The constant encodes as the immediate-1, and we store in the instruction
2754     // the bits as encoded, so subtract off one here.
2755     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2756     Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2757   }
2758 
2759   void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2760     assert(N == 1 && "Invalid number of operands!");
2761     // The constant encodes as the immediate, except for 32, which encodes as
2762     // zero.
2763     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2764     unsigned Imm = CE->getValue();
2765     Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2766   }
2767 
2768   void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2769     assert(N == 1 && "Invalid number of operands!");
2770     // An ASR value of 32 encodes as 0, so that's how we want to add it to
2771     // the instruction as well.
2772     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2773     int Val = CE->getValue();
2774     Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2775   }
2776 
2777   void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2778     assert(N == 1 && "Invalid number of operands!");
2779     // The operand is actually a t2_so_imm, but we have its bitwise
2780     // negation in the assembly source, so twiddle it here.
2781     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2782     Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
2783   }
2784 
2785   void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2786     assert(N == 1 && "Invalid number of operands!");
2787     // The operand is actually a t2_so_imm, but we have its
2788     // negation in the assembly source, so twiddle it here.
2789     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2790     Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2791   }
2792 
2793   void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2794     assert(N == 1 && "Invalid number of operands!");
2795     // The operand is actually an imm0_4095, but we have its
2796     // negation in the assembly source, so twiddle it here.
2797     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2798     Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2799   }
2800 
2801   void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2802     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2803       Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2804       return;
2805     }
2806     const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2807     Inst.addOperand(MCOperand::createExpr(SR));
2808   }
2809 
2810   void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2811     assert(N == 1 && "Invalid number of operands!");
2812     if (isImm()) {
2813       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2814       if (CE) {
2815         Inst.addOperand(MCOperand::createImm(CE->getValue()));
2816         return;
2817       }
2818       const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2819       Inst.addOperand(MCOperand::createExpr(SR));
2820       return;
2821     }
2822 
2823     assert(isGPRMem() && "Unknown value type!");
2824     assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2825     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2826       Inst.addOperand(MCOperand::createImm(CE->getValue()));
2827     else
2828       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2829   }
2830 
2831   void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2832     assert(N == 1 && "Invalid number of operands!");
2833     Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2834   }
2835 
2836   void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2837     assert(N == 1 && "Invalid number of operands!");
2838     Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2839   }
2840 
2841   void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2842     assert(N == 1 && "Invalid number of operands!");
2843     Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
2844   }
2845 
2846   void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2847     assert(N == 1 && "Invalid number of operands!");
2848     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2849   }
2850 
2851   void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
2852     assert(N == 1 && "Invalid number of operands!");
2853     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2854   }
2855 
2856   void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
2857     assert(N == 1 && "Invalid number of operands!");
2858     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2859   }
2860 
2861   void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
2862     assert(N == 1 && "Invalid number of operands!");
2863     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2864   }
2865 
2866   void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2867     assert(N == 1 && "Invalid number of operands!");
2868     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2869       Inst.addOperand(MCOperand::createImm(CE->getValue()));
2870     else
2871       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2872   }
2873 
2874   void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2875     assert(N == 1 && "Invalid number of operands!");
2876     assert(isImm() && "Not an immediate!");
2877 
2878     // If we have an immediate that's not a constant, treat it as a label
2879     // reference needing a fixup.
2880     if (!isa<MCConstantExpr>(getImm())) {
2881       Inst.addOperand(MCOperand::createExpr(getImm()));
2882       return;
2883     }
2884 
2885     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2886     int Val = CE->getValue();
2887     Inst.addOperand(MCOperand::createImm(Val));
2888   }
2889 
2890   void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2891     assert(N == 2 && "Invalid number of operands!");
2892     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2893     Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2894   }
2895 
2896   void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2897     addAlignedMemoryOperands(Inst, N);
2898   }
2899 
2900   void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2901     addAlignedMemoryOperands(Inst, N);
2902   }
2903 
2904   void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2905     addAlignedMemoryOperands(Inst, N);
2906   }
2907 
2908   void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2909     addAlignedMemoryOperands(Inst, N);
2910   }
2911 
2912   void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2913     addAlignedMemoryOperands(Inst, N);
2914   }
2915 
2916   void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2917     addAlignedMemoryOperands(Inst, N);
2918   }
2919 
2920   void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2921     addAlignedMemoryOperands(Inst, N);
2922   }
2923 
2924   void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2925     addAlignedMemoryOperands(Inst, N);
2926   }
2927 
2928   void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2929     addAlignedMemoryOperands(Inst, N);
2930   }
2931 
2932   void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2933     addAlignedMemoryOperands(Inst, N);
2934   }
2935 
2936   void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2937     addAlignedMemoryOperands(Inst, N);
2938   }
2939 
2940   void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2941     assert(N == 3 && "Invalid number of operands!");
2942     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2943     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2944     if (!Memory.OffsetRegNum) {
2945       if (!Memory.OffsetImm)
2946         Inst.addOperand(MCOperand::createImm(0));
2947       else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
2948         int32_t Val = CE->getValue();
2949         ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2950         // Special case for #-0
2951         if (Val == std::numeric_limits<int32_t>::min())
2952           Val = 0;
2953         if (Val < 0)
2954           Val = -Val;
2955         Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2956         Inst.addOperand(MCOperand::createImm(Val));
2957       } else
2958         Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2959     } else {
2960       // For register offset, we encode the shift type and negation flag
2961       // here.
2962       int32_t Val =
2963           ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2964                             Memory.ShiftImm, Memory.ShiftType);
2965       Inst.addOperand(MCOperand::createImm(Val));
2966     }
2967   }
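
  // Illustrative sketch only (not called by the parser): how an immediate
  // addrmode2 offset is folded into the single operand value built above.
  // The helper name is hypothetical; INT32_MIN is the parser's internal
  // encoding of "#-0".
  static unsigned encodeAM2ImmOffsetSketch(int32_t Offset) {
    ARM_AM::AddrOpc AddSub = Offset < 0 ? ARM_AM::sub : ARM_AM::add;
    if (Offset == std::numeric_limits<int32_t>::min())
      Offset = 0; // "#-0" becomes an explicit subtract of zero.
    if (Offset < 0)
      Offset = -Offset;
    // E.g. Offset == -4 yields getAM2Opc(ARM_AM::sub, 4, ARM_AM::no_shift).
    return ARM_AM::getAM2Opc(AddSub, Offset, ARM_AM::no_shift);
  }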
2968 
2969   void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2970     assert(N == 2 && "Invalid number of operands!");
2971     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2972     assert(CE && "non-constant AM2OffsetImm operand!");
2973     int32_t Val = CE->getValue();
2974     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2975     // Special case for #-0
2976     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2977     if (Val < 0) Val = -Val;
2978     Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2979     Inst.addOperand(MCOperand::createReg(0));
2980     Inst.addOperand(MCOperand::createImm(Val));
2981   }
2982 
2983   void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2984     assert(N == 3 && "Invalid number of operands!");
2985     // If we have an immediate that's not a constant, treat it as a label
2986     // reference needing a fixup. If it is a constant, it's something else
2987     // and we reject it.
2988     if (isImm()) {
2989       Inst.addOperand(MCOperand::createExpr(getImm()));
2990       Inst.addOperand(MCOperand::createReg(0));
2991       Inst.addOperand(MCOperand::createImm(0));
2992       return;
2993     }
2994 
2995     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2996     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2997     if (!Memory.OffsetRegNum) {
2998       if (!Memory.OffsetImm)
2999         Inst.addOperand(MCOperand::createImm(0));
3000       else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3001         int32_t Val = CE->getValue();
3002         ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3003         // Special case for #-0
3004         if (Val == std::numeric_limits<int32_t>::min())
3005           Val = 0;
3006         if (Val < 0)
3007           Val = -Val;
3008         Val = ARM_AM::getAM3Opc(AddSub, Val);
3009         Inst.addOperand(MCOperand::createImm(Val));
3010       } else
3011         Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3012     } else {
3013       // For register offset, we encode the shift type and negation flag
3014       // here.
3015       int32_t Val =
3016           ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
3017       Inst.addOperand(MCOperand::createImm(Val));
3018     }
3019   }
3020 
3021   void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
3022     assert(N == 2 && "Invalid number of operands!");
3023     if (Kind == k_PostIndexRegister) {
3024       int32_t Val =
3025         ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
3026       Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3027       Inst.addOperand(MCOperand::createImm(Val));
3028       return;
3029     }
3030 
3031     // Constant offset.
3032     const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
3033     int32_t Val = CE->getValue();
3034     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3035     // Special case for #-0
3036     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3037     if (Val < 0) Val = -Val;
3038     Val = ARM_AM::getAM3Opc(AddSub, Val);
3039     Inst.addOperand(MCOperand::createReg(0));
3040     Inst.addOperand(MCOperand::createImm(Val));
3041   }
3042 
3043   void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
3044     assert(N == 2 && "Invalid number of operands!");
3045     // If we have an immediate that's not a constant, treat it as a label
3046     // reference needing a fixup. If it is a constant, it's something else
3047     // and we reject it.
3048     if (isImm()) {
3049       Inst.addOperand(MCOperand::createExpr(getImm()));
3050       Inst.addOperand(MCOperand::createImm(0));
3051       return;
3052     }
3053 
3054     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3055     if (!Memory.OffsetImm)
3056       Inst.addOperand(MCOperand::createImm(0));
3057     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3058       // The lower two bits are always zero and as such are not encoded.
3059       int32_t Val = CE->getValue() / 4;
3060       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3061       // Special case for #-0
3062       if (Val == std::numeric_limits<int32_t>::min())
3063         Val = 0;
3064       if (Val < 0)
3065         Val = -Val;
3066       Val = ARM_AM::getAM5Opc(AddSub, Val);
3067       Inst.addOperand(MCOperand::createImm(Val));
3068     } else
3069       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3070   }
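
  // Illustrative sketch only (not called by the parser): addrmode5 offsets
  // are word-scaled, so the byte offset is divided by 4 before being packed;
  // e.g. "#-8" becomes getAM5Opc(ARM_AM::sub, 2). The helper name is
  // hypothetical.
  static unsigned encodeAM5ImmOffsetSketch(int32_t OffsetBytes) {
    int32_t Words = OffsetBytes / 4; // the low two bits are never encoded
    ARM_AM::AddrOpc AddSub = Words < 0 ? ARM_AM::sub : ARM_AM::add;
    if (Words == std::numeric_limits<int32_t>::min())
      Words = 0; // "#-0" sentinel, as in the other addressing modes
    if (Words < 0)
      Words = -Words;
    return ARM_AM::getAM5Opc(AddSub, Words);
  }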
3071 
3072   void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
3073     assert(N == 2 && "Invalid number of operands!");
3074     // If we have an immediate that's not a constant, treat it as a label
3075     // reference needing a fixup. If it is a constant, it's something else
3076     // and we reject it.
3077     if (isImm()) {
3078       Inst.addOperand(MCOperand::createExpr(getImm()));
3079       Inst.addOperand(MCOperand::createImm(0));
3080       return;
3081     }
3082 
3083     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3084     // The lower bit is always zero and as such is not encoded.
3085     if (!Memory.OffsetImm)
3086       Inst.addOperand(MCOperand::createImm(0));
3087     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3088       int32_t Val = CE->getValue() / 2;
3089       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3090       // Special case for #-0
3091       if (Val == std::numeric_limits<int32_t>::min())
3092         Val = 0;
3093       if (Val < 0)
3094         Val = -Val;
3095       Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
3096       Inst.addOperand(MCOperand::createImm(Val));
3097     } else
3098       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3099   }
3100 
3101   void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
3102     assert(N == 2 && "Invalid number of operands!");
3103     // If we have an immediate that's not a constant, treat it as a label
3104     // reference needing a fixup. If it is a constant, it's something else
3105     // and we reject it.
3106     if (isImm()) {
3107       Inst.addOperand(MCOperand::createExpr(getImm()));
3108       Inst.addOperand(MCOperand::createImm(0));
3109       return;
3110     }
3111 
3112     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3113     addExpr(Inst, Memory.OffsetImm);
3114   }
3115 
3116   void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
3117     assert(N == 2 && "Invalid number of operands!");
3118     // If we have an immediate that's not a constant, treat it as a label
3119     // reference needing a fixup. If it is a constant, it's something else
3120     // and we reject it.
3121     if (isImm()) {
3122       Inst.addOperand(MCOperand::createExpr(getImm()));
3123       Inst.addOperand(MCOperand::createImm(0));
3124       return;
3125     }
3126 
3127     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3128     addExpr(Inst, Memory.OffsetImm);
3129   }
3130 
3131   void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
3132     assert(N == 2 && "Invalid number of operands!");
3133     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3134     if (!Memory.OffsetImm)
3135       Inst.addOperand(MCOperand::createImm(0));
3136     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3137       // The lower two bits are always zero and as such are not encoded.
3138       Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3139     else
3140       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3141   }
3142 
3143   void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
3144     assert(N == 2 && "Invalid number of operands!");
3145     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3146     addExpr(Inst, Memory.OffsetImm);
3147   }
3148 
3149   void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
3150     assert(N == 2 && "Invalid number of operands!");
3151     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3152     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3153   }
3154 
3155   void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3156     assert(N == 2 && "Invalid number of operands!");
3157     // If this is an immediate, it's a label reference.
3158     if (isImm()) {
3159       addExpr(Inst, getImm());
3160       Inst.addOperand(MCOperand::createImm(0));
3161       return;
3162     }
3163 
3164     // Otherwise, it's a normal memory reg+offset.
3165     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3166     addExpr(Inst, Memory.OffsetImm);
3167   }
3168 
3169   void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3170     assert(N == 2 && "Invalid number of operands!");
3171     // If this is an immediate, it's a label reference.
3172     if (isImm()) {
3173       addExpr(Inst, getImm());
3174       Inst.addOperand(MCOperand::createImm(0));
3175       return;
3176     }
3177 
3178     // Otherwise, it's a normal memory reg+offset.
3179     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3180     addExpr(Inst, Memory.OffsetImm);
3181   }
3182 
3183   void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
3184     assert(N == 1 && "Invalid number of operands!");
3185     // This is the container for the immediate that we will create the
3186     // constant pool from.
3187     addExpr(Inst, getConstantPoolImm());
3188   }
3189 
3190   void addMemTBBOperands(MCInst &Inst, unsigned N) const {
3191     assert(N == 2 && "Invalid number of operands!");
3192     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3193     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3194   }
3195 
3196   void addMemTBHOperands(MCInst &Inst, unsigned N) const {
3197     assert(N == 2 && "Invalid number of operands!");
3198     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3199     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3200   }
3201 
3202   void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3203     assert(N == 3 && "Invalid number of operands!");
3204     unsigned Val =
3205       ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
3206                         Memory.ShiftImm, Memory.ShiftType);
3207     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3208     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3209     Inst.addOperand(MCOperand::createImm(Val));
3210   }
3211 
3212   void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3213     assert(N == 3 && "Invalid number of operands!");
3214     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3215     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3216     Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
3217   }
3218 
3219   void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
3220     assert(N == 2 && "Invalid number of operands!");
3221     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3222     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3223   }
3224 
3225   void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
3226     assert(N == 2 && "Invalid number of operands!");
3227     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3228     if (!Memory.OffsetImm)
3229       Inst.addOperand(MCOperand::createImm(0));
3230     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3231       // The lower two bits are always zero and as such are not encoded.
3232       Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3233     else
3234       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3235   }
3236 
3237   void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
3238     assert(N == 2 && "Invalid number of operands!");
3239     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3240     if (!Memory.OffsetImm)
3241       Inst.addOperand(MCOperand::createImm(0));
3242     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3243       Inst.addOperand(MCOperand::createImm(CE->getValue() / 2));
3244     else
3245       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3246   }
3247 
3248   void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
3249     assert(N == 2 && "Invalid number of operands!");
3250     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3251     addExpr(Inst, Memory.OffsetImm);
3252   }
3253 
3254   void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
3255     assert(N == 2 && "Invalid number of operands!");
3256     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3257     if (!Memory.OffsetImm)
3258       Inst.addOperand(MCOperand::createImm(0));
3259     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3260       // The lower two bits are always zero and as such are not encoded.
3261       Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3262     else
3263       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3264   }
3265 
3266   void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
3267     assert(N == 1 && "Invalid number of operands!");
3268     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3269     assert(CE && "non-constant post-idx-imm8 operand!");
3270     int Imm = CE->getValue();
3271     bool isAdd = Imm >= 0;
3272     if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3273     Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
3274     Inst.addOperand(MCOperand::createImm(Imm));
3275   }
3276 
3277   void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
3278     assert(N == 1 && "Invalid number of operands!");
3279     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3280     assert(CE && "non-constant post-idx-imm8s4 operand!");
3281     int Imm = CE->getValue();
3282     bool isAdd = Imm >= 0;
3283     if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3284     // Immediate is scaled by 4.
3285     Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
3286     Inst.addOperand(MCOperand::createImm(Imm));
3287   }
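
  // Illustrative sketch only (not called by the parser): the post-indexed
  // immediates above pack the magnitude in the low bits and the add/subtract
  // flag in bit 8; e.g. "#-16" in the scaled-by-4 form becomes
  // (16 / 4) | (0 << 8) == 4. The helper name is hypothetical.
  static unsigned encodePostIdxImm8s4Sketch(int32_t Imm) {
    bool IsAdd = Imm >= 0;
    if (Imm == std::numeric_limits<int32_t>::min())
      Imm = 0; // "#-0" sentinel
    return ((Imm < 0 ? -Imm : Imm) / 4) | (unsigned(IsAdd) << 8);
  }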
3288 
3289   void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
3290     assert(N == 2 && "Invalid number of operands!");
3291     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3292     Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
3293   }
3294 
3295   void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
3296     assert(N == 2 && "Invalid number of operands!");
3297     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3298     // The sign, shift type, and shift amount are encoded in a single operand
3299     // using the AM2 encoding helpers.
3300     ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
3301     unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
3302                                      PostIdxReg.ShiftTy);
3303     Inst.addOperand(MCOperand::createImm(Imm));
3304   }
3305 
3306   void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
3307     assert(N == 1 && "Invalid number of operands!");
3308     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3309     Inst.addOperand(MCOperand::createImm(CE->getValue()));
3310   }
3311 
3312   void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
3313     assert(N == 1 && "Invalid number of operands!");
3314     Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
3315   }
3316 
3317   void addBankedRegOperands(MCInst &Inst, unsigned N) const {
3318     assert(N == 1 && "Invalid number of operands!");
3319     Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
3320   }
3321 
3322   void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
3323     assert(N == 1 && "Invalid number of operands!");
3324     Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
3325   }
3326 
3327   void addVecListOperands(MCInst &Inst, unsigned N) const {
3328     assert(N == 1 && "Invalid number of operands!");
3329     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3330   }
3331 
3332   void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
3333     assert(N == 1 && "Invalid number of operands!");
3334 
3335     // When we come here, the VectorList field will identify a range
3336     // of q-registers by its base register and length, and it will
3337     // have already been error-checked to be the expected length of
3338     // range and contain only q-regs in the range q0-q7. So we can
3339     // count on the base register being in the range q0-q6 (for 2
3340     // regs) or q0-q4 (for 4)
3341     //
3342     // The MVE instructions taking a register range of this kind will
3343     // need an operand in the MQQPR or MQQQQPR class, representing the
3344     // entire range as a unit. So we must translate into that class,
3345     // by finding the index of the base register in the MQPR reg
3346     // class, and returning the super-register at the corresponding
3347     // index in the target class.
3348 
3349     const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
3350     const MCRegisterClass *RC_out =
3351         (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
3352                                 : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];
3353 
3354     unsigned I, E = RC_out->getNumRegs();
3355     for (I = 0; I < E; I++)
3356       if (RC_in->getRegister(I) == VectorList.RegNum)
3357         break;
3358     assert(I < E && "Invalid vector list start register!");
3359 
3360     Inst.addOperand(MCOperand::createReg(RC_out->getRegister(I)));
3361   }
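
  // Illustrative sketch only (mirrors the loop above): for a two-register
  // list starting at q2, the base register is looked up in MQPR and the
  // register at the same index in MQQPR (the super-register covering q2-q3)
  // becomes the operand. The helper name is hypothetical.
  static unsigned mqprRangeToSuperRegSketch(unsigned BaseQReg,
                                            unsigned Count) {
    const MCRegisterClass &In = ARMMCRegisterClasses[ARM::MQPRRegClassID];
    const MCRegisterClass &Out =
        ARMMCRegisterClasses[Count == 2 ? ARM::MQQPRRegClassID
                                        : ARM::MQQQQPRRegClassID];
    for (unsigned I = 0, E = Out.getNumRegs(); I != E; ++I)
      if (In.getRegister(I) == BaseQReg)
        return Out.getRegister(I);
    return 0; // not a valid list start register
  }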
3362 
3363   void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
3364     assert(N == 2 && "Invalid number of operands!");
3365     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3366     Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
3367   }
3368 
3369   void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
3370     assert(N == 1 && "Invalid number of operands!");
3371     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3372   }
3373 
3374   void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
3375     assert(N == 1 && "Invalid number of operands!");
3376     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3377   }
3378 
3379   void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
3380     assert(N == 1 && "Invalid number of operands!");
3381     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3382   }
3383 
3384   void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
3385     assert(N == 1 && "Invalid number of operands!");
3386     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3387   }
3388 
3389   void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
3390     assert(N == 1 && "Invalid number of operands!");
3391     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3392   }
3393 
3394   void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
3395     assert(N == 1 && "Invalid number of operands!");
3396     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3397   }
3398 
3399   void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
3400     assert(N == 1 && "Invalid number of operands!");
3401     // The immediate encodes the type of constant as well as the value.
3402     // Mask in that this is an i8 splat.
3403     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3404     Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
3405   }
3406 
3407   void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
3408     assert(N == 1 && "Invalid number of operands!");
3409     // The immediate encodes the type of constant as well as the value.
3410     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3411     unsigned Value = CE->getValue();
3412     Value = ARM_AM::encodeNEONi16splat(Value);
3413     Inst.addOperand(MCOperand::createImm(Value));
3414   }
3415 
3416   void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
3417     assert(N == 1 && "Invalid number of operands!");
3418     // The immediate encodes the type of constant as well as the value.
3419     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3420     unsigned Value = CE->getValue();
3421     Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
3422     Inst.addOperand(MCOperand::createImm(Value));
3423   }
3424 
3425   void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
3426     assert(N == 1 && "Invalid number of operands!");
3427     // The immediate encodes the type of constant as well as the value.
3428     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3429     unsigned Value = CE->getValue();
3430     Value = ARM_AM::encodeNEONi32splat(Value);
3431     Inst.addOperand(MCOperand::createImm(Value));
3432   }
3433 
3434   void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
3435     assert(N == 1 && "Invalid number of operands!");
3436     // The immediate encodes the type of constant as well as the value.
3437     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3438     unsigned Value = CE->getValue();
3439     Value = ARM_AM::encodeNEONi32splat(~Value);
3440     Inst.addOperand(MCOperand::createImm(Value));
3441   }
3442 
3443   void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
3444     // The immediate encodes the type of constant as well as the value.
3445     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3446     assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
3447             Inst.getOpcode() == ARM::VMOVv16i8) &&
3448           "All instructions that want to replicate a non-zero byte "
3449           "must be replaced with VMOVv8i8 or VMOVv16i8.");
3450     unsigned Value = CE->getValue();
3451     if (Inv)
3452       Value = ~Value;
3453     unsigned B = Value & 0xff;
3454     B |= 0xe00; // cmode = 0b1110
3455     Inst.addOperand(MCOperand::createImm(B));
3456   }
3457 
3458   void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3459     assert(N == 1 && "Invalid number of operands!");
3460     addNEONi8ReplicateOperands(Inst, true);
3461   }
3462 
3463   static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3464     if (Value >= 256 && Value <= 0xffff)
3465       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3466     else if (Value > 0xffff && Value <= 0xffffff)
3467       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3468     else if (Value > 0xffffff)
3469       Value = (Value >> 24) | 0x600;
3470     return Value;
3471   }
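
  // Worked examples of encodeNeonVMOVImmediate (value -> encoded immediate),
  // shown for illustration only:
  //   0x00000034 -> 0x034   (byte in bits [7:0])
  //   0x00003400 -> 0x234   (byte shifted left by 8, low byte clear)
  //   0x00340000 -> 0x434   (byte shifted left by 16)
  //   0x34000000 -> 0x634   (byte shifted left by 24)
  //   0x00001234 -> 0xc12   (low byte also set)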
3472 
3473   void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
3474     assert(N == 1 && "Invalid number of operands!");
3475     // The immediate encodes the type of constant as well as the value.
3476     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3477     unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
3478     Inst.addOperand(MCOperand::createImm(Value));
3479   }
3480 
3481   void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3482     assert(N == 1 && "Invalid number of operands!");
3483     addNEONi8ReplicateOperands(Inst, false);
3484   }
3485 
3486   void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
3487     assert(N == 1 && "Invalid number of operands!");
3488     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3489     assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
3490             Inst.getOpcode() == ARM::VMOVv8i16 ||
3491             Inst.getOpcode() == ARM::VMVNv4i16 ||
3492             Inst.getOpcode() == ARM::VMVNv8i16) &&
3493           "All instructions that want to replicate a non-zero half-word "
3494           "must be replaced with V{MOV,MVN}v{4,8}i16.");
3495     uint64_t Value = CE->getValue();
3496     unsigned Elem = Value & 0xffff;
3497     if (Elem >= 256)
3498       Elem = (Elem >> 8) | 0x200;
3499     Inst.addOperand(MCOperand::createImm(Elem));
3500   }
3501 
3502   void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
3503     assert(N == 1 && "Invalid number of operands!");
3504     // The immediate encodes the type of constant as well as the value.
3505     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3506     unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
3507     Inst.addOperand(MCOperand::createImm(Value));
3508   }
3509 
3510   void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
3511     assert(N == 1 && "Invalid number of operands!");
3512     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3513     assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
3514             Inst.getOpcode() == ARM::VMOVv4i32 ||
3515             Inst.getOpcode() == ARM::VMVNv2i32 ||
3516             Inst.getOpcode() == ARM::VMVNv4i32) &&
3517           "All instructions that want to replicate a non-zero word "
3518           "must be replaced with V{MOV,MVN}v{2,4}i32.");
3519     uint64_t Value = CE->getValue();
3520     unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
3521     Inst.addOperand(MCOperand::createImm(Elem));
3522   }
3523 
3524   void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3525     assert(N == 1 && "Invalid number of operands!");
3526     // The immediate encodes the type of constant as well as the value.
3527     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3528     uint64_t Value = CE->getValue();
3529     unsigned Imm = 0;
3530     for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3531       Imm |= (Value & 1) << i;
3532     }
3533     Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
3534   }
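
  // Worked example (illustration only): each byte of a valid i64 splat
  // immediate is either 0x00 or 0xff, so 0xff00ff00ff00ff00 collapses to the
  // byte mask 0b10101010 (0xaa) and is emitted as 0xaa | 0x1e00 == 0x1eaa.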
3535 
3536   void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
3537     assert(N == 1 && "Invalid number of operands!");
3538     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3539     Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
3540   }
3541 
3542   void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
3543     assert(N == 1 && "Invalid number of operands!");
3544     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3545     Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
3546   }
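
  // Worked examples of the rotation encodings above (illustration only):
  //   even: #0 -> 0, #90 -> 1, #180 -> 2, #270 -> 3   (value / 90)
  //   odd:  #90 -> 0, #270 -> 1                        ((value - 90) / 180)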
3547 
3548   void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
3549     assert(N == 1 && "Invalid number of operands!");
3550     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3551     unsigned Imm = CE->getValue();
3552     assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
3553     Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0));
3554   }
3555 
3556   void print(raw_ostream &OS) const override;
3557 
3558   static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
3559     auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
3560     Op->ITMask.Mask = Mask;
3561     Op->StartLoc = S;
3562     Op->EndLoc = S;
3563     return Op;
3564   }
3565 
3566   static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
3567                                                     SMLoc S) {
3568     auto Op = std::make_unique<ARMOperand>(k_CondCode);
3569     Op->CC.Val = CC;
3570     Op->StartLoc = S;
3571     Op->EndLoc = S;
3572     return Op;
3573   }
3574 
3575   static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
3576                                                    SMLoc S) {
3577     auto Op = std::make_unique<ARMOperand>(k_VPTPred);
3578     Op->VCC.Val = CC;
3579     Op->StartLoc = S;
3580     Op->EndLoc = S;
3581     return Op;
3582   }
3583 
3584   static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
3585     auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
3586     Op->Cop.Val = CopVal;
3587     Op->StartLoc = S;
3588     Op->EndLoc = S;
3589     return Op;
3590   }
3591 
3592   static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
3593     auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
3594     Op->Cop.Val = CopVal;
3595     Op->StartLoc = S;
3596     Op->EndLoc = S;
3597     return Op;
3598   }
3599 
3600   static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
3601                                                         SMLoc E) {
3602     auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
3603     Op->Cop.Val = Val;
3604     Op->StartLoc = S;
3605     Op->EndLoc = E;
3606     return Op;
3607   }
3608 
3609   static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
3610     auto Op = std::make_unique<ARMOperand>(k_CCOut);
3611     Op->Reg.RegNum = RegNum;
3612     Op->StartLoc = S;
3613     Op->EndLoc = S;
3614     return Op;
3615   }
3616 
3617   static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
3618     auto Op = std::make_unique<ARMOperand>(k_Token);
3619     Op->Tok.Data = Str.data();
3620     Op->Tok.Length = Str.size();
3621     Op->StartLoc = S;
3622     Op->EndLoc = S;
3623     return Op;
3624   }
3625 
3626   static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
3627                                                SMLoc E) {
3628     auto Op = std::make_unique<ARMOperand>(k_Register);
3629     Op->Reg.RegNum = RegNum;
3630     Op->StartLoc = S;
3631     Op->EndLoc = E;
3632     return Op;
3633   }
3634 
3635   static std::unique_ptr<ARMOperand>
3636   CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3637                         unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
3638                         SMLoc E) {
3639     auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
3640     Op->RegShiftedReg.ShiftTy = ShTy;
3641     Op->RegShiftedReg.SrcReg = SrcReg;
3642     Op->RegShiftedReg.ShiftReg = ShiftReg;
3643     Op->RegShiftedReg.ShiftImm = ShiftImm;
3644     Op->StartLoc = S;
3645     Op->EndLoc = E;
3646     return Op;
3647   }
3648 
3649   static std::unique_ptr<ARMOperand>
3650   CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3651                          unsigned ShiftImm, SMLoc S, SMLoc E) {
3652     auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
3653     Op->RegShiftedImm.ShiftTy = ShTy;
3654     Op->RegShiftedImm.SrcReg = SrcReg;
3655     Op->RegShiftedImm.ShiftImm = ShiftImm;
3656     Op->StartLoc = S;
3657     Op->EndLoc = E;
3658     return Op;
3659   }
3660 
3661   static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3662                                                       SMLoc S, SMLoc E) {
3663     auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
3664     Op->ShifterImm.isASR = isASR;
3665     Op->ShifterImm.Imm = Imm;
3666     Op->StartLoc = S;
3667     Op->EndLoc = E;
3668     return Op;
3669   }
3670 
3671   static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
3672                                                   SMLoc E) {
3673     auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
3674     Op->RotImm.Imm = Imm;
3675     Op->StartLoc = S;
3676     Op->EndLoc = E;
3677     return Op;
3678   }
3679 
3680   static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3681                                                   SMLoc S, SMLoc E) {
3682     auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
3683     Op->ModImm.Bits = Bits;
3684     Op->ModImm.Rot = Rot;
3685     Op->StartLoc = S;
3686     Op->EndLoc = E;
3687     return Op;
3688   }
3689 
3690   static std::unique_ptr<ARMOperand>
3691   CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
3692     auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
3693     Op->Imm.Val = Val;
3694     Op->StartLoc = S;
3695     Op->EndLoc = E;
3696     return Op;
3697   }
3698 
3699   static std::unique_ptr<ARMOperand>
3700   CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
3701     auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
3702     Op->Bitfield.LSB = LSB;
3703     Op->Bitfield.Width = Width;
3704     Op->StartLoc = S;
3705     Op->EndLoc = E;
3706     return Op;
3707   }
3708 
3709   static std::unique_ptr<ARMOperand>
3710   CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
3711                 SMLoc StartLoc, SMLoc EndLoc) {
3712     assert(Regs.size() > 0 && "RegList contains no registers?");
3713     KindTy Kind = k_RegisterList;
3714 
3715     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
3716             Regs.front().second)) {
3717       if (Regs.back().second == ARM::VPR)
3718         Kind = k_FPDRegisterListWithVPR;
3719       else
3720         Kind = k_DPRRegisterList;
3721     } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
3722                    Regs.front().second)) {
3723       if (Regs.back().second == ARM::VPR)
3724         Kind = k_FPSRegisterListWithVPR;
3725       else
3726         Kind = k_SPRRegisterList;
3727     }
3728 
3729     if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3730       Kind = k_RegisterListWithAPSR;
3731 
3732     assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");
3733 
3734     auto Op = std::make_unique<ARMOperand>(Kind);
3735     for (const auto &P : Regs)
3736       Op->Registers.push_back(P.second);
3737 
3738     Op->StartLoc = StartLoc;
3739     Op->EndLoc = EndLoc;
3740     return Op;
3741   }
3742 
3743   static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
3744                                                       unsigned Count,
3745                                                       bool isDoubleSpaced,
3746                                                       SMLoc S, SMLoc E) {
3747     auto Op = std::make_unique<ARMOperand>(k_VectorList);
3748     Op->VectorList.RegNum = RegNum;
3749     Op->VectorList.Count = Count;
3750     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3751     Op->StartLoc = S;
3752     Op->EndLoc = E;
3753     return Op;
3754   }
3755 
3756   static std::unique_ptr<ARMOperand>
3757   CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3758                            SMLoc S, SMLoc E) {
3759     auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
3760     Op->VectorList.RegNum = RegNum;
3761     Op->VectorList.Count = Count;
3762     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3763     Op->StartLoc = S;
3764     Op->EndLoc = E;
3765     return Op;
3766   }
3767 
3768   static std::unique_ptr<ARMOperand>
3769   CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3770                           bool isDoubleSpaced, SMLoc S, SMLoc E) {
3771     auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
3772     Op->VectorList.RegNum = RegNum;
3773     Op->VectorList.Count = Count;
3774     Op->VectorList.LaneIndex = Index;
3775     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3776     Op->StartLoc = S;
3777     Op->EndLoc = E;
3778     return Op;
3779   }
3780 
3781   static std::unique_ptr<ARMOperand>
3782   CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3783     auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
3784     Op->VectorIndex.Val = Idx;
3785     Op->StartLoc = S;
3786     Op->EndLoc = E;
3787     return Op;
3788   }
3789 
3790   static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3791                                                SMLoc E) {
3792     auto Op = std::make_unique<ARMOperand>(k_Immediate);
3793     Op->Imm.Val = Val;
3794     Op->StartLoc = S;
3795     Op->EndLoc = E;
3796     return Op;
3797   }
3798 
3799   static std::unique_ptr<ARMOperand>
3800   CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum,
3801             ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
3802             bool isNegative, SMLoc S, SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3803     auto Op = std::make_unique<ARMOperand>(k_Memory);
3804     Op->Memory.BaseRegNum = BaseRegNum;
3805     Op->Memory.OffsetImm = OffsetImm;
3806     Op->Memory.OffsetRegNum = OffsetRegNum;
3807     Op->Memory.ShiftType = ShiftType;
3808     Op->Memory.ShiftImm = ShiftImm;
3809     Op->Memory.Alignment = Alignment;
3810     Op->Memory.isNegative = isNegative;
3811     Op->StartLoc = S;
3812     Op->EndLoc = E;
3813     Op->AlignmentLoc = AlignmentLoc;
3814     return Op;
3815   }
3816 
3817   static std::unique_ptr<ARMOperand>
3818   CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3819                    unsigned ShiftImm, SMLoc S, SMLoc E) {
3820     auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
3821     Op->PostIdxReg.RegNum = RegNum;
3822     Op->PostIdxReg.isAdd = isAdd;
3823     Op->PostIdxReg.ShiftTy = ShiftTy;
3824     Op->PostIdxReg.ShiftImm = ShiftImm;
3825     Op->StartLoc = S;
3826     Op->EndLoc = E;
3827     return Op;
3828   }
3829 
3830   static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3831                                                          SMLoc S) {
3832     auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
3833     Op->MBOpt.Val = Opt;
3834     Op->StartLoc = S;
3835     Op->EndLoc = S;
3836     return Op;
3837   }
3838 
3839   static std::unique_ptr<ARMOperand>
3840   CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3841     auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3842     Op->ISBOpt.Val = Opt;
3843     Op->StartLoc = S;
3844     Op->EndLoc = S;
3845     return Op;
3846   }
3847 
3848   static std::unique_ptr<ARMOperand>
3849   CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
3850     auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3851     Op->TSBOpt.Val = Opt;
3852     Op->StartLoc = S;
3853     Op->EndLoc = S;
3854     return Op;
3855   }
3856 
3857   static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3858                                                       SMLoc S) {
3859     auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
3860     Op->IFlags.Val = IFlags;
3861     Op->StartLoc = S;
3862     Op->EndLoc = S;
3863     return Op;
3864   }
3865 
3866   static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3867     auto Op = std::make_unique<ARMOperand>(k_MSRMask);
3868     Op->MMask.Val = MMask;
3869     Op->StartLoc = S;
3870     Op->EndLoc = S;
3871     return Op;
3872   }
3873 
3874   static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3875     auto Op = std::make_unique<ARMOperand>(k_BankedReg);
3876     Op->BankedReg.Val = Reg;
3877     Op->StartLoc = S;
3878     Op->EndLoc = S;
3879     return Op;
3880   }
3881 };
3882 
3883 } // end anonymous namespace.
3884 
3885 void ARMOperand::print(raw_ostream &OS) const {
3886   auto RegName = [](MCRegister Reg) {
3887     if (Reg)
3888       return ARMInstPrinter::getRegisterName(Reg);
3889     else
3890       return "noreg";
3891   };
3892 
3893   switch (Kind) {
3894   case k_CondCode:
3895     OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3896     break;
3897   case k_VPTPred:
3898     OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3899     break;
3900   case k_CCOut:
3901     OS << "<ccout " << RegName(getReg()) << ">";
3902     break;
3903   case k_ITCondMask: {
3904     static const char *const MaskStr[] = {
3905       "(invalid)", "(tttt)", "(ttt)", "(ttte)",
3906       "(tt)",      "(ttet)", "(tte)", "(ttee)",
3907       "(t)",       "(tett)", "(tet)", "(tete)",
3908       "(te)",      "(teet)", "(tee)", "(teee)",
3909     };
3910     assert((ITMask.Mask & 0xf) == ITMask.Mask);
3911     OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3912     break;
3913   }
3914   case k_CoprocNum:
3915     OS << "<coprocessor number: " << getCoproc() << ">";
3916     break;
3917   case k_CoprocReg:
3918     OS << "<coprocessor register: " << getCoproc() << ">";
3919     break;
3920   case k_CoprocOption:
3921     OS << "<coprocessor option: " << CoprocOption.Val << ">";
3922     break;
3923   case k_MSRMask:
3924     OS << "<mask: " << getMSRMask() << ">";
3925     break;
3926   case k_BankedReg:
3927     OS << "<banked reg: " << getBankedReg() << ">";
3928     break;
3929   case k_Immediate:
3930     OS << *getImm();
3931     break;
3932   case k_MemBarrierOpt:
3933     OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3934     break;
3935   case k_InstSyncBarrierOpt:
3936     OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3937     break;
3938   case k_TraceSyncBarrierOpt:
3939     OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
3940     break;
3941   case k_Memory:
3942     OS << "<memory";
3943     if (Memory.BaseRegNum)
3944       OS << " base:" << RegName(Memory.BaseRegNum);
3945     if (Memory.OffsetImm)
3946       OS << " offset-imm:" << *Memory.OffsetImm;
3947     if (Memory.OffsetRegNum)
3948       OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
3949          << RegName(Memory.OffsetRegNum);
3950     if (Memory.ShiftType != ARM_AM::no_shift) {
3951       OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
3952       OS << " shift-imm:" << Memory.ShiftImm;
3953     }
3954     if (Memory.Alignment)
3955       OS << " alignment:" << Memory.Alignment;
3956     OS << ">";
3957     break;
3958   case k_PostIndexRegister:
3959     OS << "<post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3960        << RegName(PostIdxReg.RegNum);
3961     if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3962       OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3963          << PostIdxReg.ShiftImm;
3964     OS << ">";
3965     break;
3966   case k_ProcIFlags: {
3967     OS << "<ARM_PROC::";
3968     unsigned IFlags = getProcIFlags();
3969     for (int i=2; i >= 0; --i)
3970       if (IFlags & (1 << i))
3971         OS << ARM_PROC::IFlagsToString(1 << i);
3972     OS << ">";
3973     break;
3974   }
3975   case k_Register:
3976     OS << "<register " << RegName(getReg()) << ">";
3977     break;
3978   case k_ShifterImmediate:
3979     OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3980        << " #" << ShifterImm.Imm << ">";
3981     break;
3982   case k_ShiftedRegister:
3983     OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
3984        << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
3985        << RegName(RegShiftedReg.ShiftReg) << ">";
3986     break;
3987   case k_ShiftedImmediate:
3988     OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
3989        << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
3990        << RegShiftedImm.ShiftImm << ">";
3991     break;
3992   case k_RotateImmediate:
3993     OS << "<ror #" << (RotImm.Imm * 8) << ">";
3994     break;
3995   case k_ModifiedImmediate:
3996     OS << "<mod_imm #" << ModImm.Bits << ", #"
3997        << ModImm.Rot << ">";
3998     break;
3999   case k_ConstantPoolImmediate:
4000     OS << "<constant_pool_imm #" << *getConstantPoolImm() << ">";
4001     break;
4002   case k_BitfieldDescriptor:
4003     OS << "<bitfield " << "lsb: " << Bitfield.LSB
4004        << ", width: " << Bitfield.Width << ">";
4005     break;
4006   case k_RegisterList:
4007   case k_RegisterListWithAPSR:
4008   case k_DPRRegisterList:
4009   case k_SPRRegisterList:
4010   case k_FPSRegisterListWithVPR:
4011   case k_FPDRegisterListWithVPR: {
4012     OS << "<register_list ";
4013 
4014     const SmallVectorImpl<unsigned> &RegList = getRegList();
4015     for (SmallVectorImpl<unsigned>::const_iterator
4016            I = RegList.begin(), E = RegList.end(); I != E; ) {
4017       OS << RegName(*I);
4018       if (++I < E) OS << ", ";
4019     }
4020 
4021     OS << ">";
4022     break;
4023   }
4024   case k_VectorList:
4025     OS << "<vector_list " << VectorList.Count << " * "
4026        << RegName(VectorList.RegNum) << ">";
4027     break;
4028   case k_VectorListAllLanes:
4029     OS << "<vector_list(all lanes) " << VectorList.Count << " * "
4030        << RegName(VectorList.RegNum) << ">";
4031     break;
4032   case k_VectorListIndexed:
4033     OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
4034        << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
4035     break;
4036   case k_Token:
4037     OS << "'" << getToken() << "'";
4038     break;
4039   case k_VectorIndex:
4040     OS << "<vectorindex " << getVectorIndex() << ">";
4041     break;
4042   }
4043 }
4044 
4045 /// @name Auto-generated Match Functions
4046 /// {
4047 
4048 static unsigned MatchRegisterName(StringRef Name);
4049 
4050 /// }
4051 
4052 bool ARMAsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
4053                                  SMLoc &EndLoc) {
4054   const AsmToken &Tok = getParser().getTok();
4055   StartLoc = Tok.getLoc();
4056   EndLoc = Tok.getEndLoc();
4057   Reg = tryParseRegister();
4058 
4059   return Reg == (unsigned)-1;
4060 }
4061 
4062 ParseStatus ARMAsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
4063                                            SMLoc &EndLoc) {
4064   if (parseRegister(Reg, StartLoc, EndLoc))
4065     return ParseStatus::NoMatch;
4066   return ParseStatus::Success;
4067 }
4068 
4069 /// Try to parse a register name.  The token must be an Identifier when called,
4070 /// and if it is a register name the token is eaten and the register number is
4071 /// returned.  Otherwise return -1.
4072 int ARMAsmParser::tryParseRegister() {
4073   MCAsmParser &Parser = getParser();
4074   const AsmToken &Tok = Parser.getTok();
4075   if (Tok.isNot(AsmToken::Identifier)) return -1;
4076 
4077   std::string lowerCase = Tok.getString().lower();
4078   unsigned RegNum = MatchRegisterName(lowerCase);
4079   if (!RegNum) {
4080     RegNum = StringSwitch<unsigned>(lowerCase)
4081       .Case("r13", ARM::SP)
4082       .Case("r14", ARM::LR)
4083       .Case("r15", ARM::PC)
4084       .Case("ip", ARM::R12)
4085       // Additional register name aliases for 'gas' compatibility.
4086       .Case("a1", ARM::R0)
4087       .Case("a2", ARM::R1)
4088       .Case("a3", ARM::R2)
4089       .Case("a4", ARM::R3)
4090       .Case("v1", ARM::R4)
4091       .Case("v2", ARM::R5)
4092       .Case("v3", ARM::R6)
4093       .Case("v4", ARM::R7)
4094       .Case("v5", ARM::R8)
4095       .Case("v6", ARM::R9)
4096       .Case("v7", ARM::R10)
4097       .Case("v8", ARM::R11)
4098       .Case("sb", ARM::R9)
4099       .Case("sl", ARM::R10)
4100       .Case("fp", ARM::R11)
4101       .Default(0);
4102   }
4103   if (!RegNum) {
4104     // Check for aliases registered via .req. Canonicalize to lower case.
4105     // That's more consistent since register names are case insensitive, and
4106     // it's how the original entry was passed in from MC/MCParser/AsmParser.
4107     StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
4108     // If no match, return failure.
4109     if (Entry == RegisterReqs.end())
4110       return -1;
4111     Parser.Lex(); // Eat identifier token.
4112     return Entry->getValue();
4113   }
4114 
4115   // Some FPUs only have 16 D registers, so D16-D31 are invalid
4116   if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
4117     return -1;
4118 
4119   Parser.Lex(); // Eat identifier token.
4120 
4121   return RegNum;
4122 }
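
// Illustrative examples of the alias handling above (not exhaustive):
//   "r13" -> ARM::SP, "ip" -> ARM::R12, "fp"/"v8" -> ARM::R11 (gas aliases)
//   "d16".."d31" -> -1 when the target does not have the D32 feature
//   names defined via ".req" resolve through the RegisterReqs map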
4123 
4124 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
4125 // If a recoverable error occurs, return 1. If an irrecoverable error
4126 // occurs, return -1. An irrecoverable error is one where tokens have been
4127 // consumed in the process of trying to parse the shifter (i.e., when it is
4128 // indeed a shifter operand, but malformed).
4129 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
4130   MCAsmParser &Parser = getParser();
4131   SMLoc S = Parser.getTok().getLoc();
4132   const AsmToken &Tok = Parser.getTok();
4133   if (Tok.isNot(AsmToken::Identifier))
4134     return -1;
4135 
4136   std::string lowerCase = Tok.getString().lower();
4137   ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
4138       .Case("asl", ARM_AM::lsl)
4139       .Case("lsl", ARM_AM::lsl)
4140       .Case("lsr", ARM_AM::lsr)
4141       .Case("asr", ARM_AM::asr)
4142       .Case("ror", ARM_AM::ror)
4143       .Case("rrx", ARM_AM::rrx)
4144       .Default(ARM_AM::no_shift);
4145 
4146   if (ShiftTy == ARM_AM::no_shift)
4147     return 1;
4148 
4149   Parser.Lex(); // Eat the operator.
4150 
4151   // The source register for the shift has already been added to the
4152   // operand list, so we need to pop it off and combine it into the shifted
4153   // register operand instead.
4154   std::unique_ptr<ARMOperand> PrevOp(
4155       (ARMOperand *)Operands.pop_back_val().release());
4156   if (!PrevOp->isReg())
4157     return Error(PrevOp->getStartLoc(), "shift must be of a register");
4158   int SrcReg = PrevOp->getReg();
4159 
4160   SMLoc EndLoc;
4161   int64_t Imm = 0;
4162   int ShiftReg = 0;
4163   if (ShiftTy == ARM_AM::rrx) {
4164     // RRX doesn't have an explicit shift amount. The encoder expects
4165     // the shift register to be the same as the source register. Seems odd,
4166     // but OK.
4167     ShiftReg = SrcReg;
4168   } else {
4169     // Figure out if this is shifted by a constant or a register (for non-RRX).
4170     if (Parser.getTok().is(AsmToken::Hash) ||
4171         Parser.getTok().is(AsmToken::Dollar)) {
4172       Parser.Lex(); // Eat hash.
4173       SMLoc ImmLoc = Parser.getTok().getLoc();
4174       const MCExpr *ShiftExpr = nullptr;
4175       if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4176         Error(ImmLoc, "invalid immediate shift value");
4177         return -1;
4178       }
4179       // The expression must be evaluatable as an immediate.
4180       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
4181       if (!CE) {
4182         Error(ImmLoc, "invalid immediate shift value");
4183         return -1;
4184       }
4185       // Range check the immediate.
4186       // lsl, ror: 0 <= imm <= 31
4187       // lsr, asr: 0 <= imm <= 32
4188       Imm = CE->getValue();
4189       if (Imm < 0 ||
4190           ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
4191           ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
4192         Error(ImmLoc, "immediate shift value out of range");
4193         return -1;
4194       }
4195       // shift by zero is a nop. Always send it through as lsl.
4196       // ('as' compatibility)
4197       if (Imm == 0)
4198         ShiftTy = ARM_AM::lsl;
4199     } else if (Parser.getTok().is(AsmToken::Identifier)) {
4200       SMLoc L = Parser.getTok().getLoc();
4201       EndLoc = Parser.getTok().getEndLoc();
4202       ShiftReg = tryParseRegister();
4203       if (ShiftReg == -1) {
4204         Error(L, "expected immediate or register in shift operand");
4205         return -1;
4206       }
4207     } else {
4208       Error(Parser.getTok().getLoc(),
4209             "expected immediate or register in shift operand");
4210       return -1;
4211     }
4212   }
4213 
4214   if (ShiftReg && ShiftTy != ARM_AM::rrx)
4215     Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
4216                                                          ShiftReg, Imm,
4217                                                          S, EndLoc));
4218   else
4219     Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4220                                                           S, EndLoc));
4221 
4222   return 0;
4223 }
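
// Illustrative examples of the shifter parsing above (illustration only),
// assuming the previously parsed operand was the register r0:
//   "lsl #3"  -> shifted-immediate operand (lsl, r0, #3), returns 0
//   "lsl r1"  -> shifted-register operand (lsl, r0, r1), returns 0
//   "rrx"     -> shifted-immediate operand (rrx, r0, #0), returns 0
//   "lsl #33" -> "immediate shift value out of range", returns -1
//   "foo"     -> not a shift operator, returns 1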
4224 
4225 /// Try to parse a register name.  The token must be an Identifier when called.
4226 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
4227 /// if there is a "writeback". Returns 'true' if it's not a register.
4228 ///
4229 /// TODO this is likely to change to allow different register types and or to
4230 /// TODO this is likely to change to allow different register types and/or to
4231 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
4232   MCAsmParser &Parser = getParser();
4233   SMLoc RegStartLoc = Parser.getTok().getLoc();
4234   SMLoc RegEndLoc = Parser.getTok().getEndLoc();
4235   int RegNo = tryParseRegister();
4236   if (RegNo == -1)
4237     return true;
4238 
4239   Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
4240 
4241   const AsmToken &ExclaimTok = Parser.getTok();
4242   if (ExclaimTok.is(AsmToken::Exclaim)) {
4243     Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
4244                                                ExclaimTok.getLoc()));
4245     Parser.Lex(); // Eat exclaim token
4246     return false;
4247   }
4248 
4249   // Also check for an index operand. This is only legal for vector registers,
4250   // but that'll get caught OK in operand matching, so we don't need to
4251   // explicitly filter everything else out here.
4252   if (Parser.getTok().is(AsmToken::LBrac)) {
4253     SMLoc SIdx = Parser.getTok().getLoc();
4254     Parser.Lex(); // Eat left bracket token.
4255 
4256     const MCExpr *ImmVal;
4257     if (getParser().parseExpression(ImmVal))
4258       return true;
4259     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4260     if (!MCE)
4261       return TokError("immediate value expected for vector index");
4262 
4263     if (Parser.getTok().isNot(AsmToken::RBrac))
4264       return Error(Parser.getTok().getLoc(), "']' expected");
4265 
4266     SMLoc E = Parser.getTok().getEndLoc();
4267     Parser.Lex(); // Eat right bracket token.
4268 
4269     Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
4270                                                      SIdx, E,
4271                                                      getContext()));
4272   }
4273 
4274   return false;
4275 }
4276 
4277 /// MatchCoprocessorOperandName - Try to parse a coprocessor-related
4278 /// instruction with a symbolic operand name.
4279 /// We accept "crN" syntax for GAS compatibility.
4280 /// <operand-name> ::= <prefix><number>
4281 /// If CoprocOp is 'c', then:
4282 ///   <prefix> ::= c | cr
4283 /// If CoprocOp is 'p', then:
4284 ///   <prefix> ::= p
4285 /// <number> ::= integer in range [0, 15]
4286 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4287   // Use the same layout as the tablegen'erated register name matcher. Ugly,
4288   // but efficient.
4289   if (Name.size() < 2 || Name[0] != CoprocOp)
4290     return -1;
4291   Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
4292 
4293   switch (Name.size()) {
4294   default: return -1;
4295   case 1:
4296     switch (Name[0]) {
4297     default:  return -1;
4298     case '0': return 0;
4299     case '1': return 1;
4300     case '2': return 2;
4301     case '3': return 3;
4302     case '4': return 4;
4303     case '5': return 5;
4304     case '6': return 6;
4305     case '7': return 7;
4306     case '8': return 8;
4307     case '9': return 9;
4308     }
4309   case 2:
4310     if (Name[0] != '1')
4311       return -1;
4312     switch (Name[1]) {
4313     default:  return -1;
4314     // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4315     // However, old cores (v5/v6) did use them in that way.
4316     case '0': return 10;
4317     case '1': return 11;
4318     case '2': return 12;
4319     case '3': return 13;
4320     case '4': return 14;
4321     case '5': return 15;
4322     }
4323   }
4324 }
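
// Illustrative examples of the matcher above (names are already lower-cased
// by the callers):
//   MatchCoprocessorOperandName("p7",   'p') == 7
//   MatchCoprocessorOperandName("cr15", 'c') == 15
//   MatchCoprocessorOperandName("c3",   'c') == 3
//   MatchCoprocessorOperandName("c16",  'c') == -1  (out of range)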
4325 
4326 /// parseITCondCode - Try to parse a condition code for an IT instruction.
4327 ParseStatus ARMAsmParser::parseITCondCode(OperandVector &Operands) {
4328   MCAsmParser &Parser = getParser();
4329   SMLoc S = Parser.getTok().getLoc();
4330   const AsmToken &Tok = Parser.getTok();
4331   if (!Tok.is(AsmToken::Identifier))
4332     return ParseStatus::NoMatch;
4333   unsigned CC = ARMCondCodeFromString(Tok.getString());
4334   if (CC == ~0U)
4335     return ParseStatus::NoMatch;
4336   Parser.Lex(); // Eat the token.
4337 
4338   Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
4339 
4340   return ParseStatus::Success;
4341 }
4342 
4343 /// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
4344 /// token must be an Identifier when called, and if it is a coprocessor
4345 /// number, the token is eaten and the operand is added to the operand list.
4346 ParseStatus ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
4347   MCAsmParser &Parser = getParser();
4348   SMLoc S = Parser.getTok().getLoc();
4349   const AsmToken &Tok = Parser.getTok();
4350   if (Tok.isNot(AsmToken::Identifier))
4351     return ParseStatus::NoMatch;
4352 
4353   int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
4354   if (Num == -1)
4355     return ParseStatus::NoMatch;
4356   if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
4357     return ParseStatus::NoMatch;
4358 
4359   Parser.Lex(); // Eat identifier token.
4360   Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
4361   return ParseStatus::Success;
4362 }
4363 
4364 /// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
4365 /// token must be an Identifier when called, and if it is a coprocessor
4366 /// register, the token is eaten and the operand is added to the operand list.
4367 ParseStatus ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
4368   MCAsmParser &Parser = getParser();
4369   SMLoc S = Parser.getTok().getLoc();
4370   const AsmToken &Tok = Parser.getTok();
4371   if (Tok.isNot(AsmToken::Identifier))
4372     return ParseStatus::NoMatch;
4373 
4374   int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
4375   if (Reg == -1)
4376     return ParseStatus::NoMatch;
4377 
4378   Parser.Lex(); // Eat identifier token.
4379   Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
4380   return ParseStatus::Success;
4381 }
4382 
4383 /// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
4384 /// coproc_option : '{' imm0_255 '}'
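/// For example, the trailing "{4}" in "ldc p14, c5, [r0], {4}" (an assumed
/// illustrative spelling of the unindexed LDC form).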
4385 ParseStatus ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
4386   MCAsmParser &Parser = getParser();
4387   SMLoc S = Parser.getTok().getLoc();
4388 
4389   // If this isn't a '{', this isn't a coprocessor immediate operand.
4390   if (Parser.getTok().isNot(AsmToken::LCurly))
4391     return ParseStatus::NoMatch;
4392   Parser.Lex(); // Eat the '{'
4393 
4394   const MCExpr *Expr;
4395   SMLoc Loc = Parser.getTok().getLoc();
4396   if (getParser().parseExpression(Expr))
4397     return Error(Loc, "illegal expression");
4398   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4399   if (!CE || CE->getValue() < 0 || CE->getValue() > 255)
4400     return Error(Loc,
4401                  "coprocessor option must be an immediate in range [0, 255]");
4402   int Val = CE->getValue();
4403 
4404   // Check for and consume the closing '}'
4405   if (Parser.getTok().isNot(AsmToken::RCurly))
4406     return ParseStatus::Failure;
4407   SMLoc E = Parser.getTok().getEndLoc();
4408   Parser.Lex(); // Eat the '}'
4409 
4410   Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
4411   return ParseStatus::Success;
4412 }
4413 
4414 // For register list parsing, we need to map from raw GPR register numbering
4415 // to the enumeration values. The enumeration values aren't sorted by
4416 // register number due to our using "sp", "lr" and "pc" as canonical names.
4417 static unsigned getNextRegister(unsigned Reg) {
4418   // If this is a GPR, we need to do it manually, otherwise we can rely
4419   // on the sort ordering of the enumeration since the other reg-classes
4420   // are sane.
4421   if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4422     return Reg + 1;
4423   switch(Reg) {
4424   default: llvm_unreachable("Invalid GPR number!");
4425   case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
4426   case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
4427   case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
4428   case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
4429   case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
4430   case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
4431   case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
4432   case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
4433   }
4434 }
4435 
4436 // Insert an <Encoding, Register> pair into an ordered vector. Return true on
4437 // success, or false if a duplicate encoding was found.
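// For example, assuming Regs is already sorted by encoding: inserting
// encoding 3 into {1, 2, 4} yields {1, 2, 3, 4} and returns true, while
// inserting 2 again leaves {1, 2, 4} unchanged and returns false.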
4438 static bool
4439 insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
4440                    unsigned Enc, unsigned Reg) {
4441   Regs.emplace_back(Enc, Reg);
4442   for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
4443     if (J->first == Enc) {
4444       Regs.erase(J.base());
4445       return false;
4446     }
4447     if (J->first < Enc)
4448       break;
4449     std::swap(*I, *J);
4450   }
4451   return true;
4452 }
4453 
4454 /// Parse a register list.
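/// Illustrative examples: "{r0, r1, r4-r7, lr}" for LDM/STM/PUSH/POP-style
/// lists, "{d0-d3}" for VFP lists; "{q0}" is accepted and expanded to its two
/// D sub-registers.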
4455 bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
4456                                      bool AllowRAAC) {
4457   MCAsmParser &Parser = getParser();
4458   if (Parser.getTok().isNot(AsmToken::LCurly))
4459     return TokError("Token is not a Left Curly Brace");
4460   SMLoc S = Parser.getTok().getLoc();
4461   Parser.Lex(); // Eat '{' token.
4462   SMLoc RegLoc = Parser.getTok().getLoc();
4463 
4464   // Check the first register in the list to see what register class
4465   // this is a list of.
4466   int Reg = tryParseRegister();
4467   if (Reg == -1)
4468     return Error(RegLoc, "register expected");
4469   if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4470     return Error(RegLoc, "pseudo-register not allowed");
4471   // The reglist instructions have at most 16 registers, so reserve
4472   // space for that many.
4473   int EReg = 0;
4474   SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
4475 
4476   // Allow Q regs and just interpret them as the two D sub-registers.
4477   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4478     Reg = getDRegFromQReg(Reg);
4479     EReg = MRI->getEncodingValue(Reg);
4480     Registers.emplace_back(EReg, Reg);
4481     ++Reg;
4482   }
4483   const MCRegisterClass *RC;
4484   if (Reg == ARM::RA_AUTH_CODE ||
4485       ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4486     RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4487   else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4488     RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4489   else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4490     RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4491   else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4492     RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4493   else
4494     return Error(RegLoc, "invalid register in register list");
4495 
4496   // Store the register.
4497   EReg = MRI->getEncodingValue(Reg);
4498   Registers.emplace_back(EReg, Reg);
4499 
4500   // This starts immediately after the first register token in the list,
4501   // so we can see either a comma or a minus (range separator) as a legal
4502   // next token.
4503   while (Parser.getTok().is(AsmToken::Comma) ||
4504          Parser.getTok().is(AsmToken::Minus)) {
4505     if (Parser.getTok().is(AsmToken::Minus)) {
4506       if (Reg == ARM::RA_AUTH_CODE)
4507         return Error(RegLoc, "pseudo-register not allowed");
4508       Parser.Lex(); // Eat the minus.
4509       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4510       int EndReg = tryParseRegister();
4511       if (EndReg == -1)
4512         return Error(AfterMinusLoc, "register expected");
4513       if (EndReg == ARM::RA_AUTH_CODE)
4514         return Error(AfterMinusLoc, "pseudo-register not allowed");
4515       // Allow Q regs and just interpret them as the two D sub-registers.
4516       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4517         EndReg = getDRegFromQReg(EndReg) + 1;
4518       // If the register is the same as the start reg, there's nothing
4519       // more to do.
4520       if (Reg == EndReg)
4521         continue;
4522       // The register must be in the same register class as the first.
4523       if (!RC->contains(EndReg))
4524         return Error(AfterMinusLoc, "invalid register in register list");
4525       // Ranges must go from low to high.
4526       if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4527         return Error(AfterMinusLoc, "bad range in register list");
4528 
4529       // Add all the registers in the range to the register list.
4530       while (Reg != EndReg) {
4531         Reg = getNextRegister(Reg);
4532         EReg = MRI->getEncodingValue(Reg);
4533         if (!insertNoDuplicates(Registers, EReg, Reg)) {
4534           Warning(AfterMinusLoc, StringRef("duplicated register (") +
4535                                      ARMInstPrinter::getRegisterName(Reg) +
4536                                      ") in register list");
4537         }
4538       }
4539       continue;
4540     }
4541     Parser.Lex(); // Eat the comma.
4542     RegLoc = Parser.getTok().getLoc();
4543     int OldReg = Reg;
4544     const AsmToken RegTok = Parser.getTok();
4545     Reg = tryParseRegister();
4546     if (Reg == -1)
4547       return Error(RegLoc, "register expected");
4548     if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4549       return Error(RegLoc, "pseudo-register not allowed");
4550     // Allow Q regs and just interpret them as the two D sub-registers.
4551     bool isQReg = false;
4552     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4553       Reg = getDRegFromQReg(Reg);
4554       isQReg = true;
4555     }
4556     if (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg) &&
4557         RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4558         ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
4559       // Switch the register class, as GPRwithAPSRnospRegClassID mostly
4560       // overlaps GPRRegClassID but additionally contains APSR.
4561       RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4562     }
4563     if (Reg == ARM::VPR &&
4564         (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4565          RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4566          RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4567       RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4568       EReg = MRI->getEncodingValue(Reg);
4569       if (!insertNoDuplicates(Registers, EReg, Reg)) {
4570         Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4571                             ") in register list");
4572       }
4573       continue;
4574     }
4575     // The register must be in the same register class as the first.
4576     if ((Reg == ARM::RA_AUTH_CODE &&
4577          RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4578         (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg)))
4579       return Error(RegLoc, "invalid register in register list");
4580     // In most cases, the list must be monotonically increasing. An
4581     // exception is CLRM, which is order-independent anyway, so
4582     // there's no potential for confusion if you write clrm {r2,r1}
4583     // instead of clrm {r1,r2}.
4584     if (EnforceOrder &&
4585         MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
4586       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4587         Warning(RegLoc, "register list not in ascending order");
4588       else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4589         return Error(RegLoc, "register list not in ascending order");
4590     }
4591     // VFP register lists must also be contiguous.
4592     if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4593         RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4594         Reg != OldReg + 1)
4595       return Error(RegLoc, "non-contiguous register range");
4596     EReg = MRI->getEncodingValue(Reg);
4597     if (!insertNoDuplicates(Registers, EReg, Reg)) {
4598       Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4599                           ") in register list");
4600     }
4601     if (isQReg) {
4602       EReg = MRI->getEncodingValue(++Reg);
4603       Registers.emplace_back(EReg, Reg);
4604     }
4605   }
4606 
4607   if (Parser.getTok().isNot(AsmToken::RCurly))
4608     return Error(Parser.getTok().getLoc(), "'}' expected");
4609   SMLoc E = Parser.getTok().getEndLoc();
4610   Parser.Lex(); // Eat '}' token.
4611 
4612   // Push the register list operand.
4613   Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
4614 
4615   // The ARM system instruction variants for LDM/STM have a '^' token here.
4616   if (Parser.getTok().is(AsmToken::Caret)) {
4617     Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
4618     Parser.Lex(); // Eat '^' token.
4619   }
4620 
4621   return false;
4622 }
4623 
4624 // Helper function to parse the lane index for vector lists.
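// For example, "d2[1]" yields IndexedLane with index 1, "d2[]" yields
// AllLanes, and a bare "d2" yields NoLanes (illustrative NEON spellings).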
4625 ParseStatus ARMAsmParser::parseVectorLane(VectorLaneTy &LaneKind,
4626                                           unsigned &Index, SMLoc &EndLoc) {
4627   MCAsmParser &Parser = getParser();
4628   Index = 0; // Always return a defined index value.
4629   if (Parser.getTok().is(AsmToken::LBrac)) {
4630     Parser.Lex(); // Eat the '['.
4631     if (Parser.getTok().is(AsmToken::RBrac)) {
4632       // "Dn[]" is the 'all lanes' syntax.
4633       LaneKind = AllLanes;
4634       EndLoc = Parser.getTok().getEndLoc();
4635       Parser.Lex(); // Eat the ']'.
4636       return ParseStatus::Success;
4637     }
4638 
4639     // There's an optional '#' token here. Normally there wouldn't be, but
4640     // inline assembly puts one in, and it's friendly to accept that.
4641     if (Parser.getTok().is(AsmToken::Hash))
4642       Parser.Lex(); // Eat the '#'.
4643 
4644     const MCExpr *LaneIndex;
4645     SMLoc Loc = Parser.getTok().getLoc();
4646     if (getParser().parseExpression(LaneIndex))
4647       return Error(Loc, "illegal expression");
4648     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4649     if (!CE)
4650       return Error(Loc, "lane index must be empty or an integer");
4651     if (Parser.getTok().isNot(AsmToken::RBrac))
4652       return Error(Parser.getTok().getLoc(), "']' expected");
4653     EndLoc = Parser.getTok().getEndLoc();
4654     Parser.Lex(); // Eat the ']'.
4655     int64_t Val = CE->getValue();
4656 
4657     // FIXME: Make this range check context sensitive for .8, .16, .32.
4658     if (Val < 0 || Val > 7)
4659       return Error(Parser.getTok().getLoc(), "lane index out of range");
4660     Index = Val;
4661     LaneKind = IndexedLane;
4662     return ParseStatus::Success;
4663   }
4664   LaneKind = NoLanes;
4665   return ParseStatus::Success;
4666 }
4667 
4668 // parse a vector register list
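// Illustrative examples: "{d0, d1, d2, d3}", "{d0-d3}", "{q0, q1}" (each Q
// register standing for its two D sub-registers), or "{d0[], d1[]}" for an
// all-lanes list.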
4669 ParseStatus ARMAsmParser::parseVectorList(OperandVector &Operands) {
4670   MCAsmParser &Parser = getParser();
4671   VectorLaneTy LaneKind;
4672   unsigned LaneIndex;
4673   SMLoc S = Parser.getTok().getLoc();
4674   // As an extension (to match gas), support a plain D register or Q register
4675   // (without enclosing curly braces) as a single or double entry list,
4676   // respectively.
4677   if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
4678     SMLoc E = Parser.getTok().getEndLoc();
4679     int Reg = tryParseRegister();
4680     if (Reg == -1)
4681       return ParseStatus::NoMatch;
4682     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
4683       ParseStatus Res = parseVectorLane(LaneKind, LaneIndex, E);
4684       if (!Res.isSuccess())
4685         return Res;
4686       switch (LaneKind) {
4687       case NoLanes:
4688         Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
4689         break;
4690       case AllLanes:
4691         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
4692                                                                 S, E));
4693         break;
4694       case IndexedLane:
4695         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
4696                                                                LaneIndex,
4697                                                                false, S, E));
4698         break;
4699       }
4700       return ParseStatus::Success;
4701     }
4702     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4703       Reg = getDRegFromQReg(Reg);
4704       ParseStatus Res = parseVectorLane(LaneKind, LaneIndex, E);
4705       if (!Res.isSuccess())
4706         return Res;
4707       switch (LaneKind) {
4708       case NoLanes:
4709         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4710                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4711         Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
4712         break;
4713       case AllLanes:
4714         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4715                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4716         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
4717                                                                 S, E));
4718         break;
4719       case IndexedLane:
4720         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
4721                                                                LaneIndex,
4722                                                                false, S, E));
4723         break;
4724       }
4725       return ParseStatus::Success;
4726     }
4727     return Error(S, "vector register expected");
4728   }
4729 
4730   if (Parser.getTok().isNot(AsmToken::LCurly))
4731     return ParseStatus::NoMatch;
4732 
4733   Parser.Lex(); // Eat '{' token.
4734   SMLoc RegLoc = Parser.getTok().getLoc();
4735 
4736   int Reg = tryParseRegister();
4737   if (Reg == -1)
4738     return Error(RegLoc, "register expected");
4739   unsigned Count = 1;
4740   int Spacing = 0;
4741   unsigned FirstReg = Reg;
4742 
4743   if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
4744     return Error(Parser.getTok().getLoc(),
4745                  "vector register in range Q0-Q7 expected");
4746   // The list is of D registers, but we also allow Q regs and just interpret
4747   // them as the two D sub-registers.
4748   else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4749     FirstReg = Reg = getDRegFromQReg(Reg);
4750     Spacing = 1; // double-spacing requires explicit D registers, otherwise
4751                  // it's ambiguous with four-register single spaced.
4752     ++Reg;
4753     ++Count;
4754   }
4755 
4756   SMLoc E;
4757   if (!parseVectorLane(LaneKind, LaneIndex, E).isSuccess())
4758     return ParseStatus::Failure;
4759 
4760   while (Parser.getTok().is(AsmToken::Comma) ||
4761          Parser.getTok().is(AsmToken::Minus)) {
4762     if (Parser.getTok().is(AsmToken::Minus)) {
4763       if (!Spacing)
4764         Spacing = 1; // Register range implies a single spaced list.
4765       else if (Spacing == 2)
4766         return Error(Parser.getTok().getLoc(),
4767                      "sequential registers in double spaced list");
4768       Parser.Lex(); // Eat the minus.
4769       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4770       int EndReg = tryParseRegister();
4771       if (EndReg == -1)
4772         return Error(AfterMinusLoc, "register expected");
4773       // Allow Q regs and just interpret them as the two D sub-registers.
4774       if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4775         EndReg = getDRegFromQReg(EndReg) + 1;
4776       // If the register is the same as the start reg, there's nothing
4777       // more to do.
4778       if (Reg == EndReg)
4779         continue;
4780       // The register must be in the same register class as the first.
4781       if ((hasMVE() &&
4782            !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
4783           (!hasMVE() &&
4784            !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)))
4785         return Error(AfterMinusLoc, "invalid register in register list");
4786       // Ranges must go from low to high.
4787       if (Reg > EndReg)
4788         return Error(AfterMinusLoc, "bad range in register list");
4789       // Parse the lane specifier if present.
4790       VectorLaneTy NextLaneKind;
4791       unsigned NextLaneIndex;
4792       if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
4793         return ParseStatus::Failure;
4794       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4795         return Error(AfterMinusLoc, "mismatched lane index in register list");
4796 
4797       // Add all the registers in the range to the register list.
4798       Count += EndReg - Reg;
4799       Reg = EndReg;
4800       continue;
4801     }
4802     Parser.Lex(); // Eat the comma.
4803     RegLoc = Parser.getTok().getLoc();
4804     int OldReg = Reg;
4805     Reg = tryParseRegister();
4806     if (Reg == -1)
4807       return Error(RegLoc, "register expected");
4808 
4809     if (hasMVE()) {
4810       if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
4811         return Error(RegLoc, "vector register in range Q0-Q7 expected");
4812       Spacing = 1;
4813     }
4814     // Vector register lists must be contiguous. It's OK to use the
4815     // enumeration values directly here, as the VFP register classes have
4816     // their enums sorted properly.
4817     //
4818     // The list is of D registers, but we also allow Q regs and just interpret
4819     // them as the two D sub-registers.
4820     else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4821       if (!Spacing)
4822         Spacing = 1; // Register range implies a single spaced list.
4823       else if (Spacing == 2)
4824         return Error(
4825             RegLoc,
4826             "invalid register in double-spaced list (must be 'D' register')");
4827       Reg = getDRegFromQReg(Reg);
4828       if (Reg != OldReg + 1)
4829         return Error(RegLoc, "non-contiguous register range");
4830       ++Reg;
4831       Count += 2;
4832       // Parse the lane specifier if present.
4833       VectorLaneTy NextLaneKind;
4834       unsigned NextLaneIndex;
4835       SMLoc LaneLoc = Parser.getTok().getLoc();
4836       if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
4837         return ParseStatus::Failure;
4838       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4839         return Error(LaneLoc, "mismatched lane index in register list");
4840       continue;
4841     }
4842     // Normal D register.
4843     // Figure out the register spacing (single or double) of the list if
4844     // we don't know it already.
4845     if (!Spacing)
4846       Spacing = 1 + (Reg == OldReg + 2);
4847 
4848     // Just check that it's contiguous and keep going.
4849     if (Reg != OldReg + Spacing)
4850       return Error(RegLoc, "non-contiguous register range");
4851     ++Count;
4852     // Parse the lane specifier if present.
4853     VectorLaneTy NextLaneKind;
4854     unsigned NextLaneIndex;
4855     SMLoc EndLoc = Parser.getTok().getLoc();
4856     if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
4857       return ParseStatus::Failure;
4858     if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4859       return Error(EndLoc, "mismatched lane index in register list");
4860   }
4861 
4862   if (Parser.getTok().isNot(AsmToken::RCurly))
4863     return Error(Parser.getTok().getLoc(), "'}' expected");
4864   E = Parser.getTok().getEndLoc();
4865   Parser.Lex(); // Eat '}' token.
4866 
4867   switch (LaneKind) {
4868   case NoLanes:
4869   case AllLanes: {
4870     // Two-register operands have been converted to the
4871     // composite register classes.
4872     if (Count == 2 && !hasMVE()) {
4873       const MCRegisterClass *RC = (Spacing == 1) ?
4874         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4875         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4876       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4877     }
4878     auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
4879                    ARMOperand::CreateVectorListAllLanes);
4880     Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
4881     break;
4882   }
4883   case IndexedLane:
4884     Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
4885                                                            LaneIndex,
4886                                                            (Spacing == 2),
4887                                                            S, E));
4888     break;
4889   }
4890   return ParseStatus::Success;
4891 }
4892 
4893 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
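/// For example, "dmb ish" or "dsb sy"; a raw 4-bit immediate such as
/// "dmb #11" is also accepted (illustrative spellings).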
4894 ParseStatus ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4895   MCAsmParser &Parser = getParser();
4896   SMLoc S = Parser.getTok().getLoc();
4897   const AsmToken &Tok = Parser.getTok();
4898   unsigned Opt;
4899 
4900   if (Tok.is(AsmToken::Identifier)) {
4901     StringRef OptStr = Tok.getString();
4902 
4903     Opt = StringSwitch<unsigned>(OptStr.lower())
4904       .Case("sy",    ARM_MB::SY)
4905       .Case("st",    ARM_MB::ST)
4906       .Case("ld",    ARM_MB::LD)
4907       .Case("sh",    ARM_MB::ISH)
4908       .Case("ish",   ARM_MB::ISH)
4909       .Case("shst",  ARM_MB::ISHST)
4910       .Case("ishst", ARM_MB::ISHST)
4911       .Case("ishld", ARM_MB::ISHLD)
4912       .Case("nsh",   ARM_MB::NSH)
4913       .Case("un",    ARM_MB::NSH)
4914       .Case("nshst", ARM_MB::NSHST)
4915       .Case("nshld", ARM_MB::NSHLD)
4916       .Case("unst",  ARM_MB::NSHST)
4917       .Case("osh",   ARM_MB::OSH)
4918       .Case("oshst", ARM_MB::OSHST)
4919       .Case("oshld", ARM_MB::OSHLD)
4920       .Default(~0U);
4921 
4922     // ishld, oshld, nshld and ld are only available from ARMv8.
4923     if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4924                         Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4925       Opt = ~0U;
4926 
4927     if (Opt == ~0U)
4928       return ParseStatus::NoMatch;
4929 
4930     Parser.Lex(); // Eat identifier token.
4931   } else if (Tok.is(AsmToken::Hash) ||
4932              Tok.is(AsmToken::Dollar) ||
4933              Tok.is(AsmToken::Integer)) {
4934     if (Parser.getTok().isNot(AsmToken::Integer))
4935       Parser.Lex(); // Eat '#' or '$'.
4936     SMLoc Loc = Parser.getTok().getLoc();
4937 
4938     const MCExpr *MemBarrierID;
4939     if (getParser().parseExpression(MemBarrierID))
4940       return Error(Loc, "illegal expression");
4941 
4942     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4943     if (!CE)
4944       return Error(Loc, "constant expression expected");
4945 
4946     int Val = CE->getValue();
4947     if (Val & ~0xf)
4948       return Error(Loc, "immediate value out of range");
4949 
4950     Opt = ARM_MB::RESERVED_0 + Val;
4951   } else
4952     return ParseStatus::Failure;
4953 
4954   Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
4955   return ParseStatus::Success;
4956 }
4957 
4958 ParseStatus
4959 ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
4960   MCAsmParser &Parser = getParser();
4961   SMLoc S = Parser.getTok().getLoc();
4962   const AsmToken &Tok = Parser.getTok();
4963 
4964   if (Tok.isNot(AsmToken::Identifier))
4965     return ParseStatus::NoMatch;
4966 
4967   if (!Tok.getString().equals_insensitive("csync"))
4968     return ParseStatus::NoMatch;
4969 
4970   Parser.Lex(); // Eat identifier token.
4971 
4972   Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
4973   return ParseStatus::Success;
4974 }
4975 
4976 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
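/// For example, "isb sy", or a raw 4-bit immediate such as "isb #15"
/// (illustrative spellings).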
4977 ParseStatus
4978 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
4979   MCAsmParser &Parser = getParser();
4980   SMLoc S = Parser.getTok().getLoc();
4981   const AsmToken &Tok = Parser.getTok();
4982   unsigned Opt;
4983 
4984   if (Tok.is(AsmToken::Identifier)) {
4985     StringRef OptStr = Tok.getString();
4986 
4987     if (OptStr.equals_insensitive("sy"))
4988       Opt = ARM_ISB::SY;
4989     else
4990       return ParseStatus::NoMatch;
4991 
4992     Parser.Lex(); // Eat identifier token.
4993   } else if (Tok.is(AsmToken::Hash) ||
4994              Tok.is(AsmToken::Dollar) ||
4995              Tok.is(AsmToken::Integer)) {
4996     if (Parser.getTok().isNot(AsmToken::Integer))
4997       Parser.Lex(); // Eat '#' or '$'.
4998     SMLoc Loc = Parser.getTok().getLoc();
4999 
5000     const MCExpr *ISBarrierID;
5001     if (getParser().parseExpression(ISBarrierID))
5002       return Error(Loc, "illegal expression");
5003 
5004     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
5005     if (!CE)
5006       return Error(Loc, "constant expression expected");
5007 
5008     int Val = CE->getValue();
5009     if (Val & ~0xf)
5010       return Error(Loc, "immediate value out of range");
5011 
5012     Opt = ARM_ISB::RESERVED_0 + Val;
5013   } else
5014     return ParseStatus::Failure;
5015 
5016   Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
5017           (ARM_ISB::InstSyncBOpt)Opt, S));
5018   return ParseStatus::Success;
5019 }
5020 
5021 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
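/// For example, the "if" in "cpsie if" or the "aif" in "cpsid aif"
/// (illustrative spellings); "none" is also accepted and sets no AIF bits.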
5022 ParseStatus ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
5023   MCAsmParser &Parser = getParser();
5024   SMLoc S = Parser.getTok().getLoc();
5025   const AsmToken &Tok = Parser.getTok();
5026   if (!Tok.is(AsmToken::Identifier))
5027     return ParseStatus::NoMatch;
5028   StringRef IFlagsStr = Tok.getString();
5029 
5030   // An iflags string of "none" is interpreted to mean that none of the AIF
5031   // bits are set.  Not a terribly useful instruction, but a valid encoding.
5032   unsigned IFlags = 0;
5033   if (IFlagsStr != "none") {
5034     for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
5035       unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
5036         .Case("a", ARM_PROC::A)
5037         .Case("i", ARM_PROC::I)
5038         .Case("f", ARM_PROC::F)
5039         .Default(~0U);
5040 
5041       // If some specific iflag is already set, it means that some letter is
5042       // present more than once, which is not acceptable.
5043       if (Flag == ~0U || (IFlags & Flag))
5044         return ParseStatus::NoMatch;
5045 
5046       IFlags |= Flag;
5047     }
5048   }
5049 
5050   Parser.Lex(); // Eat identifier token.
5051   Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
5052   return ParseStatus::Success;
5053 }
5054 
5055 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
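/// Illustrative examples: "apsr_nzcvq" in "msr apsr_nzcvq, r0", "cpsr_fc" in
/// "msr cpsr_fc, r1", or a raw integer mask value in range [0, 255].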
5056 ParseStatus ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
5057   MCAsmParser &Parser = getParser();
5058   SMLoc S = Parser.getTok().getLoc();
5059   const AsmToken &Tok = Parser.getTok();
5060 
5061   if (Tok.is(AsmToken::Integer)) {
5062     int64_t Val = Tok.getIntVal();
5063     if (Val > 255 || Val < 0) {
5064       return ParseStatus::NoMatch;
5065     }
5066     unsigned SYSmvalue = Val & 0xFF;
5067     Parser.Lex();
5068     Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
5069     return ParseStatus::Success;
5070   }
5071 
5072   if (!Tok.is(AsmToken::Identifier))
5073     return ParseStatus::NoMatch;
5074   StringRef Mask = Tok.getString();
5075 
5076   if (isMClass()) {
5077     auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
5078     if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
5079       return ParseStatus::NoMatch;
5080 
5081     unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
5082 
5083     Parser.Lex(); // Eat identifier token.
5084     Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
5085     return ParseStatus::Success;
5086   }
5087 
5088   // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
5089   size_t Start = 0, Next = Mask.find('_');
5090   StringRef Flags = "";
5091   std::string SpecReg = Mask.slice(Start, Next).lower();
5092   if (Next != StringRef::npos)
5093     Flags = Mask.slice(Next+1, Mask.size());
5094 
5095   // FlagsVal contains the complete mask:
5096   // 3-0: Mask
5097   // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
5098   unsigned FlagsVal = 0;
5099 
5100   if (SpecReg == "apsr") {
5101     FlagsVal = StringSwitch<unsigned>(Flags)
5102     .Case("nzcvq",  0x8) // same as CPSR_f
5103     .Case("g",      0x4) // same as CPSR_s
5104     .Case("nzcvqg", 0xc) // same as CPSR_fs
5105     .Default(~0U);
5106 
5107     if (FlagsVal == ~0U) {
5108       if (!Flags.empty())
5109         return ParseStatus::NoMatch;
5110       else
5111         FlagsVal = 8; // No flag
5112     }
5113   } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
5114     // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
5115     if (Flags == "all" || Flags == "")
5116       Flags = "fc";
5117     for (int i = 0, e = Flags.size(); i != e; ++i) {
5118       unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
5119       .Case("c", 1)
5120       .Case("x", 2)
5121       .Case("s", 4)
5122       .Case("f", 8)
5123       .Default(~0U);
5124 
5125       // If some specific flag is already set, it means that some letter is
5126       // present more than once, which is not acceptable.
5127       if (Flag == ~0U || (FlagsVal & Flag))
5128         return ParseStatus::NoMatch;
5129       FlagsVal |= Flag;
5130     }
5131   } else // No match for special register.
5132     return ParseStatus::NoMatch;
5133 
5134   // Special register without flags is NOT equivalent to "fc" flags.
5135   // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
5136   // two lines would enable gas compatibility at the expense of breaking
5137   // round-tripping.
5138   //
5139   // if (!FlagsVal)
5140   //  FlagsVal = 0x9;
5141 
5142   // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
5143   if (SpecReg == "spsr")
5144     FlagsVal |= 16;
5145 
5146   Parser.Lex(); // Eat identifier token.
5147   Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
5148   return ParseStatus::Success;
5149 }
5150 
5151 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
5152 /// use in the MRS/MSR instructions added to support virtualization.
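/// Illustrative examples: "lr_irq" in "mrs r0, lr_irq" or "sp_usr" in
/// "msr sp_usr, r1".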
5153 ParseStatus ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
5154   MCAsmParser &Parser = getParser();
5155   SMLoc S = Parser.getTok().getLoc();
5156   const AsmToken &Tok = Parser.getTok();
5157   if (!Tok.is(AsmToken::Identifier))
5158     return ParseStatus::NoMatch;
5159   StringRef RegName = Tok.getString();
5160 
5161   auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
5162   if (!TheReg)
5163     return ParseStatus::NoMatch;
5164   unsigned Encoding = TheReg->Encoding;
5165 
5166   Parser.Lex(); // Eat identifier token.
5167   Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
5168   return ParseStatus::Success;
5169 }
5170 
5171 ParseStatus ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op,
5172                                       int Low, int High) {
5173   MCAsmParser &Parser = getParser();
5174   const AsmToken &Tok = Parser.getTok();
5175   if (Tok.isNot(AsmToken::Identifier))
5176     return Error(Parser.getTok().getLoc(), Op + " operand expected.");
5177   StringRef ShiftName = Tok.getString();
5178   std::string LowerOp = Op.lower();
5179   std::string UpperOp = Op.upper();
5180   if (ShiftName != LowerOp && ShiftName != UpperOp)
5181     return Error(Parser.getTok().getLoc(), Op + " operand expected.");
5182   Parser.Lex(); // Eat shift type token.
5183 
5184   // There must be a '#' and a shift amount.
5185   if (Parser.getTok().isNot(AsmToken::Hash) &&
5186       Parser.getTok().isNot(AsmToken::Dollar))
5187     return Error(Parser.getTok().getLoc(), "'#' expected");
5188   Parser.Lex(); // Eat hash token.
5189 
5190   const MCExpr *ShiftAmount;
5191   SMLoc Loc = Parser.getTok().getLoc();
5192   SMLoc EndLoc;
5193   if (getParser().parseExpression(ShiftAmount, EndLoc))
5194     return Error(Loc, "illegal expression");
5195   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5196   if (!CE)
5197     return Error(Loc, "constant expression expected");
5198   int Val = CE->getValue();
5199   if (Val < Low || Val > High)
5200     return Error(Loc, "immediate value out of range");
5201 
5202   Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
5203 
5204   return ParseStatus::Success;
5205 }
5206 
5207 ParseStatus ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
5208   MCAsmParser &Parser = getParser();
5209   const AsmToken &Tok = Parser.getTok();
5210   SMLoc S = Tok.getLoc();
5211   if (Tok.isNot(AsmToken::Identifier))
5212     return Error(S, "'be' or 'le' operand expected");
5213   int Val = StringSwitch<int>(Tok.getString().lower())
5214     .Case("be", 1)
5215     .Case("le", 0)
5216     .Default(-1);
5217   Parser.Lex(); // Eat the token.
5218 
5219   if (Val == -1)
5220     return Error(S, "'be' or 'le' operand expected");
5221   Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
5222                                                                   getContext()),
5223                                            S, Tok.getEndLoc()));
5224   return ParseStatus::Success;
5225 }
5226 
5227 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
5228 /// instructions. Legal values are:
5229 ///     lsl #n  'n' in [0,31]
5230 ///     asr #n  'n' in [1,32]
5231 ///             n == 32 encoded as n == 0.
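/// For example, the "lsl #4" in "ssat r0, #8, r1, lsl #4" or the "asr #2" in
/// "usat r0, #7, r1, asr #2" (illustrative spellings).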
5232 ParseStatus ARMAsmParser::parseShifterImm(OperandVector &Operands) {
5233   MCAsmParser &Parser = getParser();
5234   const AsmToken &Tok = Parser.getTok();
5235   SMLoc S = Tok.getLoc();
5236   if (Tok.isNot(AsmToken::Identifier))
5237     return Error(S, "shift operator 'asr' or 'lsl' expected");
5238   StringRef ShiftName = Tok.getString();
5239   bool isASR;
5240   if (ShiftName == "lsl" || ShiftName == "LSL")
5241     isASR = false;
5242   else if (ShiftName == "asr" || ShiftName == "ASR")
5243     isASR = true;
5244   else
5245     return Error(S, "shift operator 'asr' or 'lsl' expected");
5246   Parser.Lex(); // Eat the operator.
5247 
5248   // A '#' and a shift amount.
5249   if (Parser.getTok().isNot(AsmToken::Hash) &&
5250       Parser.getTok().isNot(AsmToken::Dollar))
5251     return Error(Parser.getTok().getLoc(), "'#' expected");
5252   Parser.Lex(); // Eat hash token.
5253   SMLoc ExLoc = Parser.getTok().getLoc();
5254 
5255   const MCExpr *ShiftAmount;
5256   SMLoc EndLoc;
5257   if (getParser().parseExpression(ShiftAmount, EndLoc))
5258     return Error(ExLoc, "malformed shift expression");
5259   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5260   if (!CE)
5261     return Error(ExLoc, "shift amount must be an immediate");
5262 
5263   int64_t Val = CE->getValue();
5264   if (isASR) {
5265     // Shift amount must be in [1,32]
5266     if (Val < 1 || Val > 32)
5267       return Error(ExLoc, "'asr' shift amount must be in range [1,32]");
5268     // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
5269     if (isThumb() && Val == 32)
5270       return Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
5271     if (Val == 32) Val = 0;
5272   } else {
5273     // Shift amount must be in [0,31]
5274     if (Val < 0 || Val > 31)
5275       return Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
5276   }
5277 
5278   Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
5279 
5280   return ParseStatus::Success;
5281 }
5282 
5283 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
5284 /// of instructions. Legal values are:
5285 ///     ror #n  'n' in {0, 8, 16, 24}
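/// For example, the "ror #16" in "sxtb r0, r1, ror #16" (illustrative
/// spelling); a rotate of 0 is normally expressed by omitting the operand.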
5286 ParseStatus ARMAsmParser::parseRotImm(OperandVector &Operands) {
5287   MCAsmParser &Parser = getParser();
5288   const AsmToken &Tok = Parser.getTok();
5289   SMLoc S = Tok.getLoc();
5290   if (Tok.isNot(AsmToken::Identifier))
5291     return ParseStatus::NoMatch;
5292   StringRef ShiftName = Tok.getString();
5293   if (ShiftName != "ror" && ShiftName != "ROR")
5294     return ParseStatus::NoMatch;
5295   Parser.Lex(); // Eat the operator.
5296 
5297   // A '#' and a rotate amount.
5298   if (Parser.getTok().isNot(AsmToken::Hash) &&
5299       Parser.getTok().isNot(AsmToken::Dollar))
5300     return Error(Parser.getTok().getLoc(), "'#' expected");
5301   Parser.Lex(); // Eat hash token.
5302   SMLoc ExLoc = Parser.getTok().getLoc();
5303 
5304   const MCExpr *ShiftAmount;
5305   SMLoc EndLoc;
5306   if (getParser().parseExpression(ShiftAmount, EndLoc))
5307     return Error(ExLoc, "malformed rotate expression");
5308   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5309   if (!CE)
5310     return Error(ExLoc, "rotate amount must be an immediate");
5311 
5312   int64_t Val = CE->getValue();
5313   // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
5314   // normally, zero is represented in asm by omitting the rotate operand
5315   // entirely.
5316   if (Val != 8 && Val != 16 && Val != 24 && Val != 0)
5317     return Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
5318 
5319   Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
5320 
5321   return ParseStatus::Success;
5322 }
5323 
5324 ParseStatus ARMAsmParser::parseModImm(OperandVector &Operands) {
5325   MCAsmParser &Parser = getParser();
5326   MCAsmLexer &Lexer = getLexer();
5327   int64_t Imm1, Imm2;
5328 
5329   SMLoc S = Parser.getTok().getLoc();
5330 
5331   // 1) A mod_imm operand can appear in the place of a register name:
5332   //   add r0, #mod_imm
5333   //   add r0, r0, #mod_imm
5334   // to correctly handle the latter, we bail out as soon as we see an
5335   // identifier.
5336   //
5337   // 2) Similarly, we do not want to parse into complex operands:
5338   //   mov r0, #mod_imm
5339   //   mov r0, :lower16:(_foo)
5340   if (Parser.getTok().is(AsmToken::Identifier) ||
5341       Parser.getTok().is(AsmToken::Colon))
5342     return ParseStatus::NoMatch;
5343 
5344   // Hash (dollar) is optional as per the ARMARM
5345   if (Parser.getTok().is(AsmToken::Hash) ||
5346       Parser.getTok().is(AsmToken::Dollar)) {
5347     // Avoid parsing into complex operands (#:)
5348     if (Lexer.peekTok().is(AsmToken::Colon))
5349       return ParseStatus::NoMatch;
5350 
5351     // Eat the hash (dollar)
5352     Parser.Lex();
5353   }
5354 
5355   SMLoc Sx1, Ex1;
5356   Sx1 = Parser.getTok().getLoc();
5357   const MCExpr *Imm1Exp;
5358   if (getParser().parseExpression(Imm1Exp, Ex1))
5359     return Error(Sx1, "malformed expression");
5360 
5361   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
5362 
5363   if (CE) {
5364     // Immediate must fit within 32-bits
5365     Imm1 = CE->getValue();
5366     int Enc = ARM_AM::getSOImmVal(Imm1);
5367     if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
5368       // We have a match!
5369       Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
5370                                                   (Enc & 0xF00) >> 7,
5371                                                   Sx1, Ex1));
5372       return ParseStatus::Success;
5373     }
5374 
5375     // We have parsed an immediate which is not for us, fallback to a plain
5376     // immediate. This can happen for instruction aliases. For an example,
5377     // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
5378     // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
5379     // instruction with a mod_imm operand. The alias is defined such that the
5380     // parser method is shared, that's why we have to do this here.
5381     if (Parser.getTok().is(AsmToken::EndOfStatement)) {
5382       Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5383       return ParseStatus::Success;
5384     }
5385   } else {
5386     // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
5387     // MCFixup). Fallback to a plain immediate.
5388     Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5389     return ParseStatus::Success;
5390   }
5391 
5392   // From this point onward, we expect the input to be a (#bits, #rot) pair
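  // For example (illustrative alias), "mov r0, #255, #24" describes the same
  // modified immediate as "mov r0, #0xff00" (0xff rotated right by 24 bits).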
5393   if (Parser.getTok().isNot(AsmToken::Comma))
5394     return Error(Sx1,
5395                  "expected modified immediate operand: #[0, 255], #even[0-30]");
5396 
5397   if (Imm1 & ~0xFF)
5398     return Error(Sx1, "immediate operand must a number in the range [0, 255]");
5399 
5400   // Eat the comma
5401   Parser.Lex();
5402 
5403   // Repeat for #rot
5404   SMLoc Sx2, Ex2;
5405   Sx2 = Parser.getTok().getLoc();
5406 
5407   // Eat the optional hash (dollar)
5408   if (Parser.getTok().is(AsmToken::Hash) ||
5409       Parser.getTok().is(AsmToken::Dollar))
5410     Parser.Lex();
5411 
5412   const MCExpr *Imm2Exp;
5413   if (getParser().parseExpression(Imm2Exp, Ex2))
5414     return Error(Sx2, "malformed expression");
5415 
5416   CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5417 
5418   if (CE) {
5419     Imm2 = CE->getValue();
5420     if (!(Imm2 & ~0x1E)) {
5421       // We have a match!
5422       Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
5423       return ParseStatus::Success;
5424     }
5425     return Error(Sx2,
5426                  "immediate operand must an even number in the range [0, 30]");
5427   } else {
5428     return Error(Sx2, "constant expression expected");
5429   }
5430 }
5431 
5432 ParseStatus ARMAsmParser::parseBitfield(OperandVector &Operands) {
5433   MCAsmParser &Parser = getParser();
5434   SMLoc S = Parser.getTok().getLoc();
5435   // The bitfield descriptor is really two operands, the LSB and the width.
5436   if (Parser.getTok().isNot(AsmToken::Hash) &&
5437       Parser.getTok().isNot(AsmToken::Dollar))
5438     return Error(Parser.getTok().getLoc(), "'#' expected");
5439   Parser.Lex(); // Eat hash token.
5440 
5441   const MCExpr *LSBExpr;
5442   SMLoc E = Parser.getTok().getLoc();
5443   if (getParser().parseExpression(LSBExpr))
5444     return Error(E, "malformed immediate expression");
5445   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
5446   if (!CE)
5447     return Error(E, "'lsb' operand must be an immediate");
5448 
5449   int64_t LSB = CE->getValue();
5450   // The LSB must be in the range [0,31]
5451   if (LSB < 0 || LSB > 31)
5452     return Error(E, "'lsb' operand must be in the range [0,31]");
5453   E = Parser.getTok().getLoc();
5454 
5455   // Expect another immediate operand.
5456   if (Parser.getTok().isNot(AsmToken::Comma))
5457     return Error(Parser.getTok().getLoc(), "too few operands");
5458   Parser.Lex(); // Eat comma token.
5459   if (Parser.getTok().isNot(AsmToken::Hash) &&
5460       Parser.getTok().isNot(AsmToken::Dollar))
5461     return Error(Parser.getTok().getLoc(), "'#' expected");
5462   Parser.Lex(); // Eat hash token.
5463 
5464   const MCExpr *WidthExpr;
5465   SMLoc EndLoc;
5466   if (getParser().parseExpression(WidthExpr, EndLoc))
5467     return Error(E, "malformed immediate expression");
5468   CE = dyn_cast<MCConstantExpr>(WidthExpr);
5469   if (!CE)
5470     return Error(E, "'width' operand must be an immediate");
5471 
5472   int64_t Width = CE->getValue();
5473   // The width must be in the range [1,32-lsb]
5474   if (Width < 1 || Width > 32 - LSB)
5475     return Error(E, "'width' operand must be in the range [1,32-lsb]");
5476 
5477   Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
5478 
5479   return ParseStatus::Success;
5480 }
5481 
5482 ParseStatus ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
5483   // Check for a post-index addressing register operand. Specifically:
5484   // postidx_reg := '+' register {, shift}
5485   //              | '-' register {, shift}
5486   //              | register {, shift}
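  // For example, the trailing "r2" in "ldr r0, [r1], r2" or the "-r2, lsl #2"
  // in "ldr r0, [r1], -r2, lsl #2" (illustrative ARM-mode spellings).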
5487 
5488   // This method must return ParseStatus::NoMatch without consuming any tokens
5489   // in the case where there is no match, as other alternatives take other
5490   // parse methods.
5491   MCAsmParser &Parser = getParser();
5492   AsmToken Tok = Parser.getTok();
5493   SMLoc S = Tok.getLoc();
5494   bool haveEaten = false;
5495   bool isAdd = true;
5496   if (Tok.is(AsmToken::Plus)) {
5497     Parser.Lex(); // Eat the '+' token.
5498     haveEaten = true;
5499   } else if (Tok.is(AsmToken::Minus)) {
5500     Parser.Lex(); // Eat the '-' token.
5501     isAdd = false;
5502     haveEaten = true;
5503   }
5504 
5505   SMLoc E = Parser.getTok().getEndLoc();
5506   int Reg = tryParseRegister();
5507   if (Reg == -1) {
5508     if (!haveEaten)
5509       return ParseStatus::NoMatch;
5510     return Error(Parser.getTok().getLoc(), "register expected");
5511   }
5512 
5513   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
5514   unsigned ShiftImm = 0;
5515   if (Parser.getTok().is(AsmToken::Comma)) {
5516     Parser.Lex(); // Eat the ','.
5517     if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5518       return ParseStatus::Failure;
5519 
5520     // FIXME: Only approximates end...may include intervening whitespace.
5521     E = Parser.getTok().getLoc();
5522   }
5523 
5524   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
5525                                                   ShiftImm, S, E));
5526 
5527   return ParseStatus::Success;
5528 }
5529 
5530 ParseStatus ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
5531   // Check for a post-index addressing register operand. Specifically:
5532   // am3offset := '+' register
5533   //              | '-' register
5534   //              | register
5535   //              | # imm
5536   //              | # + imm
5537   //              | # - imm
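  // For example, "#4" in "ldrh r0, [r1], #4", "-r2" in "ldrh r0, [r1], -r2",
  // or "#-8" in "ldrd r0, r1, [r2], #-8" (illustrative spellings).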
5538 
5539   // This method must return ParseStatus::NoMatch without consuming any tokens
5540   // in the case where there is no match, as other alternatives take other
5541   // parse methods.
5542   MCAsmParser &Parser = getParser();
5543   AsmToken Tok = Parser.getTok();
5544   SMLoc S = Tok.getLoc();
5545 
5546   // Do immediates first, as we always parse those if we have a '#'.
5547   if (Parser.getTok().is(AsmToken::Hash) ||
5548       Parser.getTok().is(AsmToken::Dollar)) {
5549     Parser.Lex(); // Eat '#' or '$'.
5550     // Explicitly look for a '-', as we need to encode negative zero
5551     // differently.
5552     bool isNegative = Parser.getTok().is(AsmToken::Minus);
5553     const MCExpr *Offset;
5554     SMLoc E;
5555     if (getParser().parseExpression(Offset, E))
5556       return ParseStatus::Failure;
5557     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
5558     if (!CE)
5559       return Error(S, "constant expression expected");
5560     // Negative zero is encoded as the flag value
5561     // std::numeric_limits<int32_t>::min().
5562     int32_t Val = CE->getValue();
5563     if (isNegative && Val == 0)
5564       Val = std::numeric_limits<int32_t>::min();
5565 
5566     Operands.push_back(
5567       ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
5568 
5569     return ParseStatus::Success;
5570   }
5571 
5572   bool haveEaten = false;
5573   bool isAdd = true;
5574   if (Tok.is(AsmToken::Plus)) {
5575     Parser.Lex(); // Eat the '+' token.
5576     haveEaten = true;
5577   } else if (Tok.is(AsmToken::Minus)) {
5578     Parser.Lex(); // Eat the '-' token.
5579     isAdd = false;
5580     haveEaten = true;
5581   }
5582 
5583   Tok = Parser.getTok();
5584   int Reg = tryParseRegister();
5585   if (Reg == -1) {
5586     if (!haveEaten)
5587       return ParseStatus::NoMatch;
5588     return Error(Tok.getLoc(), "register expected");
5589   }
5590 
5591   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
5592                                                   0, S, Tok.getEndLoc()));
5593 
5594   return ParseStatus::Success;
5595 }
5596 
5597 /// Convert parsed operands to MCInst.  Needed here because this instruction
5598 /// only has two register operands, but multiplication is commutative so
5599 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
5600 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
5601                                     const OperandVector &Operands) {
5602   ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
5603   ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
5604   // If we have a three-operand form, make sure to set Rn to be the operand
5605   // that isn't the same as Rd.
5606   unsigned RegOp = 4;
5607   if (Operands.size() == 6 &&
5608       ((ARMOperand &)*Operands[4]).getReg() ==
5609           ((ARMOperand &)*Operands[3]).getReg())
5610     RegOp = 5;
5611   ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
5612   Inst.addOperand(Inst.getOperand(0));
5613   ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
5614 }
5615 
5616 void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
5617                                     const OperandVector &Operands) {
5618   int CondOp = -1, ImmOp = -1;
5619   switch(Inst.getOpcode()) {
5620     case ARM::tB:
5621     case ARM::tBcc:  CondOp = 1; ImmOp = 2; break;
5622 
5623     case ARM::t2B:
5624     case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
5625 
5626     default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
5627   }
5628   // first decide whether or not the branch should be conditional
5629   // by looking at it's location relative to an IT block
5630   if(inITBlock()) {
5631     // inside an IT block we cannot have any conditional branches. any
5632     // such instructions needs to be converted to unconditional form
5633     switch(Inst.getOpcode()) {
5634       case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
5635       case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
5636     }
5637   } else {
5638     // outside IT blocks we can only have unconditional branches with AL
5639     // condition code or conditional branches with non-AL condition code
5640     unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
5641     switch(Inst.getOpcode()) {
5642       case ARM::tB:
5643       case ARM::tBcc:
5644         Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
5645         break;
5646       case ARM::t2B:
5647       case ARM::t2Bcc:
5648         Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
5649         break;
5650     }
5651   }
5652 
5653   // now decide on encoding size based on branch target range
5654   switch(Inst.getOpcode()) {
5655     // classify tB as either t2B or t1B based on range of immediate operand
5656     case ARM::tB: {
5657       ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
5658       if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
5659         Inst.setOpcode(ARM::t2B);
5660       break;
5661     }
5662     // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
5663     case ARM::tBcc: {
5664       ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
5665       if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
5666         Inst.setOpcode(ARM::t2Bcc);
5667       break;
5668     }
5669   }
5670   ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
5671   ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
5672 }
5673 
5674 void ARMAsmParser::cvtMVEVMOVQtoDReg(
5675   MCInst &Inst, const OperandVector &Operands) {
5676 
5677   // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
5678   assert(Operands.size() == 8);
5679 
5680   ((ARMOperand &)*Operands[2]).addRegOperands(Inst, 1); // Rt
5681   ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); // Rt2
5682   ((ARMOperand &)*Operands[4]).addRegOperands(Inst, 1); // Qd
5683   ((ARMOperand &)*Operands[5]).addMVEPairVectorIndexOperands(Inst, 1); // idx
5684   // skip second copy of Qd in Operands[6]
5685   ((ARMOperand &)*Operands[7]).addMVEPairVectorIndexOperands(Inst, 1); // idx2
5686   ((ARMOperand &)*Operands[1]).addCondCodeOperands(Inst, 2); // condition code
5687 }
5688 
5689 /// Parse an ARM memory expression. Returns false on success and true (after
5690 /// emitting a diagnostic) on error.  The first token must be a '[' when called.
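/// Illustrative operand forms handled here (assuming standard UAL syntax):
///   [r0]               plain base register
///   [r0, #4]           immediate offset
///   [r0, -r1, lsl #2]  register offset, optionally negated and shifted
///   [r0:128]           alignment specifier for vector loads/stores
/// Each form may be followed by a '!' pre-indexed writeback marker.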
5691 bool ARMAsmParser::parseMemory(OperandVector &Operands) {
5692   MCAsmParser &Parser = getParser();
5693   SMLoc S, E;
5694   if (Parser.getTok().isNot(AsmToken::LBrac))
5695     return TokError("Token is not a Left Bracket");
5696   S = Parser.getTok().getLoc();
5697   Parser.Lex(); // Eat left bracket token.
5698 
5699   const AsmToken &BaseRegTok = Parser.getTok();
5700   int BaseRegNum = tryParseRegister();
5701   if (BaseRegNum == -1)
5702     return Error(BaseRegTok.getLoc(), "register expected");
5703 
5704   // The next token must either be a comma, a colon or a closing bracket.
5705   const AsmToken &Tok = Parser.getTok();
5706   if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
5707       !Tok.is(AsmToken::RBrac))
5708     return Error(Tok.getLoc(), "malformed memory operand");
5709 
5710   if (Tok.is(AsmToken::RBrac)) {
5711     E = Tok.getEndLoc();
5712     Parser.Lex(); // Eat right bracket token.
5713 
5714     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5715                                              ARM_AM::no_shift, 0, 0, false,
5716                                              S, E));
5717 
5718     // If there's a pre-indexing writeback marker, '!', just add it as a token
5719     // operand. It's rather odd, but syntactically valid.
5720     if (Parser.getTok().is(AsmToken::Exclaim)) {
5721       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5722       Parser.Lex(); // Eat the '!'.
5723     }
5724 
5725     return false;
5726   }
5727 
5728   assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
5729          "Lost colon or comma in memory operand?!");
5730   if (Tok.is(AsmToken::Comma)) {
5731     Parser.Lex(); // Eat the comma.
5732   }
5733 
5734   // If we have a ':', it's an alignment specifier.
5735   if (Parser.getTok().is(AsmToken::Colon)) {
5736     Parser.Lex(); // Eat the ':'.
5737     E = Parser.getTok().getLoc();
5738     SMLoc AlignmentLoc = Tok.getLoc();
5739 
5740     const MCExpr *Expr;
5741     if (getParser().parseExpression(Expr))
5742      return true;
5743 
5744     // The expression has to be a constant. Memory references with relocations
5745     // don't come through here, as they use the <label> forms of the relevant
5746     // instructions.
5747     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5748     if (!CE)
5749       return Error (E, "constant expression expected");
5750 
5751     unsigned Align = 0;
5752     switch (CE->getValue()) {
5753     default:
5754       return Error(E,
5755                    "alignment specifier must be 16, 32, 64, 128, or 256 bits");
5756     case 16:  Align = 2; break;
5757     case 32:  Align = 4; break;
5758     case 64:  Align = 8; break;
5759     case 128: Align = 16; break;
5760     case 256: Align = 32; break;
5761     }
5762 
5763     // Now we should have the closing ']'
5764     if (Parser.getTok().isNot(AsmToken::RBrac))
5765       return Error(Parser.getTok().getLoc(), "']' expected");
5766     E = Parser.getTok().getEndLoc();
5767     Parser.Lex(); // Eat right bracket token.
5768 
5769     // Don't worry about range checking the value here. That's handled by
5770     // the is*() predicates.
5771     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5772                                              ARM_AM::no_shift, 0, Align,
5773                                              false, S, E, AlignmentLoc));
5774 
5775     // If there's a pre-indexing writeback marker, '!', just add it as a token
5776     // operand.
5777     if (Parser.getTok().is(AsmToken::Exclaim)) {
5778       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5779       Parser.Lex(); // Eat the '!'.
5780     }
5781 
5782     return false;
5783   }
5784 
5785   // If we have a '#' or '$', it's an immediate offset, else assume it's a
5786   // register offset. Be friendly and also accept a plain integer or expression
5787   // (without a leading hash) for gas compatibility.
5788   if (Parser.getTok().is(AsmToken::Hash) ||
5789       Parser.getTok().is(AsmToken::Dollar) ||
5790       Parser.getTok().is(AsmToken::LParen) ||
5791       Parser.getTok().is(AsmToken::Integer)) {
5792     if (Parser.getTok().is(AsmToken::Hash) ||
5793         Parser.getTok().is(AsmToken::Dollar))
5794       Parser.Lex(); // Eat '#' or '$'
5795     E = Parser.getTok().getLoc();
5796 
5797     bool isNegative = getParser().getTok().is(AsmToken::Minus);
5798     const MCExpr *Offset, *AdjustedOffset;
5799     if (getParser().parseExpression(Offset))
5800      return true;
5801 
5802     if (const auto *CE = dyn_cast<MCConstantExpr>(Offset)) {
5803       // If the constant was #-0, represent it as
5804       // std::numeric_limits<int32_t>::min().
5805       int32_t Val = CE->getValue();
5806       if (isNegative && Val == 0)
5807         CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
5808                                     getContext());
5809       // Don't worry about range checking the value here. That's handled by
5810       // the is*() predicates.
5811       AdjustedOffset = CE;
5812     } else
5813       AdjustedOffset = Offset;
5814     Operands.push_back(ARMOperand::CreateMem(
5815         BaseRegNum, AdjustedOffset, 0, ARM_AM::no_shift, 0, 0, false, S, E));
5816 
5817     // Now we should have the closing ']'
5818     if (Parser.getTok().isNot(AsmToken::RBrac))
5819       return Error(Parser.getTok().getLoc(), "']' expected");
5820     E = Parser.getTok().getEndLoc();
5821     Parser.Lex(); // Eat right bracket token.
5822 
5823     // If there's a pre-indexing writeback marker, '!', just add it as a token
5824     // operand.
5825     if (Parser.getTok().is(AsmToken::Exclaim)) {
5826       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5827       Parser.Lex(); // Eat the '!'.
5828     }
5829 
5830     return false;
5831   }
5832 
5833   // The register offset is optionally preceded by a '+' or '-'
5834   bool isNegative = false;
5835   if (Parser.getTok().is(AsmToken::Minus)) {
5836     isNegative = true;
5837     Parser.Lex(); // Eat the '-'.
5838   } else if (Parser.getTok().is(AsmToken::Plus)) {
5839     // Nothing to do.
5840     Parser.Lex(); // Eat the '+'.
5841   }
5842 
5843   E = Parser.getTok().getLoc();
5844   int OffsetRegNum = tryParseRegister();
5845   if (OffsetRegNum == -1)
5846     return Error(E, "register expected");
5847 
5848   // If there's a shift operator, handle it.
5849   ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
5850   unsigned ShiftImm = 0;
5851   if (Parser.getTok().is(AsmToken::Comma)) {
5852     Parser.Lex(); // Eat the ','.
5853     if (parseMemRegOffsetShift(ShiftType, ShiftImm))
5854       return true;
5855   }
5856 
5857   // Now we should have the closing ']'
5858   if (Parser.getTok().isNot(AsmToken::RBrac))
5859     return Error(Parser.getTok().getLoc(), "']' expected");
5860   E = Parser.getTok().getEndLoc();
5861   Parser.Lex(); // Eat right bracket token.
5862 
5863   Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
5864                                            ShiftType, ShiftImm, 0, isNegative,
5865                                            S, E));
5866 
5867   // If there's a pre-indexing writeback marker, '!', just add it as a token
5868   // operand.
5869   if (Parser.getTok().is(AsmToken::Exclaim)) {
5870     Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5871     Parser.Lex(); // Eat the '!'.
5872   }
5873 
5874   return false;
5875 }
5876 
5877 /// parseMemRegOffsetShift - one of these two:
5878 ///   ( lsl | lsr | asr | ror ) , # shift_amount
5879 ///   rrx
5880 /// Returns true on error; returns false after successfully parsing a shift.
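/// For example (illustrative), this parses the ", lsl #2" part of
///   ldr r0, [r1, r2, lsl #2]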
5881 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
5882                                           unsigned &Amount) {
5883   MCAsmParser &Parser = getParser();
5884   SMLoc Loc = Parser.getTok().getLoc();
5885   const AsmToken &Tok = Parser.getTok();
5886   if (Tok.isNot(AsmToken::Identifier))
5887     return Error(Loc, "illegal shift operator");
5888   StringRef ShiftName = Tok.getString();
5889   if (ShiftName == "lsl" || ShiftName == "LSL" ||
5890       ShiftName == "asl" || ShiftName == "ASL")
5891     St = ARM_AM::lsl;
5892   else if (ShiftName == "lsr" || ShiftName == "LSR")
5893     St = ARM_AM::lsr;
5894   else if (ShiftName == "asr" || ShiftName == "ASR")
5895     St = ARM_AM::asr;
5896   else if (ShiftName == "ror" || ShiftName == "ROR")
5897     St = ARM_AM::ror;
5898   else if (ShiftName == "rrx" || ShiftName == "RRX")
5899     St = ARM_AM::rrx;
5900   else if (ShiftName == "uxtw" || ShiftName == "UXTW")
5901     St = ARM_AM::uxtw;
5902   else
5903     return Error(Loc, "illegal shift operator");
5904   Parser.Lex(); // Eat shift type token.
5905 
5906   // rrx stands alone.
5907   Amount = 0;
5908   if (St != ARM_AM::rrx) {
5909     Loc = Parser.getTok().getLoc();
5910     // A '#' and a shift amount.
5911     const AsmToken &HashTok = Parser.getTok();
5912     if (HashTok.isNot(AsmToken::Hash) &&
5913         HashTok.isNot(AsmToken::Dollar))
5914       return Error(HashTok.getLoc(), "'#' expected");
5915     Parser.Lex(); // Eat hash token.
5916 
5917     const MCExpr *Expr;
5918     if (getParser().parseExpression(Expr))
5919       return true;
5920     // Range check the immediate.
5921     // lsl, ror: 0 <= imm <= 31
5922     // lsr, asr: 0 <= imm <= 32
5923     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5924     if (!CE)
5925       return Error(Loc, "shift amount must be an immediate");
5926     int64_t Imm = CE->getValue();
5927     if (Imm < 0 ||
5928         ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
5929         ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
5930       return Error(Loc, "immediate shift value out of range");
5931     // If <ShiftTy> #0, canonicalize it to lsl #0 (i.e. no shift).
5932     if (Imm == 0)
5933       St = ARM_AM::lsl;
5934     // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
5935     if (Imm == 32)
5936       Imm = 0;
5937     Amount = Imm;
5938   }
5939 
5940   return false;
5941 }
5942 
5943 /// parseFPImm - A floating point immediate expression operand.
5944 ParseStatus ARMAsmParser::parseFPImm(OperandVector &Operands) {
5945   MCAsmParser &Parser = getParser();
5946   // Anything that can accept a floating point constant as an operand
5947   // needs to go through here, as the regular parseExpression is
5948   // integer only.
5949   //
5950   // This routine still creates a generic Immediate operand, containing
5951   // a bitcast of the 64-bit floating point value. The various operands
5952   // that accept floats can check whether the value is valid for them
5953   // via the standard is*() predicates.
5954 
5955   SMLoc S = Parser.getTok().getLoc();
5956 
5957   if (Parser.getTok().isNot(AsmToken::Hash) &&
5958       Parser.getTok().isNot(AsmToken::Dollar))
5959     return ParseStatus::NoMatch;
5960 
5961   // Disambiguate the VMOV forms that can accept an FP immediate.
5962   // vmov.f32 <sreg>, #imm
5963   // vmov.f64 <dreg>, #imm
5964   // vmov.f32 <dreg>, #imm  @ vector f32x2
5965   // vmov.f32 <qreg>, #imm  @ vector f32x4
5966   //
5967   // There are also the NEON VMOV instructions which expect an
5968   // integer constant. Make sure we don't try to parse an FPImm
5969   // for these:
5970   // vmov.i{8|16|32|64} <dreg|qreg>, #imm
5971   ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
5972   bool isVmovf = TyOp.isToken() &&
5973                  (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
5974                   TyOp.getToken() == ".f16");
5975   ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
5976   bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
5977                                          Mnemonic.getToken() == "fconsts");
5978   if (!(isVmovf || isFconst))
5979     return ParseStatus::NoMatch;
5980 
5981   Parser.Lex(); // Eat '#' or '$'.
5982 
5983   // Handle negation, as that still comes through as a separate token.
5984   bool isNegative = false;
5985   if (Parser.getTok().is(AsmToken::Minus)) {
5986     isNegative = true;
5987     Parser.Lex();
5988   }
5989   const AsmToken &Tok = Parser.getTok();
5990   SMLoc Loc = Tok.getLoc();
5991   if (Tok.is(AsmToken::Real) && isVmovf) {
5992     APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
5993     uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5994     // If we had a '-' in front, toggle the sign bit.
5995     IntVal ^= (uint64_t)isNegative << 31;
5996     Parser.Lex(); // Eat the token.
5997     Operands.push_back(ARMOperand::CreateImm(
5998           MCConstantExpr::create(IntVal, getContext()),
5999           S, Parser.getTok().getLoc()));
6000     return ParseStatus::Success;
6001   }
6002   // Also handle plain integers. Instructions which allow floating point
6003   // immediates also allow a raw encoded 8-bit value.
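  // For example (illustrative): "fconsts s0, #112" passes the raw value 112
  // (0x70), which the VFP 8-bit immediate encoding expands to 1.0.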
6004   if (Tok.is(AsmToken::Integer) && isFconst) {
6005     int64_t Val = Tok.getIntVal();
6006     Parser.Lex(); // Eat the token.
6007     if (Val > 255 || Val < 0)
6008       return Error(Loc, "encoded floating point value out of range");
6009     float RealVal = ARM_AM::getFPImmFloat(Val);
6010     Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
6011 
6012     Operands.push_back(ARMOperand::CreateImm(
6013         MCConstantExpr::create(Val, getContext()), S,
6014         Parser.getTok().getLoc()));
6015     return ParseStatus::Success;
6016   }
6017 
6018   return Error(Loc, "invalid floating point immediate");
6019 }
6020 
6021 /// Parse an ARM instruction operand.  For now this parses the operand regardless
6022 /// of the mnemonic.
6023 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
6024   MCAsmParser &Parser = getParser();
6025   SMLoc S, E;
6026 
6027   // Check if the current operand has a custom associated parser; if so, try to
6028   // custom parse the operand, or fall back to the general approach below.
6029   ParseStatus ResTy = MatchOperandParserImpl(Operands, Mnemonic);
6030   if (ResTy.isSuccess())
6031     return false;
6032   // If there wasn't a custom match, try the generic matcher below. Otherwise,
6033   // there was a match, but an error occurred, in which case, just return that
6034   // the operand parsing failed.
6035   if (ResTy.isFailure())
6036     return true;
6037 
6038   switch (getLexer().getKind()) {
6039   default:
6040     Error(Parser.getTok().getLoc(), "unexpected token in operand");
6041     return true;
6042   case AsmToken::Identifier: {
6043     // If we've seen a branch mnemonic, the next operand must be a label.  This
6044     // is true even if the label is a register name.  So "br r1" means branch to
6045     // label "r1".
6046     bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
6047     if (!ExpectLabel) {
6048       if (!tryParseRegisterWithWriteBack(Operands))
6049         return false;
6050       int Res = tryParseShiftRegister(Operands);
6051       if (Res == 0) // success
6052         return false;
6053       else if (Res == -1) // irrecoverable error
6054         return true;
6055       // If this is VMRS, check for the apsr_nzcv operand.
6056       if (Mnemonic == "vmrs" &&
6057           Parser.getTok().getString().equals_insensitive("apsr_nzcv")) {
6058         S = Parser.getTok().getLoc();
6059         Parser.Lex();
6060         Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
6061         return false;
6062       }
6063     }
6064 
6065     // Fall through for the Identifier case that is not a register or a
6066     // special name.
6067     [[fallthrough]];
6068   }
6069   case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
6070   case AsmToken::Integer: // things like 1f and 2b as branch targets
6071   case AsmToken::String:  // quoted label names.
6072   case AsmToken::Dot: {   // . as a branch target
6073     // This was not a register so parse other operands that start with an
6074     // identifier (like labels) as expressions and create them as immediates.
6075     const MCExpr *IdVal;
6076     S = Parser.getTok().getLoc();
6077     if (getParser().parseExpression(IdVal))
6078       return true;
6079     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6080     Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
6081     return false;
6082   }
6083   case AsmToken::LBrac:
6084     return parseMemory(Operands);
6085   case AsmToken::LCurly:
6086     return parseRegisterList(Operands, !Mnemonic.starts_with("clr"));
6087   case AsmToken::Dollar:
6088   case AsmToken::Hash: {
6089     // #42 -> immediate
6090     // $ 42 -> immediate
6091     // $foo -> symbol name
6092     // $42 -> symbol name
6093     S = Parser.getTok().getLoc();
6094 
6095     // Favor the interpretation of $-prefixed operands as symbol names.
6096     // Cases where immediates are explicitly expected are handled by their
6097     // specific ParseMethod implementations.
6098     auto AdjacentToken = getLexer().peekTok(/*ShouldSkipSpace=*/false);
6099     bool ExpectIdentifier = Parser.getTok().is(AsmToken::Dollar) &&
6100                             (AdjacentToken.is(AsmToken::Identifier) ||
6101                              AdjacentToken.is(AsmToken::Integer));
6102     if (!ExpectIdentifier) {
6103       // Token is not part of identifier. Drop leading $ or # before parsing
6104       // expression.
6105       Parser.Lex();
6106     }
6107 
6108     if (Parser.getTok().isNot(AsmToken::Colon)) {
6109       bool IsNegative = Parser.getTok().is(AsmToken::Minus);
6110       const MCExpr *ImmVal;
6111       if (getParser().parseExpression(ImmVal))
6112         return true;
6113       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
6114       if (CE) {
6115         int32_t Val = CE->getValue();
6116         if (IsNegative && Val == 0)
6117           ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
6118                                           getContext());
6119       }
6120       E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6121       Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
6122 
6123       // There can be a trailing '!' on operands that we want as a separate
6124       // '!' Token operand. Handle that here. For example, the compatibility
6125       // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
6126       if (Parser.getTok().is(AsmToken::Exclaim)) {
6127         Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
6128                                                    Parser.getTok().getLoc()));
6129         Parser.Lex(); // Eat exclaim token
6130       }
6131       return false;
6132     }
6133     // w/ a ':' after the '#', it's just like a plain ':'.
6134     [[fallthrough]];
6135   }
6136   case AsmToken::Colon: {
6137     S = Parser.getTok().getLoc();
6138     // ":lower16:", ":upper16:", ":lower0_7:", ":lower8_15:", ":upper0_7:" and
6139     // ":upper8_15:", expression prefixes
6140     // FIXME: Check it's an expression prefix,
6141     // e.g. (FOO - :lower16:BAR) isn't legal.
6142     ARMMCExpr::VariantKind RefKind;
6143     if (parsePrefix(RefKind))
6144       return true;
6145 
6146     const MCExpr *SubExprVal;
6147     if (getParser().parseExpression(SubExprVal))
6148       return true;
6149 
6150     const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
6151                                               getContext());
6152     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6153     Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
6154     return false;
6155   }
6156   case AsmToken::Equal: {
6157     S = Parser.getTok().getLoc();
6158     if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
6159       return Error(S, "unexpected token in operand");
6160     Parser.Lex(); // Eat '='
6161     const MCExpr *SubExprVal;
6162     if (getParser().parseExpression(SubExprVal))
6163       return true;
6164     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6165 
6166     // execute-only: we assume that assembly programmers know what they are
6167     // doing and allow literal pool creation here
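    // For example (illustrative): "ldr r0, =0x12345678" materializes the
    // constant via a literal pool entry rather than an immediate operand.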
6168     Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
6169     return false;
6170   }
6171   }
6172 }
6173 
6174 bool ARMAsmParser::parseImmExpr(int64_t &Out) {
6175   const MCExpr *Expr = nullptr;
6176   SMLoc L = getParser().getTok().getLoc();
6177   if (check(getParser().parseExpression(Expr), L, "expected expression"))
6178     return true;
6179   const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
6180   if (check(!Value, L, "expected constant expression"))
6181     return true;
6182   Out = Value->getValue();
6183   return false;
6184 }
6185 
6186 // parsePrefix - Parse ARM 16-bit relocation expression prefixes, i.e.
6187 // :lower16: and :upper16: and Thumb 8-bit relocation expression prefixes, i.e.
6188 // :upper8_15:, :upper0_7:, :lower8_15: and :lower0_7:
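// For example (illustrative, assuming GNU-style syntax):
//   movw r0, :lower16:sym
//   movt r0, :upper16:sym
// The Thumb 8-bit prefixes are used analogously with instructions that take
// 8-bit immediates (e.g. movs/adds).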
6189 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
6190   MCAsmParser &Parser = getParser();
6191   RefKind = ARMMCExpr::VK_ARM_None;
6192 
6193   // consume an optional '#' (GNU compatibility)
6194   if (getLexer().is(AsmToken::Hash))
6195     Parser.Lex();
6196 
6197   assert(getLexer().is(AsmToken::Colon) && "expected a :");
6198   Parser.Lex(); // Eat ':'
6199 
6200   if (getLexer().isNot(AsmToken::Identifier)) {
6201     Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
6202     return true;
6203   }
6204 
6205   enum {
6206     COFF = (1 << MCContext::IsCOFF),
6207     ELF = (1 << MCContext::IsELF),
6208     MACHO = (1 << MCContext::IsMachO),
6209     WASM = (1 << MCContext::IsWasm),
6210   };
6211   static const struct PrefixEntry {
6212     const char *Spelling;
6213     ARMMCExpr::VariantKind VariantKind;
6214     uint8_t SupportedFormats;
6215   } PrefixEntries[] = {
6216       {"upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO},
6217       {"lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO},
6218       {"upper8_15", ARMMCExpr::VK_ARM_HI_8_15, ELF},
6219       {"upper0_7", ARMMCExpr::VK_ARM_HI_0_7, ELF},
6220       {"lower8_15", ARMMCExpr::VK_ARM_LO_8_15, ELF},
6221       {"lower0_7", ARMMCExpr::VK_ARM_LO_0_7, ELF},
6222   };
6223 
6224   StringRef IDVal = Parser.getTok().getIdentifier();
6225 
6226   const auto &Prefix =
6227       llvm::find_if(PrefixEntries, [&IDVal](const PrefixEntry &PE) {
6228         return PE.Spelling == IDVal;
6229       });
6230   if (Prefix == std::end(PrefixEntries)) {
6231     Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
6232     return true;
6233   }
6234 
6235   uint8_t CurrentFormat;
6236   switch (getContext().getObjectFileType()) {
6237   case MCContext::IsMachO:
6238     CurrentFormat = MACHO;
6239     break;
6240   case MCContext::IsELF:
6241     CurrentFormat = ELF;
6242     break;
6243   case MCContext::IsCOFF:
6244     CurrentFormat = COFF;
6245     break;
6246   case MCContext::IsWasm:
6247     CurrentFormat = WASM;
6248     break;
6249   case MCContext::IsGOFF:
6250   case MCContext::IsSPIRV:
6251   case MCContext::IsXCOFF:
6252   case MCContext::IsDXContainer:
6253     llvm_unreachable("unexpected object format");
6254     break;
6255   }
6256 
6257   if (~Prefix->SupportedFormats & CurrentFormat) {
6258     Error(Parser.getTok().getLoc(),
6259           "cannot represent relocation in the current file format");
6260     return true;
6261   }
6262 
6263   RefKind = Prefix->VariantKind;
6264   Parser.Lex();
6265 
6266   if (getLexer().isNot(AsmToken::Colon)) {
6267     Error(Parser.getTok().getLoc(), "unexpected token after prefix");
6268     return true;
6269   }
6270   Parser.Lex(); // Eat the last ':'
6271 
6272   // consume an optional trailing '#' (GNU compatibility)
6273   parseOptionalToken(AsmToken::Hash);
6274 
6275   return false;
6276 }
6277 
6278 /// Given a mnemonic, split out possible predication code and carry
6279 /// setting letters to form a canonical mnemonic and flags.
6280 //
6281 // FIXME: Would be nice to autogen this.
6282 // FIXME: This is a bit of a maze of special cases.
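// For example (illustrative): "addseq" splits into the canonical mnemonic
// "add" with the carry-setting 's' flag and predication code ARMCC::EQ; the
// predication code is stripped first, then the carry-setting suffix.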
6283 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
6284                                       StringRef ExtraToken,
6285                                       unsigned &PredicationCode,
6286                                       unsigned &VPTPredicationCode,
6287                                       bool &CarrySetting,
6288                                       unsigned &ProcessorIMod,
6289                                       StringRef &ITMask) {
6290   PredicationCode = ARMCC::AL;
6291   VPTPredicationCode = ARMVCC::None;
6292   CarrySetting = false;
6293   ProcessorIMod = 0;
6294 
6295   // Ignore some mnemonics we know aren't predicated forms.
6296   //
6297   // FIXME: Would be nice to autogen this.
6298   if ((Mnemonic == "movs" && isThumb()) || Mnemonic == "teq" ||
6299       Mnemonic == "vceq" || Mnemonic == "svc" || Mnemonic == "mls" ||
6300       Mnemonic == "smmls" || Mnemonic == "vcls" || Mnemonic == "vmls" ||
6301       Mnemonic == "vnmls" || Mnemonic == "vacge" || Mnemonic == "vcge" ||
6302       Mnemonic == "vclt" || Mnemonic == "vacgt" || Mnemonic == "vaclt" ||
6303       Mnemonic == "vacle" || Mnemonic == "hlt" || Mnemonic == "vcgt" ||
6304       Mnemonic == "vcle" || Mnemonic == "smlal" || Mnemonic == "umaal" ||
6305       Mnemonic == "umlal" || Mnemonic == "vabal" || Mnemonic == "vmlal" ||
6306       Mnemonic == "vpadal" || Mnemonic == "vqdmlal" || Mnemonic == "fmuls" ||
6307       Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
6308       Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
6309       Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
6310       Mnemonic == "vrintm" || Mnemonic == "hvc" ||
6311       Mnemonic.starts_with("vsel") || Mnemonic == "vins" ||
6312       Mnemonic == "vmovx" || Mnemonic == "bxns" || Mnemonic == "blxns" ||
6313       Mnemonic == "vdot" || Mnemonic == "vmmla" || Mnemonic == "vudot" ||
6314       Mnemonic == "vsdot" || Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
6315       Mnemonic == "vfmal" || Mnemonic == "vfmsl" || Mnemonic == "wls" ||
6316       Mnemonic == "le" || Mnemonic == "dls" || Mnemonic == "csel" ||
6317       Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
6318       Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
6319       Mnemonic == "cset" || Mnemonic == "csetm" || Mnemonic == "aut" ||
6320       Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "bti")
6321     return Mnemonic;
6322 
6323   // First, split out any predication code. Ignore mnemonics we know aren't
6324   // predicated but which merely end in letters that spell a condition code.
6325   if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
6326       Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
6327       Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
6328       Mnemonic != "sbcs" && Mnemonic != "rscs" &&
6329       !(hasMVE() &&
6330         (Mnemonic == "vmine" || Mnemonic == "vshle" || Mnemonic == "vshlt" ||
6331          Mnemonic == "vshllt" || Mnemonic == "vrshle" || Mnemonic == "vrshlt" ||
6332          Mnemonic == "vmvne" || Mnemonic == "vorne" || Mnemonic == "vnege" ||
6333          Mnemonic == "vnegt" || Mnemonic == "vmule" || Mnemonic == "vmult" ||
6334          Mnemonic == "vrintne" || Mnemonic == "vcmult" ||
6335          Mnemonic == "vcmule" || Mnemonic == "vpsele" || Mnemonic == "vpselt" ||
6336          Mnemonic.starts_with("vq")))) {
6337     unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
6338     if (CC != ~0U) {
6339       Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
6340       PredicationCode = CC;
6341     }
6342   }
6343 
6344   // Next, determine if we have a carry-setting 's' suffix. We explicitly ignore
6345   // the instructions whose mnemonics legitimately end in 's' without setting flags.
6346   if (Mnemonic.ends_with("s") &&
6347       !(Mnemonic == "cps" || Mnemonic == "mls" || Mnemonic == "mrs" ||
6348         Mnemonic == "smmls" || Mnemonic == "vabs" || Mnemonic == "vcls" ||
6349         Mnemonic == "vmls" || Mnemonic == "vmrs" || Mnemonic == "vnmls" ||
6350         Mnemonic == "vqabs" || Mnemonic == "vrecps" || Mnemonic == "vrsqrts" ||
6351         Mnemonic == "srs" || Mnemonic == "flds" || Mnemonic == "fmrs" ||
6352         Mnemonic == "fsqrts" || Mnemonic == "fsubs" || Mnemonic == "fsts" ||
6353         Mnemonic == "fcpys" || Mnemonic == "fdivs" || Mnemonic == "fmuls" ||
6354         Mnemonic == "fcmps" || Mnemonic == "fcmpzs" || Mnemonic == "vfms" ||
6355         Mnemonic == "vfnms" || Mnemonic == "fconsts" || Mnemonic == "bxns" ||
6356         Mnemonic == "blxns" || Mnemonic == "vfmas" || Mnemonic == "vmlas" ||
6357         (Mnemonic == "movs" && isThumb()))) {
6358     Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
6359     CarrySetting = true;
6360   }
6361 
6362   // The "cps" instruction can have a interrupt mode operand which is glued into
6363   // the mnemonic. Check if this is the case, split it and parse the imod op
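  // For example (illustrative): "cpsie" splits into "cps" with ProcessorIMod
  // set to ARM_PROC::IE.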
6364   if (Mnemonic.starts_with("cps")) {
6365     // Split out any imod code.
6366     unsigned IMod =
6367       StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
6368       .Case("ie", ARM_PROC::IE)
6369       .Case("id", ARM_PROC::ID)
6370       .Default(~0U);
6371     if (IMod != ~0U) {
6372       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
6373       ProcessorIMod = IMod;
6374     }
6375   }
6376 
6377   if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" &&
6378       Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" &&
6379       Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" &&
6380       Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt" && Mnemonic != "vmullt" &&
6381       Mnemonic != "vqmovnt" && Mnemonic != "vqmovunt" &&
6382       Mnemonic != "vqmovnt" && Mnemonic != "vmovnt" && Mnemonic != "vqdmullt" &&
6383       Mnemonic != "vpnot" && Mnemonic != "vcvtt" && Mnemonic != "vcvt") {
6384     unsigned CC = ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size()-1));
6385     if (CC != ~0U) {
6386       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
6387       VPTPredicationCode = CC;
6388     }
6389     return Mnemonic;
6390   }
6391 
6392   // The "it" instruction has the condition mask on the end of the mnemonic.
6393   if (Mnemonic.starts_with("it")) {
6394     ITMask = Mnemonic.slice(2, Mnemonic.size());
6395     Mnemonic = Mnemonic.slice(0, 2);
6396   }
6397 
6398   if (Mnemonic.starts_with("vpst")) {
6399     ITMask = Mnemonic.slice(4, Mnemonic.size());
6400     Mnemonic = Mnemonic.slice(0, 4);
6401   } else if (Mnemonic.starts_with("vpt")) {
6402     ITMask = Mnemonic.slice(3, Mnemonic.size());
6403     Mnemonic = Mnemonic.slice(0, 3);
6404   }
6405 
6406   return Mnemonic;
6407 }
6408 
6409 /// Given a canonical mnemonic, determine if the instruction ever allows
6410 /// inclusion of carry set or predication code operands.
6411 //
6412 // FIXME: It would be nice to autogen this.
6413 void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
6414                                          StringRef ExtraToken,
6415                                          StringRef FullInst,
6416                                          bool &CanAcceptCarrySet,
6417                                          bool &CanAcceptPredicationCode,
6418                                          bool &CanAcceptVPTPredicationCode) {
6419   CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);
6420 
6421   CanAcceptCarrySet =
6422       Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6423       Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
6424       Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
6425       Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
6426       Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
6427       Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
6428       Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
6429       (!isThumb() &&
6430        (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
6431         Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
6432 
6433   if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
6434       Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
6435       Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
6436       Mnemonic.starts_with("crc32") || Mnemonic.starts_with("cps") ||
6437       Mnemonic.starts_with("vsel") || Mnemonic == "vmaxnm" ||
6438       Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
6439       Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
6440       Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
6441       Mnemonic.starts_with("aes") || Mnemonic == "hvc" ||
6442       Mnemonic == "setpan" || Mnemonic.starts_with("sha1") ||
6443       Mnemonic.starts_with("sha256") ||
6444       (FullInst.starts_with("vmull") && FullInst.ends_with(".p64")) ||
6445       Mnemonic == "vmovx" || Mnemonic == "vins" || Mnemonic == "vudot" ||
6446       Mnemonic == "vsdot" || Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
6447       Mnemonic == "vfmal" || Mnemonic == "vfmsl" || Mnemonic == "vfmat" ||
6448       Mnemonic == "vfmab" || Mnemonic == "vdot" || Mnemonic == "vmmla" ||
6449       Mnemonic == "sb" || Mnemonic == "ssbb" || Mnemonic == "pssbb" ||
6450       Mnemonic == "vsmmla" || Mnemonic == "vummla" || Mnemonic == "vusmmla" ||
6451       Mnemonic == "vusdot" || Mnemonic == "vsudot" || Mnemonic == "bfcsel" ||
6452       Mnemonic == "wls" || Mnemonic == "dls" || Mnemonic == "le" ||
6453       Mnemonic == "csel" || Mnemonic == "csinc" || Mnemonic == "csinv" ||
6454       Mnemonic == "csneg" || Mnemonic == "cinc" || Mnemonic == "cinv" ||
6455       Mnemonic == "cneg" || Mnemonic == "cset" || Mnemonic == "csetm" ||
6456       (hasCDE() && MS.isCDEInstr(Mnemonic) &&
6457        !MS.isITPredicableCDEInstr(Mnemonic)) ||
6458       Mnemonic.starts_with("vpt") || Mnemonic.starts_with("vpst") ||
6459       Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "aut" ||
6460       Mnemonic == "bti" ||
6461       (hasMVE() &&
6462        (Mnemonic.starts_with("vst2") || Mnemonic.starts_with("vld2") ||
6463         Mnemonic.starts_with("vst4") || Mnemonic.starts_with("vld4") ||
6464         Mnemonic.starts_with("wlstp") || Mnemonic.starts_with("dlstp") ||
6465         Mnemonic.starts_with("letp")))) {
6466     // These mnemonics are never predicable
6467     CanAcceptPredicationCode = false;
6468   } else if (!isThumb()) {
6469     // Some instructions are only predicable in Thumb mode
6470     CanAcceptPredicationCode =
6471         Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
6472         Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
6473         Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
6474         Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
6475         Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
6476         Mnemonic != "stc2" && Mnemonic != "stc2l" && Mnemonic != "tsb" &&
6477         !Mnemonic.starts_with("rfe") && !Mnemonic.starts_with("srs");
6478   } else if (isThumbOne()) {
6479     if (hasV6MOps())
6480       CanAcceptPredicationCode = Mnemonic != "movs";
6481     else
6482       CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
6483   } else
6484     CanAcceptPredicationCode = true;
6485 }
6486 
6487 // Some Thumb instructions have two-operand forms but no three-operand encoding;
6488 // convert a redundant three-operand form to the two-operand form if possible.
6489 //
6490 // FIXME: We would really like to be able to tablegen'erate this.
6491 void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
6492                                                  bool CarrySetting,
6493                                                  OperandVector &Operands) {
6494   if (Operands.size() != 6)
6495     return;
6496 
6497   const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6498         auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
6499   if (!Op3.isReg() || !Op4.isReg())
6500     return;
6501 
6502   auto Op3Reg = Op3.getReg();
6503   auto Op4Reg = Op4.getReg();
6504 
6505   // For most Thumb2 cases we just generate the 3 operand form and reduce
6506   // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
6507   // won't accept SP or PC so we do the transformation here taking care
6508   // with the immediate range in the 'add sp, sp, #imm' case.
6509   auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
6510   if (isThumbTwo()) {
6511     if (Mnemonic != "add")
6512       return;
6513     bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
6514                         (Op5.isReg() && Op5.getReg() == ARM::PC);
6515     if (!TryTransform) {
6516       TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
6517                       (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
6518                      !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
6519                        Op5.isImm() && !Op5.isImm0_508s4());
6520     }
6521     if (!TryTransform)
6522       return;
6523   } else if (!isThumbOne())
6524     return;
6525 
6526   if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
6527         Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6528         Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
6529         Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
6530     return;
6531 
6532   // If first 2 operands of a 3 operand instruction are the same
6533   // then transform to 2 operand version of the same instruction
6534   // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
6535   bool Transform = Op3Reg == Op4Reg;
6536 
6537   // For commutative operations, we might be able to transform if we swap
6538   // Op4 and Op5.  The 'ADD Rdm, SP, Rdm' form is already handled specially
6539   // as tADDrsp.
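  // For example (illustrative): "ands r0, r1, r0" is handled by swapping the
  // last two operands and then dropping the duplicated r0, yielding
  // "ands r0, r1".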
6540   const ARMOperand *LastOp = &Op5;
6541   bool Swap = false;
6542   if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
6543       ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
6544        Mnemonic == "and" || Mnemonic == "eor" ||
6545        Mnemonic == "adc" || Mnemonic == "orr")) {
6546     Swap = true;
6547     LastOp = &Op4;
6548     Transform = true;
6549   }
6550 
6551   // If both registers are the same then remove one of them from
6552   // the operand list, with certain exceptions.
6553   if (Transform) {
6554     // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
6555     // 2 operand forms don't exist.
6556     if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
6557         LastOp->isReg())
6558       Transform = false;
6559 
6560     // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
6561     // 3 bits, because the ARMARM says not to.
6562     if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
6563       Transform = false;
6564   }
6565 
6566   if (Transform) {
6567     if (Swap)
6568       std::swap(Op4, Op5);
6569     Operands.erase(Operands.begin() + 3);
6570   }
6571 }
6572 
6573 // This function returns true if the operand is an expression using one of the
6574 // relocation prefixes :upper8_15:, :upper0_7:, :lower8_15: or :lower0_7:.
6575 static bool isThumbI8Relocation(MCParsedAsmOperand &MCOp) {
6576   ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
6577   if (!Op.isImm())
6578     return false;
6579   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6580   if (CE)
6581     return false;
6582   const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
6583   if (!E)
6584     return false;
6585   const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6586   if (ARM16Expr && (ARM16Expr->getKind() == ARMMCExpr::VK_ARM_HI_8_15 ||
6587                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_HI_0_7 ||
6588                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_LO_8_15 ||
6589                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_LO_0_7))
6590     return true;
6591   return false;
6592 }
6593 
6594 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
6595                                           OperandVector &Operands) {
6596   // FIXME: This is all horribly hacky. We really need a better way to deal
6597   // with optional operands like this in the matcher table.
6598 
6599   // The 'mov' mnemonic is special. One variant has a cc_out operand, while
6600   // another does not. Specifically, the MOVW instruction does not. So we
6601   // special case it here and remove the defaulted (non-setting) cc_out
6602   // operand if that's the instruction we're trying to match.
6603   //
6604   // We do this as post-processing of the explicit operands rather than just
6605   // conditionally adding the cc_out in the first place because we need
6606   // to check the type of the parsed immediate operand.
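  // For example (illustrative): in ARM mode "mov r0, #0xabcd" is not a valid
  // modified immediate but does match MOVW, so the defaulted cc_out operand is
  // removed here.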
6607   if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
6608       !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
6609       static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
6610       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
6611     return true;
6612 
6613   if (Mnemonic == "movs" && Operands.size() > 3 && isThumb() &&
6614       isThumbI8Relocation(*Operands[3]))
6615     return true;
6616 
6617   // Register-register 'add' for thumb does not have a cc_out operand
6618   // when there are only two register operands.
6619   if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
6620       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6621       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6622       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
6623     return true;
6624   // Register-register 'add' for thumb does not have a cc_out operand
6625   // when it's an ADD Rdm, SP, {Rdm|#imm} instruction (imm a multiple of 4 in
6626   // [0, 1020]). We do have to check the immediate range here since Thumb2 has
6627   // a variant that can handle a different range and has a cc_out operand.
6628   if (((isThumb() && Mnemonic == "add") ||
6629        (isThumbTwo() && Mnemonic == "sub")) &&
6630       Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6631       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6632       static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
6633       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6634       ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
6635        static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
6636     return true;
6637   // For Thumb2, add/sub immediate does not have a cc_out operand for the
6638   // imm0_4095 variant. That's the least-preferred variant when
6639   // selecting via the generic "add" mnemonic, so to know that we
6640   // should remove the cc_out operand, we have to explicitly check that
6641   // it's not one of the other variants. Ugh.
6642   if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
6643       Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6644       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6645       static_cast<ARMOperand &>(*Operands[5]).isImm()) {
6646     // Nest conditions rather than one big 'if' statement for readability.
6647     //
6648     // If both registers are low, we're in an IT block, and the immediate is
6649     // in range, we should use encoding T1 instead, which has a cc_out.
6650     if (inITBlock() &&
6651         isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
6652         isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
6653         static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
6654       return false;
6655     // Check against T3. If the second register is the PC, this is an
6656     // alternate form of ADR, which uses encoding T4, so check for that too.
6657     if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
6658         (static_cast<ARMOperand &>(*Operands[5]).isT2SOImm() ||
6659          static_cast<ARMOperand &>(*Operands[5]).isT2SOImmNeg()))
6660       return false;
6661 
6662     // Otherwise, we use encoding T4, which does not have a cc_out
6663     // operand.
6664     return true;
6665   }
6666 
6667   // The thumb2 multiply instruction doesn't have a CCOut register, so
6668   // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
6669   // use the 16-bit encoding or not.
6670   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
6671       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6672       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6673       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6674       static_cast<ARMOperand &>(*Operands[5]).isReg() &&
6675       // If the registers aren't low regs, the destination reg isn't the
6676       // same as one of the source regs, or the cc_out operand is zero
6677       // outside of an IT block, we have to use the 32-bit encoding, so
6678       // remove the cc_out operand.
6679       (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
6680        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
6681        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
6682        !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
6683                             static_cast<ARMOperand &>(*Operands[5]).getReg() &&
6684                         static_cast<ARMOperand &>(*Operands[3]).getReg() !=
6685                             static_cast<ARMOperand &>(*Operands[4]).getReg())))
6686     return true;
6687 
6688   // Also check the 'mul' syntax variant that doesn't specify an explicit
6689   // destination register.
6690   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
6691       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6692       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6693       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6694       // If the registers aren't low regs  or the cc_out operand is zero
6695       // outside of an IT block, we have to use the 32-bit encoding, so
6696       // remove the cc_out operand.
6697       (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
6698        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
6699        !inITBlock()))
6700     return true;
6701 
6702   // Register-register 'add/sub' for thumb does not have a cc_out operand
6703   // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
6704   // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
6705   // right, this will result in better diagnostics (which operand is off)
6706   // anyway.
6707   if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
6708       (Operands.size() == 5 || Operands.size() == 6) &&
6709       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6710       static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
6711       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6712       (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
6713        (Operands.size() == 6 &&
6714         static_cast<ARMOperand &>(*Operands[5]).isImm()))) {
6715     // Thumb2 (add|sub){s}{p}.w GPRnopc, sp, #{T2SOImm} has cc_out
6716     return (!(isThumbTwo() &&
6717               (static_cast<ARMOperand &>(*Operands[4]).isT2SOImm() ||
6718                static_cast<ARMOperand &>(*Operands[4]).isT2SOImmNeg())));
6719   }
6720   // FIXME: Should combine all the Thumb/Thumb2 (add|sub) handling into a single if case.
6721   // Thumb2 ADD r0, #4095 -> ADDW r0, r0, #4095 (T4)
6722   // Thumb2 SUB r0, #4095 -> SUBW r0, r0, #4095
6723   if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
6724       (Operands.size() == 5) &&
6725       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6726       static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::SP &&
6727       static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::PC &&
6728       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6729       static_cast<ARMOperand &>(*Operands[4]).isImm()) {
6730     const ARMOperand &IMM = static_cast<ARMOperand &>(*Operands[4]);
6731     if (IMM.isT2SOImm() || IMM.isT2SOImmNeg())
6732       return false; // add.w / sub.w
6733     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IMM.getImm())) {
6734       const int64_t Value = CE->getValue();
6735       // Thumb1 imm8 sub / add
6736       if ((Value < ((1 << 7) - 1) << 2) && inITBlock() && (!(Value & 3)) &&
6737           isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()))
6738         return false;
6739       return true; // Thumb2 T4 addw / subw
6740     }
6741   }
6742   return false;
6743 }
6744 
6745 bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
6746                                               OperandVector &Operands) {
6747   // VRINT{Z, X} have a predicate operand in VFP, but not in NEON
6748   unsigned RegIdx = 3;
6749   if ((((Mnemonic == "vrintz" || Mnemonic == "vrintx") && !hasMVE()) ||
6750       Mnemonic == "vrintr") &&
6751       (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
6752        static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
6753     if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
6754         (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
6755          static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
6756       RegIdx = 4;
6757 
6758     if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
6759         (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6760              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
6761          ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6762              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
6763       return true;
6764   }
6765   return false;
6766 }
6767 
6768 bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
6769                                                     OperandVector &Operands) {
6770   if (!hasMVE() || Operands.size() < 3)
6771     return true;
6772 
6773   if (Mnemonic.starts_with("vld2") || Mnemonic.starts_with("vld4") ||
6774       Mnemonic.starts_with("vst2") || Mnemonic.starts_with("vst4"))
6775     return true;
6776 
6777   if (Mnemonic.starts_with("vctp") || Mnemonic.starts_with("vpnot"))
6778     return false;
6779 
6780   if (Mnemonic.starts_with("vmov") &&
6781       !(Mnemonic.starts_with("vmovl") || Mnemonic.starts_with("vmovn") ||
6782         Mnemonic.starts_with("vmovx"))) {
6783     for (auto &Operand : Operands) {
6784       if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6785           ((*Operand).isReg() &&
6786            (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
6787              (*Operand).getReg()) ||
6788             ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6789               (*Operand).getReg())))) {
6790         return true;
6791       }
6792     }
6793     return false;
6794   } else {
6795     for (auto &Operand : Operands) {
6796       // We check the larger class QPR instead of just the legal class
6797       // MQPR, to more accurately report errors when using Q registers
6798       // outside of the allowed range.
6799       if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6800           (Operand->isReg() &&
6801            (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6802              Operand->getReg()))))
6803         return false;
6804     }
6805     return true;
6806   }
6807 }
6808 
6809 static bool isDataTypeToken(StringRef Tok) {
6810   return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
6811     Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
6812     Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
6813     Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
6814     Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
6815     Tok == ".f" || Tok == ".d";
6816 }
6817 
6818 // FIXME: This bit should probably be handled via an explicit match class
6819 // in the .td files that matches the suffix instead of having it be
6820 // a literal string token the way it is now.
6821 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
6822   return Mnemonic.starts_with("vldm") || Mnemonic.starts_with("vstm");
6823 }
6824 
6825 static void applyMnemonicAliases(StringRef &Mnemonic,
6826                                  const FeatureBitset &Features,
6827                                  unsigned VariantID);
6828 
6829 // The GNU assembler has aliases of ldrd and strd with the second register
6830 // omitted. We don't have a way to do that in tablegen, so fix it up here.
6831 //
6832 // We have to be careful to not emit an invalid Rt2 here, because the rest of
6833 // the assembly parser could then generate confusing diagnostics referring to
6834 // it. If we do find anything that prevents us from doing the transformation we
6835 // bail out, and let the assembly parser report an error on the instruction as
6836 // it is written.
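// For example (illustrative): "ldrd r2, [r0]" is completed here to
// "ldrd r2, r3, [r0]" by inserting the implied second register r3.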
6837 void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
6838                                      OperandVector &Operands) {
6839   if (Mnemonic != "ldrd" && Mnemonic != "strd")
6840     return;
6841   if (Operands.size() < 4)
6842     return;
6843 
6844   ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
6845   ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6846 
6847   if (!Op2.isReg())
6848     return;
6849   if (!Op3.isGPRMem())
6850     return;
6851 
6852   const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
6853   if (!GPR.contains(Op2.getReg()))
6854     return;
6855 
6856   unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
6857   if (!isThumb() && (RtEncoding & 1)) {
6858     // In ARM mode, the registers must be from an aligned pair; this
6859     // restriction does not apply in Thumb mode.
6860     return;
6861   }
6862   if (Op2.getReg() == ARM::PC)
6863     return;
6864   unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
6865   if (!PairedReg || PairedReg == ARM::PC ||
6866       (PairedReg == ARM::SP && !hasV8Ops()))
6867     return;
6868 
6869   Operands.insert(
6870       Operands.begin() + 3,
6871       ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
6872 }
6873 
6874 // Dual-register instructions have the following syntax:
6875 // <mnemonic> <predicate>? <coproc>, <Rdest>, <Rdest+1>, <Rsrc>, ..., #imm
6876 // This function tries to remove <Rdest+1> and replace <Rdest> with a pair
6877 // operand. If the conversion fails an error is diagnosed, and the function
6878 // returns true.
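// For example (illustrative): in "cx1d p0, r4, r5, #1234", the r4, r5 pair is
// replaced by the single register-pair operand R4_R5 before matching.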
6879 bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic,
6880                                             OperandVector &Operands) {
6881   assert(MS.isCDEDualRegInstr(Mnemonic));
6882   bool isPredicable =
6883       Mnemonic == "cx1da" || Mnemonic == "cx2da" || Mnemonic == "cx3da";
6884   size_t NumPredOps = isPredicable ? 1 : 0;
6885 
6886   if (Operands.size() <= 3 + NumPredOps)
6887     return false;
6888 
6889   StringRef Op2Diag(
6890       "operand must be an even-numbered register in the range [r0, r10]");
6891 
6892   const MCParsedAsmOperand &Op2 = *Operands[2 + NumPredOps];
6893   if (!Op2.isReg())
6894     return Error(Op2.getStartLoc(), Op2Diag);
6895 
6896   unsigned RNext;
6897   unsigned RPair;
6898   switch (Op2.getReg()) {
6899   default:
6900     return Error(Op2.getStartLoc(), Op2Diag);
6901   case ARM::R0:
6902     RNext = ARM::R1;
6903     RPair = ARM::R0_R1;
6904     break;
6905   case ARM::R2:
6906     RNext = ARM::R3;
6907     RPair = ARM::R2_R3;
6908     break;
6909   case ARM::R4:
6910     RNext = ARM::R5;
6911     RPair = ARM::R4_R5;
6912     break;
6913   case ARM::R6:
6914     RNext = ARM::R7;
6915     RPair = ARM::R6_R7;
6916     break;
6917   case ARM::R8:
6918     RNext = ARM::R9;
6919     RPair = ARM::R8_R9;
6920     break;
6921   case ARM::R10:
6922     RNext = ARM::R11;
6923     RPair = ARM::R10_R11;
6924     break;
6925   }
6926 
6927   const MCParsedAsmOperand &Op3 = *Operands[3 + NumPredOps];
6928   if (!Op3.isReg() || Op3.getReg() != RNext)
6929     return Error(Op3.getStartLoc(), "operand must be a consecutive register");
6930 
6931   Operands.erase(Operands.begin() + 3 + NumPredOps);
6932   Operands[2 + NumPredOps] =
6933       ARMOperand::CreateReg(RPair, Op2.getStartLoc(), Op2.getEndLoc());
6934   return false;
6935 }
6936 
6937 /// Parse an arm instruction mnemonic followed by its operands.
6938 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
6939                                     SMLoc NameLoc, OperandVector &Operands) {
6940   MCAsmParser &Parser = getParser();
6941 
6942   // Apply mnemonic aliases before doing anything else, as the destination
6943   // mnemonic may include suffixes and we want to handle them normally.
6944   // The generic tblgen'erated code does this later, at the start of
6945   // MatchInstructionImpl(), but that's too late for aliases that include
6946   // any sort of suffix.
6947   const FeatureBitset &AvailableFeatures = getAvailableFeatures();
6948   unsigned AssemblerDialect = getParser().getAssemblerDialect();
6949   applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
6950 
6951   // First check for the ARM-specific .req directive.
6952   if (Parser.getTok().is(AsmToken::Identifier) &&
6953       Parser.getTok().getIdentifier().lower() == ".req") {
6954     parseDirectiveReq(Name, NameLoc);
6955     // We always return 'error' for this, as we're done with this
6956     // statement and don't need to match the instruction.
6957     return true;
6958   }
6959 
6960   // Create the leading tokens for the mnemonic, split by '.' characters.
6961   size_t Start = 0, Next = Name.find('.');
6962   StringRef Mnemonic = Name.slice(Start, Next);
6963   StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));
6964 
6965   // Split out the predication code and carry setting flag from the mnemonic.
6966   unsigned PredicationCode;
6967   unsigned VPTPredicationCode;
6968   unsigned ProcessorIMod;
6969   bool CarrySetting;
6970   StringRef ITMask;
6971   Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
6972                            CarrySetting, ProcessorIMod, ITMask);
6973 
6974   // In Thumb1, only the branch (B) instruction can be predicated.
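  // For example, 'bne .Lfoo' is accepted here, while 'addne r0, r1' is
  // rejected with the diagnostic below.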
6975   if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
6976     return Error(NameLoc, "conditional execution not supported in Thumb1");
6977   }
6978 
6979   Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
6980 
6981   // Handle the mask for IT and VPT instructions. In ARMOperand and
6982   // MCOperand, this is stored in a format independent of the
6983   // condition code: the lowest set bit indicates the end of the
6984   // encoding, and above that, a 1 bit indicates 'else', and an 0
6985   // encoding, and above that, a 1 bit indicates 'else', and a 0
6986   //    IT    -> 1000
6987   //    ITx   -> x100    (ITT -> 0100, ITE -> 1100)
6988   //    ITxy  -> xy10    (e.g. ITET -> 1010)
6989   //    ITxyz -> xyz1    (e.g. ITEET -> 1101)
6990   // Note: See the ARM::PredBlockMask enum in
6991   //   /lib/Target/ARM/Utils/ARMBaseInfo.h
6992   if (Mnemonic == "it" || Mnemonic.starts_with("vpt") ||
6993       Mnemonic.starts_with("vpst")) {
6994     SMLoc Loc = Mnemonic == "it"  ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) :
6995                 Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
6996                                     SMLoc::getFromPointer(NameLoc.getPointer() + 4);
6997     if (ITMask.size() > 3) {
6998       if (Mnemonic == "it")
6999         return Error(Loc, "too many conditions on IT instruction");
7000       return Error(Loc, "too many conditions on VPT instruction");
7001     }
7002     unsigned Mask = 8;
7003     for (char Pos : llvm::reverse(ITMask)) {
7004       if (Pos != 't' && Pos != 'e') {
7005         return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
7006       }
7007       Mask >>= 1;
7008       if (Pos == 'e')
7009         Mask |= 8;
7010     }
7011     Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
7012   }
7013 
7014   // FIXME: This is all a pretty gross hack. We should automatically handle
7015   // optional operands like this via tblgen.
7016 
7017   // Next, add the CCOut and ConditionCode operands, if needed.
7018   //
7019   // For mnemonics which can ever incorporate a carry setting bit or predication
7020   // code, our matching model involves us always generating CCOut and
7021   // ConditionCode operands to match the mnemonic "as written" and then we let
7022   // the matcher deal with finding the right instruction or generating an
7023   // appropriate error.
7024   bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
7025   getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
7026                         CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
7027 
7028   // If we had a carry-set on an instruction that can't do that, issue an
7029   // error.
7030   if (!CanAcceptCarrySet && CarrySetting) {
7031     return Error(NameLoc, "instruction '" + Mnemonic +
7032                  "' can not set flags, but 's' suffix specified");
7033   }
7034   // If we had a predication code on an instruction that can't do that, issue an
7035   // error.
7036   if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
7037     return Error(NameLoc, "instruction '" + Mnemonic +
7038                  "' is not predicable, but condition code specified");
7039   }
7040 
7041   // If we had a VPT predication code on an instruction that can't do that, issue an
7042   // error.
7043   if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
7044     return Error(NameLoc, "instruction '" + Mnemonic +
7045                  "' is not VPT predicable, but VPT code T/E is specified");
7046   }
7047 
7048   // Add the carry setting operand, if necessary.
7049   if (CanAcceptCarrySet) {
7050     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
7051     Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
7052                                                Loc));
7053   }
7054 
7055   // Add the predication code operand, if necessary.
7056   if (CanAcceptPredicationCode) {
7057     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7058                                       CarrySetting);
7059     Operands.push_back(ARMOperand::CreateCondCode(
7060                        ARMCC::CondCodes(PredicationCode), Loc));
7061   }
7062 
7063   // Add the VPT predication code operand, if necessary.
7064   // FIXME: We don't add them for the instructions filtered below as these can
7065   // have custom operands which need special parsing.  This parsing requires
7066   // the operand to be in the same place in the OperandVector as its
7067   // definition in tblgen.  Since these instructions may also have the
7068   // scalar predication operand, we do not add the vector one here and
7069   // instead fix it up after the operands have been parsed.
7070   if (CanAcceptVPTPredicationCode && Mnemonic != "vmov" &&
7071       !Mnemonic.starts_with("vcmp") &&
7072       !(Mnemonic.starts_with("vcvt") && Mnemonic != "vcvta" &&
7073         Mnemonic != "vcvtn" && Mnemonic != "vcvtp" && Mnemonic != "vcvtm")) {
7074     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7075                                       CarrySetting);
7076     Operands.push_back(ARMOperand::CreateVPTPred(
7077                          ARMVCC::VPTCodes(VPTPredicationCode), Loc));
7078   }
7079 
7080   // Add the processor imod operand, if necessary.
7081   if (ProcessorIMod) {
7082     Operands.push_back(ARMOperand::CreateImm(
7083           MCConstantExpr::create(ProcessorIMod, getContext()),
7084                                  NameLoc, NameLoc));
7085   } else if (Mnemonic == "cps" && isMClass()) {
7086     return Error(NameLoc, "instruction 'cps' requires effect for M-class");
7087   }
7088 
7089   // Add the remaining tokens in the mnemonic.
7090   while (Next != StringRef::npos) {
7091     Start = Next;
7092     Next = Name.find('.', Start + 1);
7093     ExtraToken = Name.slice(Start, Next);
7094 
7095     // Some NEON instructions have an optional datatype suffix that is
7096     // completely ignored. Check for that.
7097     if (isDataTypeToken(ExtraToken) &&
7098         doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
7099       continue;
7100 
7101     // For ARM mode, generate an error if the .n qualifier is used.
7102     if (ExtraToken == ".n" && !isThumb()) {
7103       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7104       return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
7105                    "arm mode");
7106     }
7107 
7108     // The .n qualifier is always discarded as that is what the tables
7109     // and matcher expect.  In ARM mode the .w qualifier has no effect,
7110     // so discard it to avoid errors that can be caused by the matcher.
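    // For example, 'ldr.w r0, [r1]' assembled in ARM mode keeps only 'ldr';
    // in Thumb mode the '.w' token is kept so the matcher selects the
    // 32-bit encoding.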
7111     if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
7112       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7113       Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
7114     }
7115   }
7116 
7117   // Read the remaining operands.
7118   if (getLexer().isNot(AsmToken::EndOfStatement)) {
7119     // Read the first operand.
7120     if (parseOperand(Operands, Mnemonic)) {
7121       return true;
7122     }
7123 
7124     while (parseOptionalToken(AsmToken::Comma)) {
7125       // Parse and remember the operand.
7126       if (parseOperand(Operands, Mnemonic)) {
7127         return true;
7128       }
7129     }
7130   }
7131 
7132   if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
7133     return true;
7134 
7135   tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);
7136 
7137   if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
7138     // Dual-register instructions use even-odd register pairs as their
7139     // destination operand; in assembly such a pair is spelled as two
7140     // consecutive registers, without any special syntax.
7141     // CDEConvertDualRegOperand tries to convert such an operand into a
7142     // register pair, e.g. r2, r3 -> r2_r3. It returns true if an error
7143     // message has been emitted. If it returns false, the function either
7144     // succeeded or an error (e.g. missing operand) will be diagnosed elsewhere.
7145     if (MS.isCDEDualRegInstr(Mnemonic)) {
7146       bool GotError = CDEConvertDualRegOperand(Mnemonic, Operands);
7147       if (GotError)
7148         return GotError;
7149     }
7150   }
7151 
7152   // Some instructions, mostly Thumb, have forms for the same mnemonic that
7153   // do and don't have a cc_out optional-def operand. With some spot-checks
7154   // of the operand list, we can figure out which variant we're trying to
7155   // parse and adjust accordingly before actually matching. We shouldn't ever
7156   // try to remove a cc_out operand that was explicitly set on the
7157   // mnemonic, of course (CarrySetting == true). Reason #317 that the
7158   // table-driven matcher doesn't fit well with the ARM instruction set.
7159   if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
7160     Operands.erase(Operands.begin() + 1);
7161 
7162   // Some instructions have the same mnemonic, but don't always
7163   // have a predicate. Distinguish them here and delete the
7164   // appropriate predicate if needed.  This could be either the scalar
7165   // predication code or the vector predication code.
7166   if (PredicationCode == ARMCC::AL &&
7167       shouldOmitPredicateOperand(Mnemonic, Operands))
7168     Operands.erase(Operands.begin() + 1);
7169 
7170 
7171   if (hasMVE()) {
7172     if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands) &&
7173         Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
7174       // Very nasty hack to deal with the ambiguity between the vector
7175       // predicated vmovlt and the scalar predicated vmov with condition
7176       // 'lt'.  We cannot tell them apart until we have parsed their operands.
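      // For example, 'vmovlt.u16 q0, q1' reaches this point as 'vmov' with an
      // 'lt' condition and is rewritten as the MVE VMOVLT below.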
7177       Operands.erase(Operands.begin() + 1);
7178       Operands.erase(Operands.begin());
7179       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7180       SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7181                                          Mnemonic.size() - 1 + CarrySetting);
7182       Operands.insert(Operands.begin(),
7183                       ARMOperand::CreateVPTPred(ARMVCC::None, PLoc));
7184       Operands.insert(Operands.begin(),
7185                       ARMOperand::CreateToken(StringRef("vmovlt"), MLoc));
7186     } else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE &&
7187                !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7188       // Another nasty hack to deal with the ambiguity between vcvt with scalar
7189       // predication 'ne' and vcvtn with vector predication 'e'.  As above we
7190       // can only distinguish between the two after we have parsed their
7191       // operands.
7192       Operands.erase(Operands.begin() + 1);
7193       Operands.erase(Operands.begin());
7194       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7195       SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7196                                          Mnemonic.size() - 1 + CarrySetting);
7197       Operands.insert(Operands.begin(),
7198                       ARMOperand::CreateVPTPred(ARMVCC::Else, PLoc));
7199       Operands.insert(Operands.begin(),
7200                       ARMOperand::CreateToken(StringRef("vcvtn"), MLoc));
7201     } else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT &&
7202                !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7203       // Another hack, this time to distinguish between scalar predicated vmul
7204       // with 'lt' predication code and the vector instruction vmullt with
7205       // vector predication code "none".
7206       Operands.erase(Operands.begin() + 1);
7207       Operands.erase(Operands.begin());
7208       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7209       Operands.insert(Operands.begin(),
7210                       ARMOperand::CreateToken(StringRef("vmullt"), MLoc));
7211     }
7212     // For vmov and vcmp, as mentioned earlier, we did not add the vector
7213     // predication code, since these may contain operands that require
7214     // special parsing.  So now we have to see if they require vector
7215     // predication and replace the scalar one with the vector predication
7216     // operand if that is the case.
7217     else if (Mnemonic == "vmov" || Mnemonic.starts_with("vcmp") ||
7218              (Mnemonic.starts_with("vcvt") && !Mnemonic.starts_with("vcvta") &&
7219               !Mnemonic.starts_with("vcvtn") &&
7220               !Mnemonic.starts_with("vcvtp") &&
7221               !Mnemonic.starts_with("vcvtm"))) {
7222       if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7223         // We could not split the vector predicate off vcvt because it might
7224         // have been the scalar vcvtt instruction.  Now that we know it's a
7225         // vector instruction, we still need to check whether it's the vector
7226         // predicated vcvt with 'Then' predication or the vector vcvtt.  We can
7227         // distinguish the two based on the suffixes: if it is any of
7228         // ".f16.f32", ".f32.f16", ".f16.f64" or ".f64.f16" then it is the vcvtt.
7229         if (Mnemonic.starts_with("vcvtt") && Operands.size() >= 4) {
7230           auto Sz1 = static_cast<ARMOperand &>(*Operands[2]);
7231           auto Sz2 = static_cast<ARMOperand &>(*Operands[3]);
7232           if (!(Sz1.isToken() && Sz1.getToken().starts_with(".f") &&
7233                 Sz2.isToken() && Sz2.getToken().starts_with(".f"))) {
7234             Operands.erase(Operands.begin());
7235             SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7236             VPTPredicationCode = ARMVCC::Then;
7237 
7238             Mnemonic = Mnemonic.substr(0, 4);
7239             Operands.insert(Operands.begin(),
7240                             ARMOperand::CreateToken(Mnemonic, MLoc));
7241           }
7242         }
7243         Operands.erase(Operands.begin() + 1);
7244         SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7245                                           Mnemonic.size() + CarrySetting);
7246         Operands.insert(Operands.begin() + 1,
7247                         ARMOperand::CreateVPTPred(
7248                             ARMVCC::VPTCodes(VPTPredicationCode), PLoc));
7249       }
7250     } else if (CanAcceptVPTPredicationCode) {
7251       // For all other instructions, make sure only one of the two
7252       // predication operands is left behind, depending on whether we should
7253       // use the vector predication.
7254       if (shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7255         if (CanAcceptPredicationCode)
7256           Operands.erase(Operands.begin() + 2);
7257         else
7258           Operands.erase(Operands.begin() + 1);
7259       } else if (CanAcceptPredicationCode && PredicationCode == ARMCC::AL) {
7260         Operands.erase(Operands.begin() + 1);
7261       }
7262     }
7263   }
7264 
7265   if (VPTPredicationCode != ARMVCC::None) {
7266     bool usedVPTPredicationCode = false;
7267     for (unsigned I = 1; I < Operands.size(); ++I)
7268       if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7269         usedVPTPredicationCode = true;
7270     if (!usedVPTPredicationCode) {
7271       // If we have a VPT predication code and we haven't just turned it
7272       // into an operand, then it was a mistake for splitMnemonic to
7273       // separate it from the rest of the mnemonic in the first place,
7274       // and this may lead to wrong disassembly (e.g. scalar floating
7275       // point VCMPE is actually a different instruction from VCMP, so
7276       // we mustn't treat them the same). In that situation, glue it
7277       // back on.
7278       Mnemonic = Name.slice(0, Mnemonic.size() + 1);
7279       Operands.erase(Operands.begin());
7280       Operands.insert(Operands.begin(),
7281                       ARMOperand::CreateToken(Mnemonic, NameLoc));
7282     }
7283   }
7284 
7285   // ARM mode 'blx' needs special handling, as the register operand version
7286   // is predicable, but the label operand version is not. So, we can't rely
7287   // on the Mnemonic based checking to correctly figure out when to put
7288   // a k_CondCode operand in the list. If we're trying to match the label
7289   // version, remove the k_CondCode operand here.
7290   if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
7291       static_cast<ARMOperand &>(*Operands[2]).isImm())
7292     Operands.erase(Operands.begin() + 1);
7293 
7294   // Adjust operands of ldrexd/strexd to MCK_GPRPair.
7295   // ldrexd/strexd require an even/odd GPR pair. To enforce this constraint,
7296   // a single GPRPair reg operand is used in the .td file to replace the two
7297   // GPRs. However, when parsing from asm, the two GPRs cannot be
7298   // automatically expressed as a GPRPair, so we have to manually merge
7299   // them.
7300   // FIXME: We would really like to be able to tablegen'erate this.
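  // For example, 'ldrexd r0, r1, [r2]' has r0, r1 merged into the single
  // GPRPair operand r0_r1; a non-sequential or odd-numbered pair such as
  // 'ldrexd r1, r2, [r3]' is rejected below.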
7301   if (!isThumb() && Operands.size() > 4 &&
7302       (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
7303        Mnemonic == "stlexd")) {
7304     bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
7305     unsigned Idx = isLoad ? 2 : 3;
7306     ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
7307     ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
7308 
7309     const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
7310     // Adjust only if Op1 and Op2 are GPRs.
7311     if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
7312         MRC.contains(Op2.getReg())) {
7313       unsigned Reg1 = Op1.getReg();
7314       unsigned Reg2 = Op2.getReg();
7315       unsigned Rt = MRI->getEncodingValue(Reg1);
7316       unsigned Rt2 = MRI->getEncodingValue(Reg2);
7317 
7318       // Rt2 must be Rt + 1 and Rt must be even.
7319       if (Rt + 1 != Rt2 || (Rt & 1)) {
7320         return Error(Op2.getStartLoc(),
7321                      isLoad ? "destination operands must be sequential"
7322                             : "source operands must be sequential");
7323       }
7324       unsigned NewReg = MRI->getMatchingSuperReg(
7325           Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
7326       Operands[Idx] =
7327           ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
7328       Operands.erase(Operands.begin() + Idx + 1);
7329     }
7330   }
7331 
7332   // GNU Assembler extension (compatibility).
7333   fixupGNULDRDAlias(Mnemonic, Operands);
7334 
7335   // FIXME: As said above, this is all a pretty gross hack.  This instruction
7336   // does not fit with other "subs" and tblgen.
7337   // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
7338   // so the Mnemonic is the original name "subs" and delete the cc_out
7339   // operand so it will match the table entry.
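  // For example, 'subs pc, lr, #4' (an exception return) is matched through
  // the original 'subs' mnemonic after this adjustment.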
7340   if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
7341       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
7342       static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
7343       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
7344       static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
7345       static_cast<ARMOperand &>(*Operands[5]).isImm()) {
7346     Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
7347     Operands.erase(Operands.begin() + 1);
7348   }
7349   return false;
7350 }
7351 
7352 // Validate context-sensitive operand constraints.
7353 
7354 // Return 'true' if the register list contains non-low GPR registers (other
7355 // than HiReg, when HiReg is non-zero), 'false' otherwise. If Reg is in the
7356 // register list, set 'containsReg' to true.
7357 static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
7358                                  unsigned Reg, unsigned HiReg,
7359                                  bool &containsReg) {
7360   containsReg = false;
7361   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
7362     unsigned OpReg = Inst.getOperand(i).getReg();
7363     if (OpReg == Reg)
7364       containsReg = true;
7365     // Anything other than a low register isn't legal here.
7366     if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
7367       return true;
7368   }
7369   return false;
7370 }
7371 
7372 // Check if the specified register is in the register list of the inst,
7373 // starting at the indicated operand number.
7374 static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
7375   for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
7376     unsigned OpReg = Inst.getOperand(i).getReg();
7377     if (OpReg == Reg)
7378       return true;
7379   }
7380   return false;
7381 }
7382 
7383 // Return true if instruction has the interesting property of being
7384 // allowed in IT blocks, but not being predicable.
7385 static bool instIsBreakpoint(const MCInst &Inst) {
7386     return Inst.getOpcode() == ARM::tBKPT ||
7387            Inst.getOpcode() == ARM::BKPT ||
7388            Inst.getOpcode() == ARM::tHLT ||
7389            Inst.getOpcode() == ARM::HLT;
7390 }
7391 
7392 bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
7393                                        const OperandVector &Operands,
7394                                        unsigned ListNo, bool IsARPop) {
7395   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
7396   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
7397 
7398   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
7399   bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
7400   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
7401 
7402   if (!IsARPop && ListContainsSP)
7403     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7404                  "SP may not be in the register list");
7405   else if (ListContainsPC && ListContainsLR)
7406     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7407                  "PC and LR may not be in the register list simultaneously");
7408   return false;
7409 }
7410 
7411 bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
7412                                        const OperandVector &Operands,
7413                                        unsigned ListNo) {
7414   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
7415   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
7416 
7417   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
7418   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
7419 
7420   if (ListContainsSP && ListContainsPC)
7421     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7422                  "SP and PC may not be in the register list");
7423   else if (ListContainsSP)
7424     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7425                  "SP may not be in the register list");
7426   else if (ListContainsPC)
7427     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7428                  "PC may not be in the register list");
7429   return false;
7430 }
7431 
7432 bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
7433                                     const OperandVector &Operands,
7434                                     bool Load, bool ARMMode, bool Writeback) {
7435   unsigned RtIndex = Load || !Writeback ? 0 : 1;
7436   unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
7437   unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
7438 
7439   if (ARMMode) {
7440     // Rt can't be R14.
7441     if (Rt == 14)
7442       return Error(Operands[3]->getStartLoc(),
7443                   "Rt can't be R14");
7444 
7445     // Rt must be even-numbered.
7446     if ((Rt & 1) == 1)
7447       return Error(Operands[3]->getStartLoc(),
7448                    "Rt must be even-numbered");
7449 
7450     // Rt2 must be Rt + 1.
7451     if (Rt2 != Rt + 1) {
7452       if (Load)
7453         return Error(Operands[3]->getStartLoc(),
7454                      "destination operands must be sequential");
7455       else
7456         return Error(Operands[3]->getStartLoc(),
7457                      "source operands must be sequential");
7458     }
7459 
7460     // FIXME: Diagnose m == 15
7461     // FIXME: Diagnose ldrd with m == t || m == t2.
7462   }
7463 
7464   if (!ARMMode && Load) {
7465     if (Rt2 == Rt)
7466       return Error(Operands[3]->getStartLoc(),
7467                    "destination operands can't be identical");
7468   }
7469 
7470   if (Writeback) {
7471     unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
7472 
7473     if (Rn == Rt || Rn == Rt2) {
7474       if (Load)
7475         return Error(Operands[3]->getStartLoc(),
7476                      "base register needs to be different from destination "
7477                      "registers");
7478       else
7479         return Error(Operands[3]->getStartLoc(),
7480                      "source register and base register can't be identical");
7481     }
7482 
7483     // FIXME: Diagnose ldrd/strd with writeback and n == 15.
7484     // (Except the immediate form of ldrd?)
7485   }
7486 
7487   return false;
7488 }
7489 
7490 static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
7491   for (unsigned i = 0; i < MCID.NumOperands; ++i) {
7492     if (ARM::isVpred(MCID.operands()[i].OperandType))
7493       return i;
7494   }
7495   return -1;
7496 }
7497 
7498 static bool isVectorPredicable(const MCInstrDesc &MCID) {
7499   return findFirstVectorPredOperandIdx(MCID) != -1;
7500 }
7501 
7502 static bool isARMMCExpr(MCParsedAsmOperand &MCOp) {
7503   ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
7504   if (!Op.isImm())
7505     return false;
7506   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7507   if (CE)
7508     return false;
7509   const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
7510   if (!E)
7511     return false;
7512   return true;
7513 }
7514 
7515 // FIXME: We would really like to be able to tablegen'erate this.
7516 bool ARMAsmParser::validateInstruction(MCInst &Inst,
7517                                        const OperandVector &Operands) {
7518   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
7519   SMLoc Loc = Operands[0]->getStartLoc();
7520 
7521   // Check the IT block state first.
7522   // NOTE: BKPT and HLT instructions have the interesting property of being
7523   // allowed in IT blocks, but not being predicable. They just always execute.
7524   if (inITBlock() && !instIsBreakpoint(Inst)) {
7525     // The instruction must be predicable.
7526     if (!MCID.isPredicable())
7527       return Error(Loc, "instructions in IT block must be predicable");
7528     ARMCC::CondCodes Cond = ARMCC::CondCodes(
7529         Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
7530     if (Cond != currentITCond()) {
7531       // Find the condition code Operand to get its SMLoc information.
7532       SMLoc CondLoc;
7533       for (unsigned I = 1; I < Operands.size(); ++I)
7534         if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
7535           CondLoc = Operands[I]->getStartLoc();
7536       return Error(CondLoc, "incorrect condition in IT block; got '" +
7537                                 StringRef(ARMCondCodeToString(Cond)) +
7538                                 "', but expected '" +
7539                                 ARMCondCodeToString(currentITCond()) + "'");
7540     }
7541   // Check for non-'al' condition codes outside of the IT block.
7542   } else if (isThumbTwo() && MCID.isPredicable() &&
7543              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7544              ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
7545              Inst.getOpcode() != ARM::t2Bcc &&
7546              Inst.getOpcode() != ARM::t2BFic) {
7547     return Error(Loc, "predicated instructions must be in IT block");
7548   } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
7549              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7550                  ARMCC::AL) {
7551     return Warning(Loc, "predicated instructions should be in IT block");
7552   } else if (!MCID.isPredicable()) {
7553     // Check the instruction doesn't have a predicate operand anyway
7554     // that it's not allowed to use. Sometimes this happens in order
7555     // to keep instructions the same shape even though one cannot
7556     // legally be predicated, e.g. vmul.f16 vs vmul.f32.
7557     for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
7558       if (MCID.operands()[i].isPredicate()) {
7559         if (Inst.getOperand(i).getImm() != ARMCC::AL)
7560           return Error(Loc, "instruction is not predicable");
7561         break;
7562       }
7563     }
7564   }
7565 
7566   // PC-setting instructions in an IT block, but not the last instruction of
7567   // the block, are UNPREDICTABLE.
7568   if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
7569     return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
7570   }
7571 
7572   if (inVPTBlock() && !instIsBreakpoint(Inst)) {
7573     unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
7574     if (!isVectorPredicable(MCID))
7575       return Error(Loc, "instruction in VPT block must be predicable");
7576     unsigned Pred = Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm();
7577     unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
7578     if (Pred != VPTPred) {
7579       SMLoc PredLoc;
7580       for (unsigned I = 1; I < Operands.size(); ++I)
7581         if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7582           PredLoc = Operands[I]->getStartLoc();
7583       return Error(PredLoc, "incorrect predication in VPT block; got '" +
7584                    StringRef(ARMVPTPredToString(ARMVCC::VPTCodes(Pred))) +
7585                    "', but expected '" +
7586                    ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'");
7587     }
7588   }
7589   else if (isVectorPredicable(MCID) &&
7590            Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() !=
7591            ARMVCC::None)
7592     return Error(Loc, "VPT predicated instructions must be in VPT block");
7593 
7594   const unsigned Opcode = Inst.getOpcode();
7595   switch (Opcode) {
7596   case ARM::t2IT: {
7597     // Encoding is unpredictable if it ever results in a notional 'NV'
7598     // predicate. Since we don't parse 'NV' directly this means an 'AL'
7599     // predicate with an "else" mask bit.
7600     unsigned Cond = Inst.getOperand(0).getImm();
7601     unsigned Mask = Inst.getOperand(1).getImm();
7602 
7603     // Conditions only allowing a 't' are those with no set bit except
7604     // the lowest-order one that indicates the end of the sequence. In
7605     // other words, powers of 2.
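    // For example, a mask written as 'ite al' would be rejected here, while
    // 'itt al' passes this check.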
7606     if (Cond == ARMCC::AL && llvm::popcount(Mask) != 1)
7607       return Error(Loc, "unpredictable IT predicate sequence");
7608     break;
7609   }
7610   case ARM::LDRD:
7611     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
7612                          /*Writeback*/false))
7613       return true;
7614     break;
7615   case ARM::LDRD_PRE:
7616   case ARM::LDRD_POST:
7617     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
7618                          /*Writeback*/true))
7619       return true;
7620     break;
7621   case ARM::t2LDRDi8:
7622     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
7623                          /*Writeback*/false))
7624       return true;
7625     break;
7626   case ARM::t2LDRD_PRE:
7627   case ARM::t2LDRD_POST:
7628     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
7629                          /*Writeback*/true))
7630       return true;
7631     break;
7632   case ARM::t2BXJ: {
7633     const unsigned RmReg = Inst.getOperand(0).getReg();
7634     // Rm = SP is no longer unpredictable in v8-A
7635     if (RmReg == ARM::SP && !hasV8Ops())
7636       return Error(Operands[2]->getStartLoc(),
7637                    "r13 (SP) is an unpredictable operand to BXJ");
7638     return false;
7639   }
7640   case ARM::STRD:
7641     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
7642                          /*Writeback*/false))
7643       return true;
7644     break;
7645   case ARM::STRD_PRE:
7646   case ARM::STRD_POST:
7647     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
7648                          /*Writeback*/true))
7649       return true;
7650     break;
7651   case ARM::t2STRD_PRE:
7652   case ARM::t2STRD_POST:
7653     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/false,
7654                          /*Writeback*/true))
7655       return true;
7656     break;
7657   case ARM::STR_PRE_IMM:
7658   case ARM::STR_PRE_REG:
7659   case ARM::t2STR_PRE:
7660   case ARM::STR_POST_IMM:
7661   case ARM::STR_POST_REG:
7662   case ARM::t2STR_POST:
7663   case ARM::STRH_PRE:
7664   case ARM::t2STRH_PRE:
7665   case ARM::STRH_POST:
7666   case ARM::t2STRH_POST:
7667   case ARM::STRB_PRE_IMM:
7668   case ARM::STRB_PRE_REG:
7669   case ARM::t2STRB_PRE:
7670   case ARM::STRB_POST_IMM:
7671   case ARM::STRB_POST_REG:
7672   case ARM::t2STRB_POST: {
7673     // Rt must be different from Rn.
7674     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7675     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7676 
7677     if (Rt == Rn)
7678       return Error(Operands[3]->getStartLoc(),
7679                    "source register and base register can't be identical");
7680     return false;
7681   }
7682   case ARM::t2LDR_PRE_imm:
7683   case ARM::t2LDR_POST_imm:
7684   case ARM::t2STR_PRE_imm:
7685   case ARM::t2STR_POST_imm: {
7686     // Rt must be different from Rn.
7687     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7688     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7689 
7690     if (Rt == Rn)
7691       return Error(Operands[3]->getStartLoc(),
7692                    "destination register and base register can't be identical");
7693     if (Inst.getOpcode() == ARM::t2LDR_POST_imm ||
7694         Inst.getOpcode() == ARM::t2STR_POST_imm) {
7695       int Imm = Inst.getOperand(2).getImm();
7696       if (Imm > 255 || Imm < -255)
7697         return Error(Operands[5]->getStartLoc(),
7698                      "operand must be in range [-255, 255]");
7699     }
7700     if (Inst.getOpcode() == ARM::t2STR_PRE_imm ||
7701         Inst.getOpcode() == ARM::t2STR_POST_imm) {
7702       if (Inst.getOperand(0).getReg() == ARM::PC) {
7703         return Error(Operands[3]->getStartLoc(),
7704                      "operand must be a register in range [r0, r14]");
7705       }
7706     }
7707     return false;
7708   }
7709 
7710   case ARM::t2LDRB_OFFSET_imm:
7711   case ARM::t2LDRB_PRE_imm:
7712   case ARM::t2LDRB_POST_imm:
7713   case ARM::t2STRB_OFFSET_imm:
7714   case ARM::t2STRB_PRE_imm:
7715   case ARM::t2STRB_POST_imm: {
7716     if (Inst.getOpcode() == ARM::t2LDRB_POST_imm ||
7717         Inst.getOpcode() == ARM::t2STRB_POST_imm ||
7718         Inst.getOpcode() == ARM::t2LDRB_PRE_imm ||
7719         Inst.getOpcode() == ARM::t2STRB_PRE_imm) {
7720       int Imm = Inst.getOperand(2).getImm();
7721       if (Imm > 255 || Imm < -255)
7722         return Error(Operands[5]->getStartLoc(),
7723                      "operand must be in range [-255, 255]");
7724     } else if (Inst.getOpcode() == ARM::t2LDRB_OFFSET_imm ||
7725                Inst.getOpcode() == ARM::t2STRB_OFFSET_imm) {
7726       int Imm = Inst.getOperand(2).getImm();
7727       if (Imm > 0 || Imm < -255)
7728         return Error(Operands[5]->getStartLoc(),
7729                      "operand must be in range [0, 255] with a negative sign");
7730     }
7731     if (Inst.getOperand(0).getReg() == ARM::PC) {
7732       return Error(Operands[3]->getStartLoc(),
7733                    "if operand is PC, should call the LDRB (literal)");
7734     }
7735     return false;
7736   }
7737 
7738   case ARM::t2LDRH_OFFSET_imm:
7739   case ARM::t2LDRH_PRE_imm:
7740   case ARM::t2LDRH_POST_imm:
7741   case ARM::t2STRH_OFFSET_imm:
7742   case ARM::t2STRH_PRE_imm:
7743   case ARM::t2STRH_POST_imm: {
7744     if (Inst.getOpcode() == ARM::t2LDRH_POST_imm ||
7745         Inst.getOpcode() == ARM::t2STRH_POST_imm ||
7746         Inst.getOpcode() == ARM::t2LDRH_PRE_imm ||
7747         Inst.getOpcode() == ARM::t2STRH_PRE_imm) {
7748       int Imm = Inst.getOperand(2).getImm();
7749       if (Imm > 255 || Imm < -255)
7750         return Error(Operands[5]->getStartLoc(),
7751                      "operand must be in range [-255, 255]");
7752     } else if (Inst.getOpcode() == ARM::t2LDRH_OFFSET_imm ||
7753                Inst.getOpcode() == ARM::t2STRH_OFFSET_imm) {
7754       int Imm = Inst.getOperand(2).getImm();
7755       if (Imm > 0 || Imm < -255)
7756         return Error(Operands[5]->getStartLoc(),
7757                      "operand must be in range [0, 255] with a negative sign");
7758     }
7759     if (Inst.getOperand(0).getReg() == ARM::PC) {
7760       return Error(Operands[3]->getStartLoc(),
7761                    "if operand is PC, should call the LDRH (literal)");
7762     }
7763     return false;
7764   }
7765 
7766   case ARM::t2LDRSB_OFFSET_imm:
7767   case ARM::t2LDRSB_PRE_imm:
7768   case ARM::t2LDRSB_POST_imm: {
7769     if (Inst.getOpcode() == ARM::t2LDRSB_POST_imm ||
7770         Inst.getOpcode() == ARM::t2LDRSB_PRE_imm) {
7771       int Imm = Inst.getOperand(2).getImm();
7772       if (Imm > 255 || Imm < -255)
7773         return Error(Operands[5]->getStartLoc(),
7774                      "operand must be in range [-255, 255]");
7775     } else if (Inst.getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
7776       int Imm = Inst.getOperand(2).getImm();
7777       if (Imm > 0 || Imm < -255)
7778         return Error(Operands[5]->getStartLoc(),
7779                      "operand must be in range [0, 255] with a negative sign");
7780     }
7781     if (Inst.getOperand(0).getReg() == ARM::PC) {
7782       return Error(Operands[3]->getStartLoc(),
7783                    "if operand is PC, should call the LDRH (literal)");
7784     }
7785     return false;
7786   }
7787 
7788   case ARM::t2LDRSH_OFFSET_imm:
7789   case ARM::t2LDRSH_PRE_imm:
7790   case ARM::t2LDRSH_POST_imm: {
7791     if (Inst.getOpcode() == ARM::t2LDRSH_POST_imm ||
7792         Inst.getOpcode() == ARM::t2LDRSH_PRE_imm) {
7793       int Imm = Inst.getOperand(2).getImm();
7794       if (Imm > 255 || Imm < -255)
7795         return Error(Operands[5]->getStartLoc(),
7796                      "operand must be in range [-255, 255]");
7797     } else if (Inst.getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
7798       int Imm = Inst.getOperand(2).getImm();
7799       if (Imm > 0 || Imm < -255)
7800         return Error(Operands[5]->getStartLoc(),
7801                      "operand must be in range [0, 255] with a negative sign");
7802     }
7803     if (Inst.getOperand(0).getReg() == ARM::PC) {
7804       return Error(Operands[3]->getStartLoc(),
7805                    "if operand is PC, should call the LDRH (literal)");
7806     }
7807     return false;
7808   }
7809 
7810   case ARM::LDR_PRE_IMM:
7811   case ARM::LDR_PRE_REG:
7812   case ARM::t2LDR_PRE:
7813   case ARM::LDR_POST_IMM:
7814   case ARM::LDR_POST_REG:
7815   case ARM::t2LDR_POST:
7816   case ARM::LDRH_PRE:
7817   case ARM::t2LDRH_PRE:
7818   case ARM::LDRH_POST:
7819   case ARM::t2LDRH_POST:
7820   case ARM::LDRSH_PRE:
7821   case ARM::t2LDRSH_PRE:
7822   case ARM::LDRSH_POST:
7823   case ARM::t2LDRSH_POST:
7824   case ARM::LDRB_PRE_IMM:
7825   case ARM::LDRB_PRE_REG:
7826   case ARM::t2LDRB_PRE:
7827   case ARM::LDRB_POST_IMM:
7828   case ARM::LDRB_POST_REG:
7829   case ARM::t2LDRB_POST:
7830   case ARM::LDRSB_PRE:
7831   case ARM::t2LDRSB_PRE:
7832   case ARM::LDRSB_POST:
7833   case ARM::t2LDRSB_POST: {
7834     // Rt must be different from Rn.
7835     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7836     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7837 
7838     if (Rt == Rn)
7839       return Error(Operands[3]->getStartLoc(),
7840                    "destination register and base register can't be identical");
7841     return false;
7842   }
7843 
7844   case ARM::MVE_VLDRBU8_rq:
7845   case ARM::MVE_VLDRBU16_rq:
7846   case ARM::MVE_VLDRBS16_rq:
7847   case ARM::MVE_VLDRBU32_rq:
7848   case ARM::MVE_VLDRBS32_rq:
7849   case ARM::MVE_VLDRHU16_rq:
7850   case ARM::MVE_VLDRHU16_rq_u:
7851   case ARM::MVE_VLDRHU32_rq:
7852   case ARM::MVE_VLDRHU32_rq_u:
7853   case ARM::MVE_VLDRHS32_rq:
7854   case ARM::MVE_VLDRHS32_rq_u:
7855   case ARM::MVE_VLDRWU32_rq:
7856   case ARM::MVE_VLDRWU32_rq_u:
7857   case ARM::MVE_VLDRDU64_rq:
7858   case ARM::MVE_VLDRDU64_rq_u:
7859   case ARM::MVE_VLDRWU32_qi:
7860   case ARM::MVE_VLDRWU32_qi_pre:
7861   case ARM::MVE_VLDRDU64_qi:
7862   case ARM::MVE_VLDRDU64_qi_pre: {
7863     // Qd must be different from Qm.
7864     unsigned QdIdx = 0, QmIdx = 2;
7865     bool QmIsPointer = false;
7866     switch (Opcode) {
7867     case ARM::MVE_VLDRWU32_qi:
7868     case ARM::MVE_VLDRDU64_qi:
7869       QmIdx = 1;
7870       QmIsPointer = true;
7871       break;
7872     case ARM::MVE_VLDRWU32_qi_pre:
7873     case ARM::MVE_VLDRDU64_qi_pre:
7874       QdIdx = 1;
7875       QmIsPointer = true;
7876       break;
7877     }
7878 
7879     const unsigned Qd = MRI->getEncodingValue(Inst.getOperand(QdIdx).getReg());
7880     const unsigned Qm = MRI->getEncodingValue(Inst.getOperand(QmIdx).getReg());
7881 
7882     if (Qd == Qm) {
7883       return Error(Operands[3]->getStartLoc(),
7884                    Twine("destination vector register and vector ") +
7885                    (QmIsPointer ? "pointer" : "offset") +
7886                    " register can't be identical");
7887     }
7888     return false;
7889   }
7890 
7891   case ARM::SBFX:
7892   case ARM::t2SBFX:
7893   case ARM::UBFX:
7894   case ARM::t2UBFX: {
7895     // Width must be in range [1, 32-lsb].
7896     unsigned LSB = Inst.getOperand(2).getImm();
7897     unsigned Widthm1 = Inst.getOperand(3).getImm();
7898     if (Widthm1 >= 32 - LSB)
7899       return Error(Operands[5]->getStartLoc(),
7900                    "bitfield width must be in range [1,32-lsb]");
7901     return false;
7902   }
7903   // Notionally handles ARM::tLDMIA_UPD too.
7904   case ARM::tLDMIA: {
7905     // If we're parsing Thumb2, the .w variant is available and handles
7906     // most cases that are normally illegal for a Thumb1 LDM instruction.
7907     // We'll make the transformation in processInstruction() if necessary.
7908     //
7909     // Thumb LDM instructions are writeback iff the base register is not
7910     // in the register list.
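    // For example, 'ldm r0, {r1, r2}' must be written 'ldm r0!, {r1, r2}' in
    // Thumb1, while 'ldm r0, {r0, r1}' must not have the '!'.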
7911     unsigned Rn = Inst.getOperand(0).getReg();
7912     bool HasWritebackToken =
7913         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
7914          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
7915     bool ListContainsBase;
7916     if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
7917       return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
7918                    "registers must be in range r0-r7");
7919     // If we should have writeback, then there should be a '!' token.
7920     if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
7921       return Error(Operands[2]->getStartLoc(),
7922                    "writeback operator '!' expected");
7923     // If we should not have writeback, there must not be a '!'. This is
7924     // true even for the 32-bit wide encodings.
7925     if (ListContainsBase && HasWritebackToken)
7926       return Error(Operands[3]->getStartLoc(),
7927                    "writeback operator '!' not allowed when base register "
7928                    "in register list");
7929 
7930     if (validatetLDMRegList(Inst, Operands, 3))
7931       return true;
7932     break;
7933   }
7934   case ARM::LDMIA_UPD:
7935   case ARM::LDMDB_UPD:
7936   case ARM::LDMIB_UPD:
7937   case ARM::LDMDA_UPD:
7938     // ARM variants loading and updating the same register are only officially
7939     // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
7940     if (!hasV7Ops())
7941       break;
7942     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
7943       return Error(Operands.back()->getStartLoc(),
7944                    "writeback register not allowed in register list");
7945     break;
7946   case ARM::t2LDMIA:
7947   case ARM::t2LDMDB:
7948     if (validatetLDMRegList(Inst, Operands, 3))
7949       return true;
7950     break;
7951   case ARM::t2STMIA:
7952   case ARM::t2STMDB:
7953     if (validatetSTMRegList(Inst, Operands, 3))
7954       return true;
7955     break;
7956   case ARM::t2LDMIA_UPD:
7957   case ARM::t2LDMDB_UPD:
7958   case ARM::t2STMIA_UPD:
7959   case ARM::t2STMDB_UPD:
7960     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
7961       return Error(Operands.back()->getStartLoc(),
7962                    "writeback register not allowed in register list");
7963 
7964     if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
7965       if (validatetLDMRegList(Inst, Operands, 3))
7966         return true;
7967     } else {
7968       if (validatetSTMRegList(Inst, Operands, 3))
7969         return true;
7970     }
7971     break;
7972 
7973   case ARM::sysLDMIA_UPD:
7974   case ARM::sysLDMDA_UPD:
7975   case ARM::sysLDMDB_UPD:
7976   case ARM::sysLDMIB_UPD:
7977     if (!listContainsReg(Inst, 3, ARM::PC))
7978       return Error(Operands[4]->getStartLoc(),
7979                    "writeback register only allowed on system LDM "
7980                    "if PC in register-list");
7981     break;
7982   case ARM::sysSTMIA_UPD:
7983   case ARM::sysSTMDA_UPD:
7984   case ARM::sysSTMDB_UPD:
7985   case ARM::sysSTMIB_UPD:
7986     return Error(Operands[2]->getStartLoc(),
7987                  "system STM cannot have writeback register");
7988   case ARM::tMUL:
7989     // The second source operand must be the same register as the destination
7990     // operand.
7991     //
7992     // In this case, we must directly check the parsed operands because the
7993     // cvtThumbMultiply() function is written in such a way that it guarantees
7994     // this first statement is always true for the new Inst.  Essentially, the
7995     // destination is unconditionally copied into the second source operand
7996     // without checking to see if it matches what we actually parsed.
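    // For example, 'muls r0, r1, r0' (or 'muls r0, r0, r1') is accepted,
    // while 'muls r0, r1, r2' is rejected.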
7997     if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
7998                                  ((ARMOperand &)*Operands[5]).getReg()) &&
7999         (((ARMOperand &)*Operands[3]).getReg() !=
8000          ((ARMOperand &)*Operands[4]).getReg())) {
8001       return Error(Operands[3]->getStartLoc(),
8002                    "destination register must match source register");
8003     }
8004     break;
8005 
8006   // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
8007   // so only issue a diagnostic for Thumb1. The instructions will be
8008   // switched to the t2 encodings in processInstruction() if necessary.
8009   case ARM::tPOP: {
8010     bool ListContainsBase;
8011     if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
8012         !isThumbTwo())
8013       return Error(Operands[2]->getStartLoc(),
8014                    "registers must be in range r0-r7 or pc");
8015     if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
8016       return true;
8017     break;
8018   }
8019   case ARM::tPUSH: {
8020     bool ListContainsBase;
8021     if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
8022         !isThumbTwo())
8023       return Error(Operands[2]->getStartLoc(),
8024                    "registers must be in range r0-r7 or lr");
8025     if (validatetSTMRegList(Inst, Operands, 2))
8026       return true;
8027     break;
8028   }
8029   case ARM::tSTMIA_UPD: {
8030     bool ListContainsBase, InvalidLowList;
8031     InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
8032                                           0, ListContainsBase);
8033     if (InvalidLowList && !isThumbTwo())
8034       return Error(Operands[4]->getStartLoc(),
8035                    "registers must be in range r0-r7");
8036 
8037     // This would be converted to a 32-bit stm, but that's not valid if the
8038     // writeback register is in the list.
8039     if (InvalidLowList && ListContainsBase)
8040       return Error(Operands[4]->getStartLoc(),
8041                    "writeback operator '!' not allowed when base register "
8042                    "in register list");
8043 
8044     if (validatetSTMRegList(Inst, Operands, 4))
8045       return true;
8046     break;
8047   }
8048   case ARM::tADDrSP:
8049     // If the non-SP source operand and the destination operand are not the
8050     // same, we need thumb2 (for the wide encoding), or we have an error.
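    // For example, 'add r1, sp, r1' is valid Thumb1, whereas
    // 'add r0, sp, r1' needs the wide Thumb2 encoding.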
8051     if (!isThumbTwo() &&
8052         Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8053       return Error(Operands[4]->getStartLoc(),
8054                    "source register must be the same as destination");
8055     }
8056     break;
8057 
8058   case ARM::t2ADDrr:
8059   case ARM::t2ADDrs:
8060   case ARM::t2SUBrr:
8061   case ARM::t2SUBrs:
8062     if (Inst.getOperand(0).getReg() == ARM::SP &&
8063         Inst.getOperand(1).getReg() != ARM::SP)
8064       return Error(Operands[4]->getStartLoc(),
8065                    "source register must be sp if destination is sp");
8066     break;
8067 
8068   // Final range checking for Thumb unconditional branch instructions.
8069   case ARM::tB:
8070     if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
8071       return Error(Operands[2]->getStartLoc(), "branch target out of range");
8072     break;
8073   case ARM::t2B: {
8074     int op = (Operands[2]->isImm()) ? 2 : 3;
8075     ARMOperand &Operand = static_cast<ARMOperand &>(*Operands[op]);
8076     // Delay the checks of symbolic expressions until they are resolved.
8077     if (!isa<MCBinaryExpr>(Operand.getImm()) &&
8078         !Operand.isSignedOffset<24, 1>())
8079       return Error(Operands[op]->getStartLoc(), "branch target out of range");
8080     break;
8081   }
8082   // Final range checking for Thumb conditional branch instructions.
8083   case ARM::tBcc:
8084     if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
8085       return Error(Operands[2]->getStartLoc(), "branch target out of range");
8086     break;
8087   case ARM::t2Bcc: {
8088     int Op = (Operands[2]->isImm()) ? 2 : 3;
8089     if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
8090       return Error(Operands[Op]->getStartLoc(), "branch target out of range");
8091     break;
8092   }
8093   case ARM::tCBZ:
8094   case ARM::tCBNZ: {
8095     if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
8096       return Error(Operands[2]->getStartLoc(), "branch target out of range");
8097     break;
8098   }
8099   case ARM::MOVi16:
8100   case ARM::MOVTi16:
8101   case ARM::t2MOVi16:
8102   case ARM::t2MOVTi16:
8103     {
8104     // We want to avoid misleadingly allowing something like "mov r0, <symbol>",
8105     // especially when we turn it into a movw and the expression <symbol> does
8106     // not have a :lower16: or :upper16: as part of the expression.  We don't
8107     // want the behavior of silently truncating, which can be unexpected and
8108     // lead to bugs that are difficult to find since this is an easy mistake
8109     // to make.
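    // For example, 'movw r0, :lower16:foo' is accepted, while a bare
    // 'movw r0, foo' is rejected below.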
8110     int i = (Operands[3]->isImm()) ? 3 : 4;
8111     ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
8112     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8113     if (CE) break;
8114     const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
8115     if (!E) break;
8116     const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
8117     if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
8118                        ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
8119       return Error(
8120           Op.getStartLoc(),
8121           "immediate expression for mov requires :lower16: or :upper16");
8122     break;
8123   }
8124   case ARM::tADDi8: {
8125     MCParsedAsmOperand &Op = *Operands[4];
8126     if (isARMMCExpr(Op) && !isThumbI8Relocation(Op))
8127       return Error(Op.getStartLoc(),
8128                    "Immediate expression for Thumb adds requires :lower0_7:,"
8129                    " :lower8_15:, :upper0_7: or :upper8_15:");
8130     break;
8131   }
8132   case ARM::tMOVi8: {
8133     MCParsedAsmOperand &Op = *Operands[2];
8134     if (isARMMCExpr(Op) && !isThumbI8Relocation(Op))
8135       return Error(Op.getStartLoc(),
8136                    "Immediate expression for Thumb movs requires :lower0_7:,"
8137                    " :lower8_15:, :upper0_7: or :upper8_15:");
8138     break;
8139   }
8140   case ARM::HINT:
8141   case ARM::t2HINT: {
8142     unsigned Imm8 = Inst.getOperand(0).getImm();
8143     unsigned Pred = Inst.getOperand(1).getImm();
8144     // ESB is not predicable (pred must be AL). Without the RAS extension, this
8145     // behaves as any other unallocated hint.
8146     if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
8147       return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
8148                                                "predicable, but condition "
8149                                                "code specified");
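    // CSDB (hint #20) is likewise not predicable; unlike ESB, it is rejected
    // here regardless of the feature set.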
8150     if (Imm8 == 0x14 && Pred != ARMCC::AL)
8151       return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
8152                                                "predicable, but condition "
8153                                                "code specified");
8154     break;
8155   }
8156   case ARM::t2BFi:
8157   case ARM::t2BFr:
8158   case ARM::t2BFLi:
8159   case ARM::t2BFLr: {
8160     if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<4, 1>() ||
8161         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
8162       return Error(Operands[2]->getStartLoc(),
8163                    "branch location out of range or not a multiple of 2");
8164 
8165     if (Opcode == ARM::t2BFi) {
8166       if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<16, 1>())
8167         return Error(Operands[3]->getStartLoc(),
8168                      "branch target out of range or not a multiple of 2");
8169     } else if (Opcode == ARM::t2BFLi) {
8170       if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<18, 1>())
8171         return Error(Operands[3]->getStartLoc(),
8172                      "branch target out of range or not a multiple of 2");
8173     }
8174     break;
8175   }
8176   case ARM::t2BFic: {
8177     if (!static_cast<ARMOperand &>(*Operands[1]).isUnsignedOffset<4, 1>() ||
8178         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
8179       return Error(Operands[1]->getStartLoc(),
8180                    "branch location out of range or not a multiple of 2");
8181 
8182     if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<16, 1>())
8183       return Error(Operands[2]->getStartLoc(),
8184                    "branch target out of range or not a multiple of 2");
8185 
8186     assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
8187            "branch location and else branch target should either both be "
8188            "immediates or both labels");
8189 
8190     if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) {
8191       int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm();
8192       if (Diff != 4 && Diff != 2)
8193         return Error(
8194             Operands[3]->getStartLoc(),
8195             "else branch target must be 2 or 4 greater than the branch location");
8196     }
8197     break;
8198   }
8199   case ARM::t2CLRM: {
8200     for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
8201       if (Inst.getOperand(i).isReg() &&
8202           !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8203               Inst.getOperand(i).getReg())) {
8204         return Error(Operands[2]->getStartLoc(),
8205                      "invalid register in register list. Valid registers are "
8206                      "r0-r12, lr/r14 and APSR.");
8207       }
8208     }
8209     break;
8210   }
8211   case ARM::DSB:
8212   case ARM::t2DSB: {
8213 
8214     if (Inst.getNumOperands() < 2)
8215       break;
8216 
8217     unsigned Option = Inst.getOperand(0).getImm();
8218     unsigned Pred = Inst.getOperand(1).getImm();
8219 
8220     // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL).
8221     if (Option == 0 && Pred != ARMCC::AL)
8222       return Error(Operands[1]->getStartLoc(),
8223                    "instruction 'ssbb' is not predicable, but condition code "
8224                    "specified");
8225     if (Option == 4 && Pred != ARMCC::AL)
8226       return Error(Operands[1]->getStartLoc(),
8227                    "instruction 'pssbb' is not predicable, but condition code "
8228                    "specified");
8229     break;
8230   }
8231   case ARM::VMOVRRS: {
8232     // Source registers must be sequential.
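    // e.g. 'vmov r0, r1, s0, s1' is valid, but 'vmov r0, r1, s0, s2' is not.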
8233     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
8234     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
8235     if (Sm1 != Sm + 1)
8236       return Error(Operands[5]->getStartLoc(),
8237                    "source operands must be sequential");
8238     break;
8239   }
8240   case ARM::VMOVSRR: {
8241     // Destination registers must be sequential.
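    // e.g. 'vmov s0, s1, r0, r1' is valid, but 'vmov s0, s2, r0, r1' is not.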
8242     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
8243     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
8244     if (Sm1 != Sm + 1)
8245       return Error(Operands[3]->getStartLoc(),
8246                    "destination operands must be sequential");
8247     break;
8248   }
8249   case ARM::VLDMDIA:
8250   case ARM::VSTMDIA: {
8251     ARMOperand &Op = static_cast<ARMOperand&>(*Operands[3]);
8252     auto &RegList = Op.getRegList();
8253     if (RegList.size() < 1 || RegList.size() > 16)
8254       return Error(Operands[3]->getStartLoc(),
8255                    "list of registers must be at least 1 and at most 16");
8256     break;
8257   }
8258   case ARM::MVE_VQDMULLs32bh:
8259   case ARM::MVE_VQDMULLs32th:
8260   case ARM::MVE_VCMULf32:
8261   case ARM::MVE_VMULLBs32:
8262   case ARM::MVE_VMULLTs32:
8263   case ARM::MVE_VMULLBu32:
8264   case ARM::MVE_VMULLTu32: {
8265     if (Operands[3]->getReg() == Operands[4]->getReg()) {
8266       return Error(Operands[3]->getStartLoc(),
8267                    "Qd register and Qn register can't be identical");
8268     }
8269     if (Operands[3]->getReg() == Operands[5]->getReg()) {
8270       return Error(Operands[3]->getStartLoc(),
8271                    "Qd register and Qm register can't be identical");
8272     }
8273     break;
8274   }
8275   case ARM::MVE_VREV64_8:
8276   case ARM::MVE_VREV64_16:
8277   case ARM::MVE_VREV64_32:
8278   case ARM::MVE_VQDMULL_qr_s32bh:
8279   case ARM::MVE_VQDMULL_qr_s32th: {
8280     if (Operands[3]->getReg() == Operands[4]->getReg()) {
8281       return Error(Operands[3]->getStartLoc(),
8282                    "Qd register and Qn register can't be identical");
8283     }
8284     break;
8285   }
8286   case ARM::MVE_VCADDi32:
8287   case ARM::MVE_VCADDf32:
8288   case ARM::MVE_VHCADDs32: {
8289     if (Operands[3]->getReg() == Operands[5]->getReg()) {
8290       return Error(Operands[3]->getStartLoc(),
8291                    "Qd register and Qm register can't be identical");
8292     }
8293     break;
8294   }
8295   case ARM::MVE_VMOV_rr_q: {
8296     if (Operands[4]->getReg() != Operands[6]->getReg())
8297       return Error(Operands[4]->getStartLoc(), "Q-registers must be the same");
8298     if (static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() !=
8299         static_cast<ARMOperand &>(*Operands[7]).getVectorIndex() + 2)
8300       return Error(Operands[5]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1");
8301     break;
8302   }
8303   case ARM::MVE_VMOV_q_rr: {
8304     if (Operands[2]->getReg() != Operands[4]->getReg())
8305       return Error(Operands[2]->getStartLoc(), "Q-registers must be the same");
8306     if (static_cast<ARMOperand &>(*Operands[3]).getVectorIndex() !=
8307         static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() + 2)
8308       return Error(Operands[3]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1");
8309     break;
8310   }
8311   case ARM::UMAAL:
8312   case ARM::UMLAL:
8313   case ARM::UMULL:
8314   case ARM::t2UMAAL:
8315   case ARM::t2UMLAL:
8316   case ARM::t2UMULL:
8317   case ARM::SMLAL:
8318   case ARM::SMLALBB:
8319   case ARM::SMLALBT:
8320   case ARM::SMLALD:
8321   case ARM::SMLALDX:
8322   case ARM::SMLALTB:
8323   case ARM::SMLALTT:
8324   case ARM::SMLSLD:
8325   case ARM::SMLSLDX:
8326   case ARM::SMULL:
8327   case ARM::t2SMLAL:
8328   case ARM::t2SMLALBB:
8329   case ARM::t2SMLALBT:
8330   case ARM::t2SMLALD:
8331   case ARM::t2SMLALDX:
8332   case ARM::t2SMLALTB:
8333   case ARM::t2SMLALTT:
8334   case ARM::t2SMLSLD:
8335   case ARM::t2SMLSLDX:
8336   case ARM::t2SMULL: {
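    // The ARM ARM makes the long multiply and multiply-accumulate forms
    // UNPREDICTABLE when RdHi and RdLo name the same register.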
8337     unsigned RdHi = Inst.getOperand(0).getReg();
8338     unsigned RdLo = Inst.getOperand(1).getReg();
8339     if (RdHi == RdLo) {
8340       return Error(Loc,
8341                    "unpredictable instruction, RdHi and RdLo must be different");
8342     }
8343     break;
8344   }
8345 
8346   case ARM::CDE_CX1:
8347   case ARM::CDE_CX1A:
8348   case ARM::CDE_CX1D:
8349   case ARM::CDE_CX1DA:
8350   case ARM::CDE_CX2:
8351   case ARM::CDE_CX2A:
8352   case ARM::CDE_CX2D:
8353   case ARM::CDE_CX2DA:
8354   case ARM::CDE_CX3:
8355   case ARM::CDE_CX3A:
8356   case ARM::CDE_CX3D:
8357   case ARM::CDE_CX3DA:
8358   case ARM::CDE_VCX1_vec:
8359   case ARM::CDE_VCX1_fpsp:
8360   case ARM::CDE_VCX1_fpdp:
8361   case ARM::CDE_VCX1A_vec:
8362   case ARM::CDE_VCX1A_fpsp:
8363   case ARM::CDE_VCX1A_fpdp:
8364   case ARM::CDE_VCX2_vec:
8365   case ARM::CDE_VCX2_fpsp:
8366   case ARM::CDE_VCX2_fpdp:
8367   case ARM::CDE_VCX2A_vec:
8368   case ARM::CDE_VCX2A_fpsp:
8369   case ARM::CDE_VCX2A_fpdp:
8370   case ARM::CDE_VCX3_vec:
8371   case ARM::CDE_VCX3_fpsp:
8372   case ARM::CDE_VCX3_fpdp:
8373   case ARM::CDE_VCX3A_vec:
8374   case ARM::CDE_VCX3A_fpsp:
8375   case ARM::CDE_VCX3A_fpdp: {
8376     assert(Inst.getOperand(1).isImm() &&
8377            "CDE operand 1 must be a coprocessor ID");
8378     int64_t Coproc = Inst.getOperand(1).getImm();
8379     if (Coproc < 8 && !ARM::isCDECoproc(Coproc, *STI))
8380       return Error(Operands[1]->getStartLoc(),
8381                    "coprocessor must be configured as CDE");
8382     else if (Coproc >= 8)
8383       return Error(Operands[1]->getStartLoc(),
8384                    "coprocessor must be in the range [p0, p7]");
8385     break;
8386   }
8387 
8388   case ARM::t2CDP:
8389   case ARM::t2CDP2:
8390   case ARM::t2LDC2L_OFFSET:
8391   case ARM::t2LDC2L_OPTION:
8392   case ARM::t2LDC2L_POST:
8393   case ARM::t2LDC2L_PRE:
8394   case ARM::t2LDC2_OFFSET:
8395   case ARM::t2LDC2_OPTION:
8396   case ARM::t2LDC2_POST:
8397   case ARM::t2LDC2_PRE:
8398   case ARM::t2LDCL_OFFSET:
8399   case ARM::t2LDCL_OPTION:
8400   case ARM::t2LDCL_POST:
8401   case ARM::t2LDCL_PRE:
8402   case ARM::t2LDC_OFFSET:
8403   case ARM::t2LDC_OPTION:
8404   case ARM::t2LDC_POST:
8405   case ARM::t2LDC_PRE:
8406   case ARM::t2MCR:
8407   case ARM::t2MCR2:
8408   case ARM::t2MCRR:
8409   case ARM::t2MCRR2:
8410   case ARM::t2MRC:
8411   case ARM::t2MRC2:
8412   case ARM::t2MRRC:
8413   case ARM::t2MRRC2:
8414   case ARM::t2STC2L_OFFSET:
8415   case ARM::t2STC2L_OPTION:
8416   case ARM::t2STC2L_POST:
8417   case ARM::t2STC2L_PRE:
8418   case ARM::t2STC2_OFFSET:
8419   case ARM::t2STC2_OPTION:
8420   case ARM::t2STC2_POST:
8421   case ARM::t2STC2_PRE:
8422   case ARM::t2STCL_OFFSET:
8423   case ARM::t2STCL_OPTION:
8424   case ARM::t2STCL_POST:
8425   case ARM::t2STCL_PRE:
8426   case ARM::t2STC_OFFSET:
8427   case ARM::t2STC_OPTION:
8428   case ARM::t2STC_POST:
8429   case ARM::t2STC_PRE: {
8430     unsigned Opcode = Inst.getOpcode();
8431     // Inst.getOperand indexes operands in the (oops ...) and (iops ...) dags;
8432     // CopInd is the index of the coprocessor operand.
8433     size_t CopInd = 0;
8434     if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8435       CopInd = 2;
8436     else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8437       CopInd = 1;
8438     assert(Inst.getOperand(CopInd).isImm() &&
8439            "Operand must be a coprocessor ID");
8440     int64_t Coproc = Inst.getOperand(CopInd).getImm();
8441     // Operands[2] is the coprocessor operand at the syntactic level.
8442     if (ARM::isCDECoproc(Coproc, *STI))
8443       return Error(Operands[2]->getStartLoc(),
8444                    "coprocessor must be configured as GCP");
8445     break;
8446   }
8447   }
8448 
8449   return false;
8450 }
8451 
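// Map a VST '..._Asm_...' pseudo-opcode, used only while matching assembly,
// to the real NEON store opcode. Spacing reports the register-list stride:
// 1 for lists of consecutive D registers, 2 for the even/odd ("q") spaced
// forms such as 'vst2.16 {d0[1], d2[1]}, [r0]'.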
8452 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
8453   switch (Opc) {
8454   default: llvm_unreachable("unexpected opcode!");
8455   // VST1LN
8456   case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
8457   case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
8458   case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
8459   case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
8460   case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
8461   case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
8462   case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
8463   case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
8464   case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
8465 
8466   // VST2LN
8467   case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
8468   case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
8469   case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
8470   case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
8471   case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
8472 
8473   case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
8474   case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
8475   case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
8476   case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
8477   case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
8478 
8479   case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
8480   case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
8481   case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
8482   case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
8483   case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
8484 
8485   // VST3LN
8486   case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
8487   case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
8488   case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
8489   case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
8490   case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
8491   case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
8492   case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
8493   case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
8494   case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
8495   case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
8496   case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
8497   case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
8498   case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
8499   case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
8500   case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
8501 
8502   // VST3
8503   case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
8504   case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
8505   case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
8506   case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
8507   case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
8508   case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
8509   case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
8510   case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
8511   case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
8512   case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
8513   case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
8514   case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
8515   case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
8516   case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
8517   case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
8518   case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
8519   case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
8520   case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
8521 
8522   // VST4LN
8523   case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
8524   case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
8525   case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
8526   case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
8527   case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
8528   case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
8529   case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
8530   case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
8531   case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
8532   case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
8533   case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
8534   case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
8535   case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
8536   case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
8537   case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
8538 
8539   // VST4
8540   case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
8541   case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
8542   case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
8543   case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
8544   case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
8545   case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
8546   case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
8547   case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
8548   case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
8549   case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
8550   case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
8551   case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
8552   case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
8553   case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
8554   case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
8555   case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
8556   case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
8557   case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
8558   }
8559 }
8560 
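// Counterpart of getRealVSTOpcode for the NEON load pseudo-opcodes; Spacing
// has the same meaning (1 for consecutive D registers, 2 for even/odd spaced
// register lists).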
8561 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
8562   switch (Opc) {
8563   default: llvm_unreachable("unexpected opcode!");
8564   // VLD1LN
8565   case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
8566   case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8567   case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8568   case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
8569   case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8570   case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8571   case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
8572   case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
8573   case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
8574 
8575   // VLD2LN
8576   case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
8577   case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8578   case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8579   case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
8580   case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8581   case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
8582   case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8583   case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8584   case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
8585   case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8586   case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
8587   case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
8588   case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
8589   case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
8590   case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
8591 
8592   // VLD3DUP
8593   case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
8594   case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8595   case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8596   case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
8597   case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8598   case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8599   case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
8600   case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8601   case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8602   case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
8603   case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8604   case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8605   case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
8606   case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
8607   case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
8608   case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
8609   case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
8610   case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
8611 
8612   // VLD3LN
8613   case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
8614   case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8615   case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8616   case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
8617   case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8618   case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
8619   case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8620   case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8621   case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
8622   case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8623   case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
8624   case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
8625   case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
8626   case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
8627   case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
8628 
8629   // VLD3
8630   case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
8631   case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8632   case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8633   case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
8634   case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8635   case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8636   case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
8637   case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8638   case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8639   case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
8640   case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8641   case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8642   case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
8643   case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
8644   case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
8645   case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
8646   case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
8647   case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
8648 
8649   // VLD4LN
8650   case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
8651   case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8652   case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8653   case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8654   case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8655   case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
8656   case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8657   case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8658   case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8659   case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8660   case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
8661   case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
8662   case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
8663   case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
8664   case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
8665 
8666   // VLD4DUP
8667   case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
8668   case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8669   case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8670   case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
8671   case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
8672   case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8673   case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
8674   case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8675   case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8676   case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
8677   case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
8678   case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8679   case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
8680   case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
8681   case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
8682   case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
8683   case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
8684   case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
8685 
8686   // VLD4
8687   case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
8688   case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8689   case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8690   case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
8691   case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8692   case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8693   case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
8694   case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8695   case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8696   case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
8697   case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8698   case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8699   case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
8700   case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
8701   case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
8702   case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
8703   case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
8704   case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
8705   }
8706 }
8707 
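// Post-process a matched instruction: expand assembly-only pseudo-opcodes and
// aliases into their real encodings and fix up operand order where needed.
// Returns true if Inst was modified (callers typically loop so that
// transformations can chain off each other).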
8708 bool ARMAsmParser::processInstruction(MCInst &Inst,
8709                                       const OperandVector &Operands,
8710                                       MCStreamer &Out) {
8711   // Check if we have the wide qualifier, because if it's present we
8712   // must avoid selecting a 16-bit thumb instruction.
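  // e.g. 'ldr.w r0, label' must stay in its 32-bit encoding even when the
  // offset would fit the 16-bit literal form (see the t2LDRpcrel case below).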
8713   bool HasWideQualifier = false;
8714   for (auto &Op : Operands) {
8715     ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
8716     if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
8717       HasWideQualifier = true;
8718       break;
8719     }
8720   }
8721 
8722   switch (Inst.getOpcode()) {
8723   // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
8724   case ARM::LDRT_POST:
8725   case ARM::LDRBT_POST: {
8726     const unsigned Opcode =
8727       (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8728                                            : ARM::LDRBT_POST_IMM;
8729     MCInst TmpInst;
8730     TmpInst.setOpcode(Opcode);
8731     TmpInst.addOperand(Inst.getOperand(0));
8732     TmpInst.addOperand(Inst.getOperand(1));
8733     TmpInst.addOperand(Inst.getOperand(1));
8734     TmpInst.addOperand(MCOperand::createReg(0));
8735     TmpInst.addOperand(MCOperand::createImm(0));
8736     TmpInst.addOperand(Inst.getOperand(2));
8737     TmpInst.addOperand(Inst.getOperand(3));
8738     Inst = TmpInst;
8739     return true;
8740   }
8741   // Alias for 'ldr{sb,h,sh}t Rt, [Rn] {, #imm}' for an omitted immediate.
8742   case ARM::LDRSBTii:
8743   case ARM::LDRHTii:
8744   case ARM::LDRSHTii: {
8745     MCInst TmpInst;
8746 
8747     if (Inst.getOpcode() == ARM::LDRSBTii)
8748       TmpInst.setOpcode(ARM::LDRSBTi);
8749     else if (Inst.getOpcode() == ARM::LDRHTii)
8750       TmpInst.setOpcode(ARM::LDRHTi);
8751     else if (Inst.getOpcode() == ARM::LDRSHTii)
8752       TmpInst.setOpcode(ARM::LDRSHTi);
8753     TmpInst.addOperand(Inst.getOperand(0));
8754     TmpInst.addOperand(Inst.getOperand(1));
8755     TmpInst.addOperand(Inst.getOperand(1));
8756     TmpInst.addOperand(MCOperand::createImm(256));
8757     TmpInst.addOperand(Inst.getOperand(2));
8758     Inst = TmpInst;
8759     return true;
8760   }
8761   // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
8762   case ARM::STRT_POST:
8763   case ARM::STRBT_POST: {
8764     const unsigned Opcode =
8765       (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
8766                                            : ARM::STRBT_POST_IMM;
8767     MCInst TmpInst;
8768     TmpInst.setOpcode(Opcode);
8769     TmpInst.addOperand(Inst.getOperand(1));
8770     TmpInst.addOperand(Inst.getOperand(0));
8771     TmpInst.addOperand(Inst.getOperand(1));
8772     TmpInst.addOperand(MCOperand::createReg(0));
8773     TmpInst.addOperand(MCOperand::createImm(0));
8774     TmpInst.addOperand(Inst.getOperand(2));
8775     TmpInst.addOperand(Inst.getOperand(3));
8776     Inst = TmpInst;
8777     return true;
8778   }
8779   // Alias for alternate form of 'ADR Rd, #imm' instruction.
8780   case ARM::ADDri: {
8781     if (Inst.getOperand(1).getReg() != ARM::PC ||
8782         Inst.getOperand(5).getReg() != 0 ||
8783         !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
8784       return false;
8785     MCInst TmpInst;
8786     TmpInst.setOpcode(ARM::ADR);
8787     TmpInst.addOperand(Inst.getOperand(0));
8788     if (Inst.getOperand(2).isImm()) {
8789       // The immediate (mod_imm) will be in its encoded form; we must decode it
8790       // before passing it to the ADR instruction.
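      // A modified immediate encodes an 8-bit value and a 4-bit rotation; the
      // decoded value is the 8-bit field rotated right by twice the rotation
      // field (e.g. an encoding of 0x4FF decodes to 0xFF000000).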
8791       unsigned Enc = Inst.getOperand(2).getImm();
8792       TmpInst.addOperand(MCOperand::createImm(
8793           llvm::rotr<uint32_t>(Enc & 0xFF, (Enc & 0xF00) >> 7)));
8794     } else {
8795       // Turn PC-relative expression into absolute expression.
8796       // Reading the PC yields the address of the current instruction + 8, and
8797       // the transform to adr must compensate for that bias.
8798       MCSymbol *Dot = getContext().createTempSymbol();
8799       Out.emitLabel(Dot);
8800       const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
8801       const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
8802                                                      MCSymbolRefExpr::VK_None,
8803                                                      getContext());
8804       const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
8805       const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
8806                                                      getContext());
8807       const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
8808                                                         getContext());
8809       TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
8810     }
8811     TmpInst.addOperand(Inst.getOperand(3));
8812     TmpInst.addOperand(Inst.getOperand(4));
8813     Inst = TmpInst;
8814     return true;
8815   }
8816   // Aliases for imm syntax of LDR instructions.
8817   case ARM::t2LDR_PRE_imm:
8818   case ARM::t2LDR_POST_imm: {
8819     MCInst TmpInst;
8820     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDR_PRE_imm ? ARM::t2LDR_PRE
8821                                                              : ARM::t2LDR_POST);
8822     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8823     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8824     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8825     TmpInst.addOperand(Inst.getOperand(2)); // imm
8826     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8827     Inst = TmpInst;
8828     return true;
8829   }
8830   // Aliases for imm syntax of STR instructions.
8831   case ARM::t2STR_PRE_imm:
8832   case ARM::t2STR_POST_imm: {
8833     MCInst TmpInst;
8834     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STR_PRE_imm ? ARM::t2STR_PRE
8835                                                              : ARM::t2STR_POST);
8836     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8837     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8838     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8839     TmpInst.addOperand(Inst.getOperand(2)); // imm
8840     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8841     Inst = TmpInst;
8842     return true;
8843   }
8844   // Aliases for imm syntax of LDRB instructions.
8845   case ARM::t2LDRB_OFFSET_imm: {
8846     MCInst TmpInst;
8847     TmpInst.setOpcode(ARM::t2LDRBi8);
8848     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8849     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8850     TmpInst.addOperand(Inst.getOperand(2)); // imm
8851     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8852     Inst = TmpInst;
8853     return true;
8854   }
8855   case ARM::t2LDRB_PRE_imm:
8856   case ARM::t2LDRB_POST_imm: {
8857     MCInst TmpInst;
8858     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRB_PRE_imm
8859                           ? ARM::t2LDRB_PRE
8860                           : ARM::t2LDRB_POST);
8861     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8862     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8863     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8864     TmpInst.addOperand(Inst.getOperand(2)); // imm
8865     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8866     Inst = TmpInst;
8867     return true;
8868   }
8869   // Aliases for imm syntax of STRB instructions.
8870   case ARM::t2STRB_OFFSET_imm: {
8871     MCInst TmpInst;
8872     TmpInst.setOpcode(ARM::t2STRBi8);
8873     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8874     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8875     TmpInst.addOperand(Inst.getOperand(2)); // imm
8876     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8877     Inst = TmpInst;
8878     return true;
8879   }
8880   case ARM::t2STRB_PRE_imm:
8881   case ARM::t2STRB_POST_imm: {
8882     MCInst TmpInst;
8883     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRB_PRE_imm
8884                           ? ARM::t2STRB_PRE
8885                           : ARM::t2STRB_POST);
8886     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8887     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8888     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8889     TmpInst.addOperand(Inst.getOperand(2)); // imm
8890     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8891     Inst = TmpInst;
8892     return true;
8893   }
8894   // Aliases for imm syntax of LDRH instructions.
8895   case ARM::t2LDRH_OFFSET_imm: {
8896     MCInst TmpInst;
8897     TmpInst.setOpcode(ARM::t2LDRHi8);
8898     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8899     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8900     TmpInst.addOperand(Inst.getOperand(2)); // imm
8901     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8902     Inst = TmpInst;
8903     return true;
8904   }
8905   case ARM::t2LDRH_PRE_imm:
8906   case ARM::t2LDRH_POST_imm: {
8907     MCInst TmpInst;
8908     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRH_PRE_imm
8909                           ? ARM::t2LDRH_PRE
8910                           : ARM::t2LDRH_POST);
8911     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8912     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8913     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8914     TmpInst.addOperand(Inst.getOperand(2)); // imm
8915     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8916     Inst = TmpInst;
8917     return true;
8918   }
8919   // Aliases for imm syntax of STRH instructions.
8920   case ARM::t2STRH_OFFSET_imm: {
8921     MCInst TmpInst;
8922     TmpInst.setOpcode(ARM::t2STRHi8);
8923     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8924     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8925     TmpInst.addOperand(Inst.getOperand(2)); // imm
8926     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8927     Inst = TmpInst;
8928     return true;
8929   }
8930   case ARM::t2STRH_PRE_imm:
8931   case ARM::t2STRH_POST_imm: {
8932     MCInst TmpInst;
8933     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRH_PRE_imm
8934                           ? ARM::t2STRH_PRE
8935                           : ARM::t2STRH_POST);
8936     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8937     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8938     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8939     TmpInst.addOperand(Inst.getOperand(2)); // imm
8940     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8941     Inst = TmpInst;
8942     return true;
8943   }
8944   // Aliases for imm syntax of LDRSB instructions.
8945   case ARM::t2LDRSB_OFFSET_imm: {
8946     MCInst TmpInst;
8947     TmpInst.setOpcode(ARM::t2LDRSBi8);
8948     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8949     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8950     TmpInst.addOperand(Inst.getOperand(2)); // imm
8951     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8952     Inst = TmpInst;
8953     return true;
8954   }
8955   case ARM::t2LDRSB_PRE_imm:
8956   case ARM::t2LDRSB_POST_imm: {
8957     MCInst TmpInst;
8958     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSB_PRE_imm
8959                           ? ARM::t2LDRSB_PRE
8960                           : ARM::t2LDRSB_POST);
8961     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8962     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8963     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8964     TmpInst.addOperand(Inst.getOperand(2)); // imm
8965     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8966     Inst = TmpInst;
8967     return true;
8968   }
8969   // Aliases for imm syntax of LDRSH instructions.
8970   case ARM::t2LDRSH_OFFSET_imm: {
8971     MCInst TmpInst;
8972     TmpInst.setOpcode(ARM::t2LDRSHi8);
8973     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8974     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8975     TmpInst.addOperand(Inst.getOperand(2)); // imm
8976     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8977     Inst = TmpInst;
8978     return true;
8979   }
8980   case ARM::t2LDRSH_PRE_imm:
8981   case ARM::t2LDRSH_POST_imm: {
8982     MCInst TmpInst;
8983     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSH_PRE_imm
8984                           ? ARM::t2LDRSH_PRE
8985                           : ARM::t2LDRSH_POST);
8986     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8987     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8988     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8989     TmpInst.addOperand(Inst.getOperand(2)); // imm
8990     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8991     Inst = TmpInst;
8992     return true;
8993   }
8994   // Aliases for alternate PC+imm syntax of LDR instructions.
8995   case ARM::t2LDRpcrel:
8996     // Select the narrow version if the immediate will fit.
8997     if (Inst.getOperand(1).getImm() > 0 &&
8998         Inst.getOperand(1).getImm() <= 0xff &&
8999         !HasWideQualifier)
9000       Inst.setOpcode(ARM::tLDRpci);
9001     else
9002       Inst.setOpcode(ARM::t2LDRpci);
9003     return true;
9004   case ARM::t2LDRBpcrel:
9005     Inst.setOpcode(ARM::t2LDRBpci);
9006     return true;
9007   case ARM::t2LDRHpcrel:
9008     Inst.setOpcode(ARM::t2LDRHpci);
9009     return true;
9010   case ARM::t2LDRSBpcrel:
9011     Inst.setOpcode(ARM::t2LDRSBpci);
9012     return true;
9013   case ARM::t2LDRSHpcrel:
9014     Inst.setOpcode(ARM::t2LDRSHpci);
9015     return true;
9016   case ARM::LDRConstPool:
9017   case ARM::tLDRConstPool:
9018   case ARM::t2LDRConstPool: {
9019     // Pseudo instruction ldr rt, =immediate is converted to a
9020     // MOV rt, immediate if the immediate is known and representable;
9021     // otherwise we create a constant pool entry that we load from.
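    // e.g. 'ldr r0, =1' can be emitted as 'mov r0, #1', while a value with no
    // mov/mvn encoding (say 'ldr r0, =0x89abcdef') falls back to a literal
    // pool load.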
9022     MCInst TmpInst;
9023     if (Inst.getOpcode() == ARM::LDRConstPool)
9024       TmpInst.setOpcode(ARM::LDRi12);
9025     else if (Inst.getOpcode() == ARM::tLDRConstPool)
9026       TmpInst.setOpcode(ARM::tLDRpci);
9027     else if (Inst.getOpcode() == ARM::t2LDRConstPool)
9028       TmpInst.setOpcode(ARM::t2LDRpci);
9029     const ARMOperand &PoolOperand =
9030       (HasWideQualifier ?
9031        static_cast<ARMOperand &>(*Operands[4]) :
9032        static_cast<ARMOperand &>(*Operands[3]));
9033     const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
9034     // If SubExprVal is a constant we may be able to use a MOV
9035     // If SubExprVal is a constant, we may be able to use a MOV.
9036         Inst.getOperand(0).getReg() != ARM::PC &&
9037         Inst.getOperand(0).getReg() != ARM::SP) {
9038       int64_t Value =
9039         (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
9040       bool UseMov  = true;
9041       bool MovHasS = true;
9042       if (Inst.getOpcode() == ARM::LDRConstPool) {
9043         // ARM Constant
9044         if (ARM_AM::getSOImmVal(Value) != -1) {
9045           Value = ARM_AM::getSOImmVal(Value);
9046           TmpInst.setOpcode(ARM::MOVi);
9047         }
9048         else if (ARM_AM::getSOImmVal(~Value) != -1) {
9049           Value = ARM_AM::getSOImmVal(~Value);
9050           TmpInst.setOpcode(ARM::MVNi);
9051         }
9052         else if (hasV6T2Ops() &&
9053                  Value >= 0 && Value < 65536) {
9054           TmpInst.setOpcode(ARM::MOVi16);
9055           MovHasS = false;
9056         }
9057         else
9058           UseMov = false;
9059       }
9060       else {
9061         // Thumb/Thumb2 Constant
9062         if (hasThumb2() &&
9063             ARM_AM::getT2SOImmVal(Value) != -1)
9064           TmpInst.setOpcode(ARM::t2MOVi);
9065         else if (hasThumb2() &&
9066                  ARM_AM::getT2SOImmVal(~Value) != -1) {
9067           TmpInst.setOpcode(ARM::t2MVNi);
9068           Value = ~Value;
9069         }
9070         else if (hasV8MBaseline() &&
9071                  Value >= 0 && Value < 65536) {
9072           TmpInst.setOpcode(ARM::t2MOVi16);
9073           MovHasS = false;
9074         }
9075         else
9076           UseMov = false;
9077       }
9078       if (UseMov) {
9079         TmpInst.addOperand(Inst.getOperand(0));           // Rt
9080         TmpInst.addOperand(MCOperand::createImm(Value));  // Immediate
9081         TmpInst.addOperand(Inst.getOperand(2));           // CondCode
9082         TmpInst.addOperand(Inst.getOperand(3));           // CondCode
9083         if (MovHasS)
9084           TmpInst.addOperand(MCOperand::createReg(0));    // S
9085         Inst = TmpInst;
9086         return true;
9087       }
9088     }
9089     // No opportunity to use MOV/MVN; create a constant pool entry instead.
9090     const MCExpr *CPLoc =
9091       getTargetStreamer().addConstantPoolEntry(SubExprVal,
9092                                                PoolOperand.getStartLoc());
9093     TmpInst.addOperand(Inst.getOperand(0));           // Rt
9094     TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
9095     if (TmpInst.getOpcode() == ARM::LDRi12)
9096       TmpInst.addOperand(MCOperand::createImm(0));    // unused offset
9097     TmpInst.addOperand(Inst.getOperand(2));           // CondCode
9098     TmpInst.addOperand(Inst.getOperand(3));           // CondCode
9099     Inst = TmpInst;
9100     return true;
9101   }
9102   // Handle NEON VST complex aliases.
9103   case ARM::VST1LNdWB_register_Asm_8:
9104   case ARM::VST1LNdWB_register_Asm_16:
9105   case ARM::VST1LNdWB_register_Asm_32: {
9106     MCInst TmpInst;
9107     // Shuffle the operands around so the lane index operand is in the
9108     // right place.
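    // The '_Asm' pseudo lists (Vd, lane, Rn, alignment, Rm, pred); the real
    // writeback instruction wants (Rn_wb, Rn, alignment, Rm, Vd, lane, pred),
    // hence the reordering below. The same reshuffling repeats for the other
    // lane and dup variants.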
9109     unsigned Spacing;
9110     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9111     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9112     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9113     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9114     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9115     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9116     TmpInst.addOperand(Inst.getOperand(1)); // lane
9117     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9118     TmpInst.addOperand(Inst.getOperand(6));
9119     Inst = TmpInst;
9120     return true;
9121   }
9122 
9123   case ARM::VST2LNdWB_register_Asm_8:
9124   case ARM::VST2LNdWB_register_Asm_16:
9125   case ARM::VST2LNdWB_register_Asm_32:
9126   case ARM::VST2LNqWB_register_Asm_16:
9127   case ARM::VST2LNqWB_register_Asm_32: {
9128     MCInst TmpInst;
9129     // Shuffle the operands around so the lane index operand is in the
9130     // right place.
9131     unsigned Spacing;
9132     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9133     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9134     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9135     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9136     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9137     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9138     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9139                                             Spacing));
9140     TmpInst.addOperand(Inst.getOperand(1)); // lane
9141     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9142     TmpInst.addOperand(Inst.getOperand(6));
9143     Inst = TmpInst;
9144     return true;
9145   }
9146 
9147   case ARM::VST3LNdWB_register_Asm_8:
9148   case ARM::VST3LNdWB_register_Asm_16:
9149   case ARM::VST3LNdWB_register_Asm_32:
9150   case ARM::VST3LNqWB_register_Asm_16:
9151   case ARM::VST3LNqWB_register_Asm_32: {
9152     MCInst TmpInst;
9153     // Shuffle the operands around so the lane index operand is in the
9154     // right place.
9155     unsigned Spacing;
9156     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9157     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9158     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9159     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9160     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9161     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9162     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9163                                             Spacing));
9164     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9165                                             Spacing * 2));
9166     TmpInst.addOperand(Inst.getOperand(1)); // lane
9167     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9168     TmpInst.addOperand(Inst.getOperand(6));
9169     Inst = TmpInst;
9170     return true;
9171   }
9172 
9173   case ARM::VST4LNdWB_register_Asm_8:
9174   case ARM::VST4LNdWB_register_Asm_16:
9175   case ARM::VST4LNdWB_register_Asm_32:
9176   case ARM::VST4LNqWB_register_Asm_16:
9177   case ARM::VST4LNqWB_register_Asm_32: {
9178     MCInst TmpInst;
9179     // Shuffle the operands around so the lane index operand is in the
9180     // right place.
9181     unsigned Spacing;
9182     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9183     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9184     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9185     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9186     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9187     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9188     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9189                                             Spacing));
9190     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9191                                             Spacing * 2));
9192     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9193                                             Spacing * 3));
9194     TmpInst.addOperand(Inst.getOperand(1)); // lane
9195     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9196     TmpInst.addOperand(Inst.getOperand(6));
9197     Inst = TmpInst;
9198     return true;
9199   }
9200 
9201   case ARM::VST1LNdWB_fixed_Asm_8:
9202   case ARM::VST1LNdWB_fixed_Asm_16:
9203   case ARM::VST1LNdWB_fixed_Asm_32: {
9204     MCInst TmpInst;
9205     // Shuffle the operands around so the lane index operand is in the
9206     // right place.
9207     unsigned Spacing;
9208     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9209     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9210     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9211     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9212     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9213     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9214     TmpInst.addOperand(Inst.getOperand(1)); // lane
9215     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9216     TmpInst.addOperand(Inst.getOperand(5));
9217     Inst = TmpInst;
9218     return true;
9219   }
9220 
9221   case ARM::VST2LNdWB_fixed_Asm_8:
9222   case ARM::VST2LNdWB_fixed_Asm_16:
9223   case ARM::VST2LNdWB_fixed_Asm_32:
9224   case ARM::VST2LNqWB_fixed_Asm_16:
9225   case ARM::VST2LNqWB_fixed_Asm_32: {
9226     MCInst TmpInst;
9227     // Shuffle the operands around so the lane index operand is in the
9228     // right place.
9229     unsigned Spacing;
9230     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9231     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9232     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9233     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9234     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9235     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9236     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9237                                             Spacing));
9238     TmpInst.addOperand(Inst.getOperand(1)); // lane
9239     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9240     TmpInst.addOperand(Inst.getOperand(5));
9241     Inst = TmpInst;
9242     return true;
9243   }
9244 
9245   case ARM::VST3LNdWB_fixed_Asm_8:
9246   case ARM::VST3LNdWB_fixed_Asm_16:
9247   case ARM::VST3LNdWB_fixed_Asm_32:
9248   case ARM::VST3LNqWB_fixed_Asm_16:
9249   case ARM::VST3LNqWB_fixed_Asm_32: {
9250     MCInst TmpInst;
9251     // Shuffle the operands around so the lane index operand is in the
9252     // right place.
9253     unsigned Spacing;
9254     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9255     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9256     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9257     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9258     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9259     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9260     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9261                                             Spacing));
9262     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9263                                             Spacing * 2));
9264     TmpInst.addOperand(Inst.getOperand(1)); // lane
9265     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9266     TmpInst.addOperand(Inst.getOperand(5));
9267     Inst = TmpInst;
9268     return true;
9269   }
9270 
9271   case ARM::VST4LNdWB_fixed_Asm_8:
9272   case ARM::VST4LNdWB_fixed_Asm_16:
9273   case ARM::VST4LNdWB_fixed_Asm_32:
9274   case ARM::VST4LNqWB_fixed_Asm_16:
9275   case ARM::VST4LNqWB_fixed_Asm_32: {
9276     MCInst TmpInst;
9277     // Shuffle the operands around so the lane index operand is in the
9278     // right place.
9279     unsigned Spacing;
9280     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9281     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9282     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9283     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9284     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9285     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9286     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9287                                             Spacing));
9288     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9289                                             Spacing * 2));
9290     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9291                                             Spacing * 3));
9292     TmpInst.addOperand(Inst.getOperand(1)); // lane
9293     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9294     TmpInst.addOperand(Inst.getOperand(5));
9295     Inst = TmpInst;
9296     return true;
9297   }
9298 
9299   case ARM::VST1LNdAsm_8:
9300   case ARM::VST1LNdAsm_16:
9301   case ARM::VST1LNdAsm_32: {
9302     MCInst TmpInst;
9303     // Shuffle the operands around so the lane index operand is in the
9304     // right place.
9305     unsigned Spacing;
9306     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9307     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9308     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9309     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9310     TmpInst.addOperand(Inst.getOperand(1)); // lane
9311     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9312     TmpInst.addOperand(Inst.getOperand(5));
9313     Inst = TmpInst;
9314     return true;
9315   }
9316 
9317   case ARM::VST2LNdAsm_8:
9318   case ARM::VST2LNdAsm_16:
9319   case ARM::VST2LNdAsm_32:
9320   case ARM::VST2LNqAsm_16:
9321   case ARM::VST2LNqAsm_32: {
9322     MCInst TmpInst;
9323     // Shuffle the operands around so the lane index operand is in the
9324     // right place.
9325     unsigned Spacing;
9326     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9327     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9328     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9329     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9330     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9331                                             Spacing));
9332     TmpInst.addOperand(Inst.getOperand(1)); // lane
9333     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9334     TmpInst.addOperand(Inst.getOperand(5));
9335     Inst = TmpInst;
9336     return true;
9337   }
9338 
9339   case ARM::VST3LNdAsm_8:
9340   case ARM::VST3LNdAsm_16:
9341   case ARM::VST3LNdAsm_32:
9342   case ARM::VST3LNqAsm_16:
9343   case ARM::VST3LNqAsm_32: {
9344     MCInst TmpInst;
9345     // Shuffle the operands around so the lane index operand is in the
9346     // right place.
9347     unsigned Spacing;
9348     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9349     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9350     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9351     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9352     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9353                                             Spacing));
9354     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9355                                             Spacing * 2));
9356     TmpInst.addOperand(Inst.getOperand(1)); // lane
9357     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9358     TmpInst.addOperand(Inst.getOperand(5));
9359     Inst = TmpInst;
9360     return true;
9361   }
9362 
9363   case ARM::VST4LNdAsm_8:
9364   case ARM::VST4LNdAsm_16:
9365   case ARM::VST4LNdAsm_32:
9366   case ARM::VST4LNqAsm_16:
9367   case ARM::VST4LNqAsm_32: {
9368     MCInst TmpInst;
9369     // Shuffle the operands around so the lane index operand is in the
9370     // right place.
9371     unsigned Spacing;
9372     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9373     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9374     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9375     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9376     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9377                                             Spacing));
9378     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9379                                             Spacing * 2));
9380     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9381                                             Spacing * 3));
9382     TmpInst.addOperand(Inst.getOperand(1)); // lane
9383     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9384     TmpInst.addOperand(Inst.getOperand(5));
9385     Inst = TmpInst;
9386     return true;
9387   }
9388 
9389   // Handle NEON VLD complex aliases.
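       // For example (illustrative syntax), "vld1.8 {d0[2]}, [r0], r1" matches
       // one of these register-writeback pseudos; the cases below rewrite it
       // into the operand order the real writeback instructions expect.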
9390   case ARM::VLD1LNdWB_register_Asm_8:
9391   case ARM::VLD1LNdWB_register_Asm_16:
9392   case ARM::VLD1LNdWB_register_Asm_32: {
9393     MCInst TmpInst;
9394     // Shuffle the operands around so the lane index operand is in the
9395     // right place.
9396     unsigned Spacing;
9397     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9398     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9399     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9400     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9401     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9402     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9403     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9404     TmpInst.addOperand(Inst.getOperand(1)); // lane
9405     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9406     TmpInst.addOperand(Inst.getOperand(6));
9407     Inst = TmpInst;
9408     return true;
9409   }
9410 
9411   case ARM::VLD2LNdWB_register_Asm_8:
9412   case ARM::VLD2LNdWB_register_Asm_16:
9413   case ARM::VLD2LNdWB_register_Asm_32:
9414   case ARM::VLD2LNqWB_register_Asm_16:
9415   case ARM::VLD2LNqWB_register_Asm_32: {
9416     MCInst TmpInst;
9417     // Shuffle the operands around so the lane index operand is in the
9418     // right place.
9419     unsigned Spacing;
9420     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9421     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9422     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9423                                             Spacing));
9424     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9425     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9426     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9427     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9428     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9429     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9430                                             Spacing));
9431     TmpInst.addOperand(Inst.getOperand(1)); // lane
9432     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9433     TmpInst.addOperand(Inst.getOperand(6));
9434     Inst = TmpInst;
9435     return true;
9436   }
9437 
9438   case ARM::VLD3LNdWB_register_Asm_8:
9439   case ARM::VLD3LNdWB_register_Asm_16:
9440   case ARM::VLD3LNdWB_register_Asm_32:
9441   case ARM::VLD3LNqWB_register_Asm_16:
9442   case ARM::VLD3LNqWB_register_Asm_32: {
9443     MCInst TmpInst;
9444     // Shuffle the operands around so the lane index operand is in the
9445     // right place.
9446     unsigned Spacing;
9447     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9448     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9449     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9450                                             Spacing));
9451     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9452                                             Spacing * 2));
9453     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9454     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9455     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9456     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9457     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9458     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9459                                             Spacing));
9460     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9461                                             Spacing * 2));
9462     TmpInst.addOperand(Inst.getOperand(1)); // lane
9463     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9464     TmpInst.addOperand(Inst.getOperand(6));
9465     Inst = TmpInst;
9466     return true;
9467   }
9468 
9469   case ARM::VLD4LNdWB_register_Asm_8:
9470   case ARM::VLD4LNdWB_register_Asm_16:
9471   case ARM::VLD4LNdWB_register_Asm_32:
9472   case ARM::VLD4LNqWB_register_Asm_16:
9473   case ARM::VLD4LNqWB_register_Asm_32: {
9474     MCInst TmpInst;
9475     // Shuffle the operands around so the lane index operand is in the
9476     // right place.
9477     unsigned Spacing;
9478     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9479     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9480     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9481                                             Spacing));
9482     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9483                                             Spacing * 2));
9484     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9485                                             Spacing * 3));
9486     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9487     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9488     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9489     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9490     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9491     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9492                                             Spacing));
9493     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9494                                             Spacing * 2));
9495     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9496                                             Spacing * 3));
9497     TmpInst.addOperand(Inst.getOperand(1)); // lane
9498     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9499     TmpInst.addOperand(Inst.getOperand(6));
9500     Inst = TmpInst;
9501     return true;
9502   }
9503 
9504   case ARM::VLD1LNdWB_fixed_Asm_8:
9505   case ARM::VLD1LNdWB_fixed_Asm_16:
9506   case ARM::VLD1LNdWB_fixed_Asm_32: {
9507     MCInst TmpInst;
9508     // Shuffle the operands around so the lane index operand is in the
9509     // right place.
9510     unsigned Spacing;
9511     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9512     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9513     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9514     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9515     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9516     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9517     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9518     TmpInst.addOperand(Inst.getOperand(1)); // lane
9519     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9520     TmpInst.addOperand(Inst.getOperand(5));
9521     Inst = TmpInst;
9522     return true;
9523   }
9524 
9525   case ARM::VLD2LNdWB_fixed_Asm_8:
9526   case ARM::VLD2LNdWB_fixed_Asm_16:
9527   case ARM::VLD2LNdWB_fixed_Asm_32:
9528   case ARM::VLD2LNqWB_fixed_Asm_16:
9529   case ARM::VLD2LNqWB_fixed_Asm_32: {
9530     MCInst TmpInst;
9531     // Shuffle the operands around so the lane index operand is in the
9532     // right place.
9533     unsigned Spacing;
9534     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9535     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9536     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9537                                             Spacing));
9538     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9539     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9540     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9541     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9542     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9543     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9544                                             Spacing));
9545     TmpInst.addOperand(Inst.getOperand(1)); // lane
9546     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9547     TmpInst.addOperand(Inst.getOperand(5));
9548     Inst = TmpInst;
9549     return true;
9550   }
9551 
9552   case ARM::VLD3LNdWB_fixed_Asm_8:
9553   case ARM::VLD3LNdWB_fixed_Asm_16:
9554   case ARM::VLD3LNdWB_fixed_Asm_32:
9555   case ARM::VLD3LNqWB_fixed_Asm_16:
9556   case ARM::VLD3LNqWB_fixed_Asm_32: {
9557     MCInst TmpInst;
9558     // Shuffle the operands around so the lane index operand is in the
9559     // right place.
9560     unsigned Spacing;
9561     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9562     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9563     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9564                                             Spacing));
9565     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9566                                             Spacing * 2));
9567     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9568     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9569     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9570     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9571     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9572     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9573                                             Spacing));
9574     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9575                                             Spacing * 2));
9576     TmpInst.addOperand(Inst.getOperand(1)); // lane
9577     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9578     TmpInst.addOperand(Inst.getOperand(5));
9579     Inst = TmpInst;
9580     return true;
9581   }
9582 
9583   case ARM::VLD4LNdWB_fixed_Asm_8:
9584   case ARM::VLD4LNdWB_fixed_Asm_16:
9585   case ARM::VLD4LNdWB_fixed_Asm_32:
9586   case ARM::VLD4LNqWB_fixed_Asm_16:
9587   case ARM::VLD4LNqWB_fixed_Asm_32: {
9588     MCInst TmpInst;
9589     // Shuffle the operands around so the lane index operand is in the
9590     // right place.
9591     unsigned Spacing;
9592     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9593     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9594     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9595                                             Spacing));
9596     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9597                                             Spacing * 2));
9598     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9599                                             Spacing * 3));
9600     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9601     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9602     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9603     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9604     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9605     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9606                                             Spacing));
9607     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9608                                             Spacing * 2));
9609     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9610                                             Spacing * 3));
9611     TmpInst.addOperand(Inst.getOperand(1)); // lane
9612     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9613     TmpInst.addOperand(Inst.getOperand(5));
9614     Inst = TmpInst;
9615     return true;
9616   }
9617 
9618   case ARM::VLD1LNdAsm_8:
9619   case ARM::VLD1LNdAsm_16:
9620   case ARM::VLD1LNdAsm_32: {
9621     MCInst TmpInst;
9622     // Shuffle the operands around so the lane index operand is in the
9623     // right place.
9624     unsigned Spacing;
9625     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9626     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9627     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9628     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9629     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9630     TmpInst.addOperand(Inst.getOperand(1)); // lane
9631     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9632     TmpInst.addOperand(Inst.getOperand(5));
9633     Inst = TmpInst;
9634     return true;
9635   }
9636 
9637   case ARM::VLD2LNdAsm_8:
9638   case ARM::VLD2LNdAsm_16:
9639   case ARM::VLD2LNdAsm_32:
9640   case ARM::VLD2LNqAsm_16:
9641   case ARM::VLD2LNqAsm_32: {
9642     MCInst TmpInst;
9643     // Shuffle the operands around so the lane index operand is in the
9644     // right place.
9645     unsigned Spacing;
9646     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9647     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9648     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9649                                             Spacing));
9650     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9651     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9652     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9653     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9654                                             Spacing));
9655     TmpInst.addOperand(Inst.getOperand(1)); // lane
9656     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9657     TmpInst.addOperand(Inst.getOperand(5));
9658     Inst = TmpInst;
9659     return true;
9660   }
9661 
9662   case ARM::VLD3LNdAsm_8:
9663   case ARM::VLD3LNdAsm_16:
9664   case ARM::VLD3LNdAsm_32:
9665   case ARM::VLD3LNqAsm_16:
9666   case ARM::VLD3LNqAsm_32: {
9667     MCInst TmpInst;
9668     // Shuffle the operands around so the lane index operand is in the
9669     // right place.
9670     unsigned Spacing;
9671     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9672     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9673     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9674                                             Spacing));
9675     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9676                                             Spacing * 2));
9677     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9678     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9679     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9680     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9681                                             Spacing));
9682     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9683                                             Spacing * 2));
9684     TmpInst.addOperand(Inst.getOperand(1)); // lane
9685     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9686     TmpInst.addOperand(Inst.getOperand(5));
9687     Inst = TmpInst;
9688     return true;
9689   }
9690 
9691   case ARM::VLD4LNdAsm_8:
9692   case ARM::VLD4LNdAsm_16:
9693   case ARM::VLD4LNdAsm_32:
9694   case ARM::VLD4LNqAsm_16:
9695   case ARM::VLD4LNqAsm_32: {
9696     MCInst TmpInst;
9697     // Shuffle the operands around so the lane index operand is in the
9698     // right place.
9699     unsigned Spacing;
9700     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9701     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9702     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9703                                             Spacing));
9704     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9705                                             Spacing * 2));
9706     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9707                                             Spacing * 3));
9708     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9709     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9710     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9711     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9712                                             Spacing));
9713     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9714                                             Spacing * 2));
9715     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9716                                             Spacing * 3));
9717     TmpInst.addOperand(Inst.getOperand(1)); // lane
9718     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9719     TmpInst.addOperand(Inst.getOperand(5));
9720     Inst = TmpInst;
9721     return true;
9722   }
9723 
9724   // VLD3DUP single 3-element structure to all lanes instructions.
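       // E.g. (illustrative syntax) "vld3.8 {d0[], d1[], d2[]}, [r0]" uses the
       // "d" form with Spacing 1; the "q" forms use every other D register
       // (e.g. "{d0[], d2[], d4[]}"), giving Spacing 2.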
9725   case ARM::VLD3DUPdAsm_8:
9726   case ARM::VLD3DUPdAsm_16:
9727   case ARM::VLD3DUPdAsm_32:
9728   case ARM::VLD3DUPqAsm_8:
9729   case ARM::VLD3DUPqAsm_16:
9730   case ARM::VLD3DUPqAsm_32: {
9731     MCInst TmpInst;
9732     unsigned Spacing;
9733     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9734     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9735     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9736                                             Spacing));
9737     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9738                                             Spacing * 2));
9739     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9740     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9741     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9742     TmpInst.addOperand(Inst.getOperand(4));
9743     Inst = TmpInst;
9744     return true;
9745   }
9746 
9747   case ARM::VLD3DUPdWB_fixed_Asm_8:
9748   case ARM::VLD3DUPdWB_fixed_Asm_16:
9749   case ARM::VLD3DUPdWB_fixed_Asm_32:
9750   case ARM::VLD3DUPqWB_fixed_Asm_8:
9751   case ARM::VLD3DUPqWB_fixed_Asm_16:
9752   case ARM::VLD3DUPqWB_fixed_Asm_32: {
9753     MCInst TmpInst;
9754     unsigned Spacing;
9755     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9756     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9757     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9758                                             Spacing));
9759     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9760                                             Spacing * 2));
9761     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9762     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9763     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9764     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9765     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9766     TmpInst.addOperand(Inst.getOperand(4));
9767     Inst = TmpInst;
9768     return true;
9769   }
9770 
9771   case ARM::VLD3DUPdWB_register_Asm_8:
9772   case ARM::VLD3DUPdWB_register_Asm_16:
9773   case ARM::VLD3DUPdWB_register_Asm_32:
9774   case ARM::VLD3DUPqWB_register_Asm_8:
9775   case ARM::VLD3DUPqWB_register_Asm_16:
9776   case ARM::VLD3DUPqWB_register_Asm_32: {
9777     MCInst TmpInst;
9778     unsigned Spacing;
9779     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9780     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9781     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9782                                             Spacing));
9783     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9784                                             Spacing * 2));
9785     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9786     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9787     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9788     TmpInst.addOperand(Inst.getOperand(3)); // Rm
9789     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9790     TmpInst.addOperand(Inst.getOperand(5));
9791     Inst = TmpInst;
9792     return true;
9793   }
9794 
9795   // VLD3 multiple 3-element structure instructions.
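       // E.g. (illustrative syntax) "vld3.8 {d0, d1, d2}, [r0]" for the "d"
       // forms, or "vld3.16 {d0, d2, d4}, [r0]" for the double-spaced "q"
       // forms; both expand to the corresponding real VLD3 instruction below.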
9796   case ARM::VLD3dAsm_8:
9797   case ARM::VLD3dAsm_16:
9798   case ARM::VLD3dAsm_32:
9799   case ARM::VLD3qAsm_8:
9800   case ARM::VLD3qAsm_16:
9801   case ARM::VLD3qAsm_32: {
9802     MCInst TmpInst;
9803     unsigned Spacing;
9804     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9805     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9806     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9807                                             Spacing));
9808     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9809                                             Spacing * 2));
9810     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9811     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9812     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9813     TmpInst.addOperand(Inst.getOperand(4));
9814     Inst = TmpInst;
9815     return true;
9816   }
9817 
9818   case ARM::VLD3dWB_fixed_Asm_8:
9819   case ARM::VLD3dWB_fixed_Asm_16:
9820   case ARM::VLD3dWB_fixed_Asm_32:
9821   case ARM::VLD3qWB_fixed_Asm_8:
9822   case ARM::VLD3qWB_fixed_Asm_16:
9823   case ARM::VLD3qWB_fixed_Asm_32: {
9824     MCInst TmpInst;
9825     unsigned Spacing;
9826     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9827     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9828     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9829                                             Spacing));
9830     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9831                                             Spacing * 2));
9832     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9833     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9834     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9835     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9836     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9837     TmpInst.addOperand(Inst.getOperand(4));
9838     Inst = TmpInst;
9839     return true;
9840   }
9841 
9842   case ARM::VLD3dWB_register_Asm_8:
9843   case ARM::VLD3dWB_register_Asm_16:
9844   case ARM::VLD3dWB_register_Asm_32:
9845   case ARM::VLD3qWB_register_Asm_8:
9846   case ARM::VLD3qWB_register_Asm_16:
9847   case ARM::VLD3qWB_register_Asm_32: {
9848     MCInst TmpInst;
9849     unsigned Spacing;
9850     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9851     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9852     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9853                                             Spacing));
9854     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9855                                             Spacing * 2));
9856     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9857     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9858     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9859     TmpInst.addOperand(Inst.getOperand(3)); // Rm
9860     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9861     TmpInst.addOperand(Inst.getOperand(5));
9862     Inst = TmpInst;
9863     return true;
9864   }
9865 
9866   // VLD4DUP single 4-element structure to all lanes instructions.
9867   case ARM::VLD4DUPdAsm_8:
9868   case ARM::VLD4DUPdAsm_16:
9869   case ARM::VLD4DUPdAsm_32:
9870   case ARM::VLD4DUPqAsm_8:
9871   case ARM::VLD4DUPqAsm_16:
9872   case ARM::VLD4DUPqAsm_32: {
9873     MCInst TmpInst;
9874     unsigned Spacing;
9875     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9876     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9877     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9878                                             Spacing));
9879     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9880                                             Spacing * 2));
9881     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9882                                             Spacing * 3));
9883     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9884     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9885     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9886     TmpInst.addOperand(Inst.getOperand(4));
9887     Inst = TmpInst;
9888     return true;
9889   }
9890 
9891   case ARM::VLD4DUPdWB_fixed_Asm_8:
9892   case ARM::VLD4DUPdWB_fixed_Asm_16:
9893   case ARM::VLD4DUPdWB_fixed_Asm_32:
9894   case ARM::VLD4DUPqWB_fixed_Asm_8:
9895   case ARM::VLD4DUPqWB_fixed_Asm_16:
9896   case ARM::VLD4DUPqWB_fixed_Asm_32: {
9897     MCInst TmpInst;
9898     unsigned Spacing;
9899     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9900     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9901     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9902                                             Spacing));
9903     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9904                                             Spacing * 2));
9905     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9906                                             Spacing * 3));
9907     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9908     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9909     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9910     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9911     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9912     TmpInst.addOperand(Inst.getOperand(4));
9913     Inst = TmpInst;
9914     return true;
9915   }
9916 
9917   case ARM::VLD4DUPdWB_register_Asm_8:
9918   case ARM::VLD4DUPdWB_register_Asm_16:
9919   case ARM::VLD4DUPdWB_register_Asm_32:
9920   case ARM::VLD4DUPqWB_register_Asm_8:
9921   case ARM::VLD4DUPqWB_register_Asm_16:
9922   case ARM::VLD4DUPqWB_register_Asm_32: {
9923     MCInst TmpInst;
9924     unsigned Spacing;
9925     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9926     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9927     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9928                                             Spacing));
9929     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9930                                             Spacing * 2));
9931     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9932                                             Spacing * 3));
9933     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9934     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9935     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9936     TmpInst.addOperand(Inst.getOperand(3)); // Rm
9937     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9938     TmpInst.addOperand(Inst.getOperand(5));
9939     Inst = TmpInst;
9940     return true;
9941   }
9942 
9943   // VLD4 multiple 4-element structure instructions.
9944   case ARM::VLD4dAsm_8:
9945   case ARM::VLD4dAsm_16:
9946   case ARM::VLD4dAsm_32:
9947   case ARM::VLD4qAsm_8:
9948   case ARM::VLD4qAsm_16:
9949   case ARM::VLD4qAsm_32: {
9950     MCInst TmpInst;
9951     unsigned Spacing;
9952     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9953     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9954     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9955                                             Spacing));
9956     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9957                                             Spacing * 2));
9958     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9959                                             Spacing * 3));
9960     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9961     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9962     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9963     TmpInst.addOperand(Inst.getOperand(4));
9964     Inst = TmpInst;
9965     return true;
9966   }
9967 
9968   case ARM::VLD4dWB_fixed_Asm_8:
9969   case ARM::VLD4dWB_fixed_Asm_16:
9970   case ARM::VLD4dWB_fixed_Asm_32:
9971   case ARM::VLD4qWB_fixed_Asm_8:
9972   case ARM::VLD4qWB_fixed_Asm_16:
9973   case ARM::VLD4qWB_fixed_Asm_32: {
9974     MCInst TmpInst;
9975     unsigned Spacing;
9976     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9977     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9978     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9979                                             Spacing));
9980     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9981                                             Spacing * 2));
9982     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9983                                             Spacing * 3));
9984     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9985     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9986     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9987     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9988     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9989     TmpInst.addOperand(Inst.getOperand(4));
9990     Inst = TmpInst;
9991     return true;
9992   }
9993 
9994   case ARM::VLD4dWB_register_Asm_8:
9995   case ARM::VLD4dWB_register_Asm_16:
9996   case ARM::VLD4dWB_register_Asm_32:
9997   case ARM::VLD4qWB_register_Asm_8:
9998   case ARM::VLD4qWB_register_Asm_16:
9999   case ARM::VLD4qWB_register_Asm_32: {
10000     MCInst TmpInst;
10001     unsigned Spacing;
10002     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10003     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10004     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10005                                             Spacing));
10006     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10007                                             Spacing * 2));
10008     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10009                                             Spacing * 3));
10010     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10011     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10012     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10013     TmpInst.addOperand(Inst.getOperand(3)); // Rm
10014     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10015     TmpInst.addOperand(Inst.getOperand(5));
10016     Inst = TmpInst;
10017     return true;
10018   }
10019 
10020   // VST3 multiple 3-element structure instructions.
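        // E.g. (illustrative syntax) "vst3.8 {d0, d1, d2}, [r0]"; as with the
        // loads above, the "q" forms use double-spaced registers, and the real
        // store instruction expects the address operands before the registers.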
10021   case ARM::VST3dAsm_8:
10022   case ARM::VST3dAsm_16:
10023   case ARM::VST3dAsm_32:
10024   case ARM::VST3qAsm_8:
10025   case ARM::VST3qAsm_16:
10026   case ARM::VST3qAsm_32: {
10027     MCInst TmpInst;
10028     unsigned Spacing;
10029     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10030     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10031     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10032     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10033     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10034                                             Spacing));
10035     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10036                                             Spacing * 2));
10037     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10038     TmpInst.addOperand(Inst.getOperand(4));
10039     Inst = TmpInst;
10040     return true;
10041   }
10042 
10043   case ARM::VST3dWB_fixed_Asm_8:
10044   case ARM::VST3dWB_fixed_Asm_16:
10045   case ARM::VST3dWB_fixed_Asm_32:
10046   case ARM::VST3qWB_fixed_Asm_8:
10047   case ARM::VST3qWB_fixed_Asm_16:
10048   case ARM::VST3qWB_fixed_Asm_32: {
10049     MCInst TmpInst;
10050     unsigned Spacing;
10051     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10052     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10053     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10054     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10055     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10056     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10057     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10058                                             Spacing));
10059     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10060                                             Spacing * 2));
10061     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10062     TmpInst.addOperand(Inst.getOperand(4));
10063     Inst = TmpInst;
10064     return true;
10065   }
10066 
10067   case ARM::VST3dWB_register_Asm_8:
10068   case ARM::VST3dWB_register_Asm_16:
10069   case ARM::VST3dWB_register_Asm_32:
10070   case ARM::VST3qWB_register_Asm_8:
10071   case ARM::VST3qWB_register_Asm_16:
10072   case ARM::VST3qWB_register_Asm_32: {
10073     MCInst TmpInst;
10074     unsigned Spacing;
10075     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10076     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10077     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10078     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10079     TmpInst.addOperand(Inst.getOperand(3)); // Rm
10080     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10081     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10082                                             Spacing));
10083     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10084                                             Spacing * 2));
10085     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10086     TmpInst.addOperand(Inst.getOperand(5));
10087     Inst = TmpInst;
10088     return true;
10089   }
10090 
10091   // VST4 multiple 4-element structure instructions.
10092   case ARM::VST4dAsm_8:
10093   case ARM::VST4dAsm_16:
10094   case ARM::VST4dAsm_32:
10095   case ARM::VST4qAsm_8:
10096   case ARM::VST4qAsm_16:
10097   case ARM::VST4qAsm_32: {
10098     MCInst TmpInst;
10099     unsigned Spacing;
10100     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10101     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10102     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10103     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10104     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10105                                             Spacing));
10106     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10107                                             Spacing * 2));
10108     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10109                                             Spacing * 3));
10110     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10111     TmpInst.addOperand(Inst.getOperand(4));
10112     Inst = TmpInst;
10113     return true;
10114   }
10115 
10116   case ARM::VST4dWB_fixed_Asm_8:
10117   case ARM::VST4dWB_fixed_Asm_16:
10118   case ARM::VST4dWB_fixed_Asm_32:
10119   case ARM::VST4qWB_fixed_Asm_8:
10120   case ARM::VST4qWB_fixed_Asm_16:
10121   case ARM::VST4qWB_fixed_Asm_32: {
10122     MCInst TmpInst;
10123     unsigned Spacing;
10124     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10125     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10126     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10127     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10128     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10129     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10130     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10131                                             Spacing));
10132     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10133                                             Spacing * 2));
10134     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10135                                             Spacing * 3));
10136     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10137     TmpInst.addOperand(Inst.getOperand(4));
10138     Inst = TmpInst;
10139     return true;
10140   }
10141 
10142   case ARM::VST4dWB_register_Asm_8:
10143   case ARM::VST4dWB_register_Asm_16:
10144   case ARM::VST4dWB_register_Asm_32:
10145   case ARM::VST4qWB_register_Asm_8:
10146   case ARM::VST4qWB_register_Asm_16:
10147   case ARM::VST4qWB_register_Asm_32: {
10148     MCInst TmpInst;
10149     unsigned Spacing;
10150     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10151     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10152     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10153     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10154     TmpInst.addOperand(Inst.getOperand(3)); // Rm
10155     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10156     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10157                                             Spacing));
10158     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10159                                             Spacing * 2));
10160     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10161                                             Spacing * 3));
10162     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10163     TmpInst.addOperand(Inst.getOperand(5));
10164     Inst = TmpInst;
10165     return true;
10166   }
10167 
10168   // Handle encoding choice for the shift-immediate instructions.
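        // E.g. outside an IT block, "lsls r0, r1, #2" (flag-setting, low
        // registers) can use the 16-bit tLSLri encoding instead of the 32-bit
        // t2LSLri, provided no ".w" qualifier was given.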
10169   case ARM::t2LSLri:
10170   case ARM::t2LSRri:
10171   case ARM::t2ASRri:
10172     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10173         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10174         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10175         !HasWideQualifier) {
10176       unsigned NewOpc;
10177       switch (Inst.getOpcode()) {
10178       default: llvm_unreachable("unexpected opcode");
10179       case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
10180       case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
10181       case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
10182       }
10183       // The Thumb1 operands aren't in the same order. Awesome, eh?
10184       MCInst TmpInst;
10185       TmpInst.setOpcode(NewOpc);
10186       TmpInst.addOperand(Inst.getOperand(0));
10187       TmpInst.addOperand(Inst.getOperand(5));
10188       TmpInst.addOperand(Inst.getOperand(1));
10189       TmpInst.addOperand(Inst.getOperand(2));
10190       TmpInst.addOperand(Inst.getOperand(3));
10191       TmpInst.addOperand(Inst.getOperand(4));
10192       Inst = TmpInst;
10193       return true;
10194     }
10195     return false;
10196 
10197   // Handle the Thumb2 mode MOV complex aliases.
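        // E.g. "mov r0, r1, lsl r2" (register-shifted register) becomes a
        // shift instruction such as tLSLrr/t2LSLrr, and "mov r0, r1, lsl #3"
        // becomes an immediate-shift instruction.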
10198   case ARM::t2MOVsr:
10199   case ARM::t2MOVSsr: {
10200     // Which instruction to expand to depends on the CCOut operand and,
10201     // when the register operands are low registers, on whether we're in
10202     // an IT block.
10203     bool isNarrow = false;
10204     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10205         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10206         isARMLowRegister(Inst.getOperand(2).getReg()) &&
10207         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10208         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
10209         !HasWideQualifier)
10210       isNarrow = true;
10211     MCInst TmpInst;
10212     unsigned newOpc;
10213     switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
10214     default: llvm_unreachable("unexpected opcode!");
10215     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
10216     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
10217     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
10218     case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
10219     }
10220     TmpInst.setOpcode(newOpc);
10221     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10222     if (isNarrow)
10223       TmpInst.addOperand(MCOperand::createReg(
10224           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10225     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10226     TmpInst.addOperand(Inst.getOperand(2)); // Rm
10227     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10228     TmpInst.addOperand(Inst.getOperand(5));
10229     if (!isNarrow)
10230       TmpInst.addOperand(MCOperand::createReg(
10231           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10232     Inst = TmpInst;
10233     return true;
10234   }
10235   case ARM::t2MOVsi:
10236   case ARM::t2MOVSsi: {
10237     // Which instruction to expand to depends on the CCOut operand and,
10238     // when the register operands are low registers, on whether we're in
10239     // an IT block.
10240     bool isNarrow = false;
10241     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10242         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10243         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
10244         !HasWideQualifier)
10245       isNarrow = true;
10246     MCInst TmpInst;
10247     unsigned newOpc;
10248     unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
10249     unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
10250     bool isMov = false;
10251     // MOV rd, rm, LSL #0 is actually a MOV instruction
10252     if (Shift == ARM_AM::lsl && Amount == 0) {
10253       isMov = true;
10254       // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of
10255       // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is
10256       // unpredictable in an IT block so the 32-bit encoding T3 has to be used
10257       // instead.
10258       if (inITBlock()) {
10259         isNarrow = false;
10260       }
10261       newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
10262     } else {
10263       switch(Shift) {
10264       default: llvm_unreachable("unexpected opcode!");
10265       case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
10266       case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
10267       case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
10268       case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
10269       case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
10270       }
10271     }
10272     if (Amount == 32) Amount = 0;
10273     TmpInst.setOpcode(newOpc);
10274     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10275     if (isNarrow && !isMov)
10276       TmpInst.addOperand(MCOperand::createReg(
10277           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10278     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10279     if (newOpc != ARM::t2RRX && !isMov)
10280       TmpInst.addOperand(MCOperand::createImm(Amount));
10281     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10282     TmpInst.addOperand(Inst.getOperand(4));
10283     if (!isNarrow)
10284       TmpInst.addOperand(MCOperand::createReg(
10285           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10286     Inst = TmpInst;
10287     return true;
10288   }
10289   // Handle the ARM mode MOV complex aliases.
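        // E.g. "asr r0, r1, r2" is really "mov r0, r1, asr r2" (MOVsr), and
        // "lsl r0, r1, #4" is "mov r0, r1, lsl #4" (MOVsi); a shift by zero
        // immediate degenerates to a plain MOVr.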
10290   case ARM::ASRr:
10291   case ARM::LSRr:
10292   case ARM::LSLr:
10293   case ARM::RORr: {
10294     ARM_AM::ShiftOpc ShiftTy;
10295     switch(Inst.getOpcode()) {
10296     default: llvm_unreachable("unexpected opcode!");
10297     case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
10298     case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
10299     case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
10300     case ARM::RORr: ShiftTy = ARM_AM::ror; break;
10301     }
10302     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
10303     MCInst TmpInst;
10304     TmpInst.setOpcode(ARM::MOVsr);
10305     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10306     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10307     TmpInst.addOperand(Inst.getOperand(2)); // Rm
10308     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10309     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10310     TmpInst.addOperand(Inst.getOperand(4));
10311     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10312     Inst = TmpInst;
10313     return true;
10314   }
10315   case ARM::ASRi:
10316   case ARM::LSRi:
10317   case ARM::LSLi:
10318   case ARM::RORi: {
10319     ARM_AM::ShiftOpc ShiftTy;
10320     switch(Inst.getOpcode()) {
10321     default: llvm_unreachable("unexpected opcode!");
10322     case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
10323     case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
10324     case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
10325     case ARM::RORi: ShiftTy = ARM_AM::ror; break;
10326     }
10327     // A shift by zero is a plain MOVr, not a MOVsi.
10328     unsigned Amt = Inst.getOperand(2).getImm();
10329     unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
10330     // A shift by 32 should be encoded as 0 when permitted
10331     if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
10332       Amt = 0;
10333     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
10334     MCInst TmpInst;
10335     TmpInst.setOpcode(Opc);
10336     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10337     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10338     if (Opc == ARM::MOVsi)
10339       TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10340     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10341     TmpInst.addOperand(Inst.getOperand(4));
10342     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10343     Inst = TmpInst;
10344     return true;
10345   }
10346   case ARM::RRXi: {
10347     unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
10348     MCInst TmpInst;
10349     TmpInst.setOpcode(ARM::MOVsi);
10350     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10351     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10352     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10353     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10354     TmpInst.addOperand(Inst.getOperand(3));
10355     TmpInst.addOperand(Inst.getOperand(4)); // cc_out
10356     Inst = TmpInst;
10357     return true;
10358   }
10359   case ARM::t2LDMIA_UPD: {
10360     // If this is a load of a single register, then we should use
10361     // a post-indexed LDR instruction instead, per the ARM ARM.
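          // E.g. "ldmia r3!, {r4}" is emitted as "ldr r4, [r3], #4".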
10362     if (Inst.getNumOperands() != 5)
10363       return false;
10364     MCInst TmpInst;
10365     TmpInst.setOpcode(ARM::t2LDR_POST);
10366     TmpInst.addOperand(Inst.getOperand(4)); // Rt
10367     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10368     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10369     TmpInst.addOperand(MCOperand::createImm(4));
10370     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10371     TmpInst.addOperand(Inst.getOperand(3));
10372     Inst = TmpInst;
10373     return true;
10374   }
10375   case ARM::t2STMDB_UPD: {
10376     // If this is a store of a single register, then we should use
10377     // a pre-indexed STR instruction instead, per the ARM ARM.
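          // E.g. "stmdb r3!, {r4}" is emitted as "str r4, [r3, #-4]!".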
10378     if (Inst.getNumOperands() != 5)
10379       return false;
10380     MCInst TmpInst;
10381     TmpInst.setOpcode(ARM::t2STR_PRE);
10382     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10383     TmpInst.addOperand(Inst.getOperand(4)); // Rt
10384     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10385     TmpInst.addOperand(MCOperand::createImm(-4));
10386     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10387     TmpInst.addOperand(Inst.getOperand(3));
10388     Inst = TmpInst;
10389     return true;
10390   }
10391   case ARM::LDMIA_UPD:
10392     // If this is a load of a single register via a 'pop', then we should use
10393     // a post-indexed LDR instruction instead, per the ARM ARM.
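          // E.g. "pop {r0}" is emitted as "ldr r0, [sp], #4".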
10394     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
10395         Inst.getNumOperands() == 5) {
10396       MCInst TmpInst;
10397       TmpInst.setOpcode(ARM::LDR_POST_IMM);
10398       TmpInst.addOperand(Inst.getOperand(4)); // Rt
10399       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10400       TmpInst.addOperand(Inst.getOperand(1)); // Rn
10401       TmpInst.addOperand(MCOperand::createReg(0));  // am2offset
10402       TmpInst.addOperand(MCOperand::createImm(4));
10403       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10404       TmpInst.addOperand(Inst.getOperand(3));
10405       Inst = TmpInst;
10406       return true;
10407     }
10408     break;
10409   case ARM::STMDB_UPD:
10410     // If this is a store of a single register via a 'push', then we should use
10411     // a pre-indexed STR instruction instead, per the ARM ARM.
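          // E.g. "push {r0}" is emitted as "str r0, [sp, #-4]!".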
10412     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
10413         Inst.getNumOperands() == 5) {
10414       MCInst TmpInst;
10415       TmpInst.setOpcode(ARM::STR_PRE_IMM);
10416       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10417       TmpInst.addOperand(Inst.getOperand(4)); // Rt
10418       TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
10419       TmpInst.addOperand(MCOperand::createImm(-4));
10420       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10421       TmpInst.addOperand(Inst.getOperand(3));
10422       Inst = TmpInst;
10423     }
10424     break;
10425   case ARM::t2ADDri12:
10426   case ARM::t2SUBri12:
10427   case ARM::t2ADDspImm12:
10428   case ARM::t2SUBspImm12: {
10429     // If the immediate fits for encoding T3 and the generic
10430     // mnemonic was used, encoding T3 is preferred.
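          // E.g. "add r0, r1, #16": 16 is also a valid modified immediate, so
          // the plain "add" mnemonic selects t2ADDri (T3) rather than the
          // 12-bit "addw"-style encoding.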
10431     const StringRef Token = static_cast<ARMOperand &>(*Operands[0]).getToken();
10432     if ((Token != "add" && Token != "sub") ||
10433         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
10434       break;
10435     switch (Inst.getOpcode()) {
10436     case ARM::t2ADDri12:
10437       Inst.setOpcode(ARM::t2ADDri);
10438       break;
10439     case ARM::t2SUBri12:
10440       Inst.setOpcode(ARM::t2SUBri);
10441       break;
10442     case ARM::t2ADDspImm12:
10443       Inst.setOpcode(ARM::t2ADDspImm);
10444       break;
10445     case ARM::t2SUBspImm12:
10446       Inst.setOpcode(ARM::t2SUBspImm);
10447       break;
10448     }
10449 
10450     Inst.addOperand(MCOperand::createReg(0)); // cc_out
10451     return true;
10452   }
10453   case ARM::tADDi8:
10454     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10455     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10456     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10457     // to encoding T1 if <Rd> is omitted."
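          // E.g. "adds r2, r2, #5" (Rd written explicitly) becomes tADDi3,
          // while "adds r2, #5" stays tADDi8.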
10458     if (Inst.getOperand(3).isImm() &&
10459         (unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
10460       Inst.setOpcode(ARM::tADDi3);
10461       return true;
10462     }
10463     break;
10464   case ARM::tSUBi8:
10465     // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
10466     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10467     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10468     // to encoding T1 if <Rd> is omitted."
10469     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
10470       Inst.setOpcode(ARM::tSUBi3);
10471       return true;
10472     }
10473     break;
10474   case ARM::t2ADDri:
10475   case ARM::t2SUBri: {
10476     // If the destination and first source operand are the same low
10477     // register, the immediate fits in 8 bits, and the flags are compatible
10478     // with the current IT status, use encoding T2 instead of T3, matching
10479     // the system 'as'. Make sure the wide encoding wasn't explicit.
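          // E.g. outside an IT block, "adds r1, r1, #200" narrows to the
          // 16-bit tADDi8 encoding.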
10480     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
10481         !isARMLowRegister(Inst.getOperand(0).getReg()) ||
10482         (Inst.getOperand(2).isImm() &&
10483          (unsigned)Inst.getOperand(2).getImm() > 255) ||
10484         Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) ||
10485         HasWideQualifier)
10486       break;
10487     MCInst TmpInst;
10488     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
10489                       ARM::tADDi8 : ARM::tSUBi8);
10490     TmpInst.addOperand(Inst.getOperand(0));
10491     TmpInst.addOperand(Inst.getOperand(5));
10492     TmpInst.addOperand(Inst.getOperand(0));
10493     TmpInst.addOperand(Inst.getOperand(2));
10494     TmpInst.addOperand(Inst.getOperand(3));
10495     TmpInst.addOperand(Inst.getOperand(4));
10496     Inst = TmpInst;
10497     return true;
10498   }
10499   case ARM::t2ADDspImm:
10500   case ARM::t2SUBspImm: {
10501     // Prefer T1 encoding if possible
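          // E.g. "add sp, sp, #16" becomes tADDspi with an immediate of 16/4.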
10502     if (Inst.getOperand(5).getReg() != 0 || HasWideQualifier)
10503       break;
10504     unsigned V = Inst.getOperand(2).getImm();
10505     if (V & 3 || V > ((1 << 7) - 1) << 2)
10506       break;
10507     MCInst TmpInst;
10508     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDspImm ? ARM::tADDspi
10509                                                           : ARM::tSUBspi);
10510     TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // destination reg
10511     TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // source reg
10512     TmpInst.addOperand(MCOperand::createImm(V / 4));   // immediate
10513     TmpInst.addOperand(Inst.getOperand(3));            // pred
10514     TmpInst.addOperand(Inst.getOperand(4));
10515     Inst = TmpInst;
10516     return true;
10517   }
10518   case ARM::t2ADDrr: {
10519     // If the destination and first source operand are the same, and
10520     // there's no setting of the flags, use encoding T2 instead of T3.
10521     // Note that this is only for ADD, not SUB. This mirrors the system
10522     // 'as' behaviour.  Also take advantage of ADD being commutative.
10523     // Make sure the wide encoding wasn't explicit.
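          // E.g. "add r0, r0, r8" (or the commuted "add r0, r8, r0") with no
          // flag setting becomes the 16-bit tADDhirr.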
10524     bool Swap = false;
10525     auto DestReg = Inst.getOperand(0).getReg();
10526     bool Transform = DestReg == Inst.getOperand(1).getReg();
10527     if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
10528       Transform = true;
10529       Swap = true;
10530     }
10531     if (!Transform ||
10532         Inst.getOperand(5).getReg() != 0 ||
10533         HasWideQualifier)
10534       break;
10535     MCInst TmpInst;
10536     TmpInst.setOpcode(ARM::tADDhirr);
10537     TmpInst.addOperand(Inst.getOperand(0));
10538     TmpInst.addOperand(Inst.getOperand(0));
10539     TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
10540     TmpInst.addOperand(Inst.getOperand(3));
10541     TmpInst.addOperand(Inst.getOperand(4));
10542     Inst = TmpInst;
10543     return true;
10544   }
10545   case ARM::tADDrSP:
10546     // If the non-SP source operand and the destination operand are not the
10547     // same, we need to use the 32-bit encoding if it's available.
10548     if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
10549       Inst.setOpcode(ARM::t2ADDrr);
10550       Inst.addOperand(MCOperand::createReg(0)); // cc_out
10551       return true;
10552     }
10553     break;
10554   case ARM::tB:
10555     // A Thumb conditional branch outside of an IT block is a tBcc.
10556     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
10557       Inst.setOpcode(ARM::tBcc);
10558       return true;
10559     }
10560     break;
10561   case ARM::t2B:
10562     // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
10563     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
10564       Inst.setOpcode(ARM::t2Bcc);
10565       return true;
10566     }
10567     break;
10568   case ARM::t2Bcc:
10569     // If the conditional is AL or we're in an IT block, we really want t2B.
10570     if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
10571       Inst.setOpcode(ARM::t2B);
10572       return true;
10573     }
10574     break;
10575   case ARM::tBcc:
10576     // If the conditional is AL, we really want tB.
10577     if (Inst.getOperand(1).getImm() == ARMCC::AL) {
10578       Inst.setOpcode(ARM::tB);
10579       return true;
10580     }
10581     break;
10582   case ARM::tLDMIA: {
10583     // If the register list contains any high registers, or if the writeback
10584     // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
10585     // instead if we're in Thumb2. Otherwise, this should have generated
10586     // an error in validateInstruction().
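          // Illustrative example (editor's note, not from the original source):
          // "ldmia r0, {r1, r8}" needs t2LDMIA because the list contains a high
          // register and there is no writeback, neither of which the 16-bit
          // tLDMIA can express (it always writes back unless the base register
          // is in the list).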
10587     unsigned Rn = Inst.getOperand(0).getReg();
10588     bool hasWritebackToken =
10589         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
10590          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
10591     bool listContainsBase;
10592     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
10593         (!listContainsBase && !hasWritebackToken) ||
10594         (listContainsBase && hasWritebackToken)) {
10595       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10596       assert(isThumbTwo());
10597       Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10598       // If we're switching to the updating version, we need to insert
10599       // the writeback tied operand.
10600       if (hasWritebackToken)
10601         Inst.insert(Inst.begin(),
10602                     MCOperand::createReg(Inst.getOperand(0).getReg()));
10603       return true;
10604     }
10605     break;
10606   }
10607   case ARM::tSTMIA_UPD: {
10608     // If the register list contains any high registers, we need to use
10609     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10610     // should have generated an error in validateInstruction().
10611     unsigned Rn = Inst.getOperand(0).getReg();
10612     bool listContainsBase;
10613     if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
10614       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10615       assert(isThumbTwo());
10616       Inst.setOpcode(ARM::t2STMIA_UPD);
10617       return true;
10618     }
10619     break;
10620   }
10621   case ARM::tPOP: {
10622     bool listContainsBase;
10623     // If the register list contains any high registers, we need to use
10624     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10625     // should have generated an error in validateInstruction().
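          // Illustrative example (editor's note, not from the original source):
          // "pop {r4, r8}" cannot use the 16-bit tPOP encoding, which only
          // accepts r0-r7 and pc, so it is rewritten below as the equivalent
          // "ldmia sp!, {r4, r8}" (t2LDMIA_UPD).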
10626     if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
10627       return false;
10628     assert(isThumbTwo());
10629     Inst.setOpcode(ARM::t2LDMIA_UPD);
10630     // Add the base register and writeback operands.
10631     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10632     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10633     return true;
10634   }
10635   case ARM::tPUSH: {
10636     bool listContainsBase;
10637     if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
10638       return false;
10639     assert(isThumbTwo());
10640     Inst.setOpcode(ARM::t2STMDB_UPD);
10641     // Add the base register and writeback operands.
10642     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10643     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10644     return true;
10645   }
10646   case ARM::t2MOVi:
10647     // If we can use the 16-bit encoding and the user didn't explicitly
10648     // request the 32-bit variant, transform it here.
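          // Illustrative example (editor's note, not from the original source):
          // "movs r2, #42" outside an IT block can be narrowed to tMOVi8, while
          // "movs.w r2, #42" keeps the 32-bit encoding because of the explicit
          // width qualifier.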
10649     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10650         (Inst.getOperand(1).isImm() &&
10651          (unsigned)Inst.getOperand(1).getImm() <= 255) &&
10652         Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10653         !HasWideQualifier) {
10654       // The operands aren't in the same order for tMOVi8...
10655       MCInst TmpInst;
10656       TmpInst.setOpcode(ARM::tMOVi8);
10657       TmpInst.addOperand(Inst.getOperand(0));
10658       TmpInst.addOperand(Inst.getOperand(4));
10659       TmpInst.addOperand(Inst.getOperand(1));
10660       TmpInst.addOperand(Inst.getOperand(2));
10661       TmpInst.addOperand(Inst.getOperand(3));
10662       Inst = TmpInst;
10663       return true;
10664     }
10665     break;
10666 
10667   case ARM::t2MOVr:
10668     // If we can use the 16-bit encoding and the user didn't explicitly
10669     // request the 32-bit variant, transform it here.
10670     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10671         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10672         Inst.getOperand(2).getImm() == ARMCC::AL &&
10673         Inst.getOperand(4).getReg() == ARM::CPSR &&
10674         !HasWideQualifier) {
10675       // The operands aren't the same for tMOV[S]r... (no cc_out)
10676       MCInst TmpInst;
10677       unsigned Op = Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr;
10678       TmpInst.setOpcode(Op);
10679       TmpInst.addOperand(Inst.getOperand(0));
10680       TmpInst.addOperand(Inst.getOperand(1));
10681       if (Op == ARM::tMOVr) {
10682         TmpInst.addOperand(Inst.getOperand(2));
10683         TmpInst.addOperand(Inst.getOperand(3));
10684       }
10685       Inst = TmpInst;
10686       return true;
10687     }
10688     break;
10689 
10690   case ARM::t2SXTH:
10691   case ARM::t2SXTB:
10692   case ARM::t2UXTH:
10693   case ARM::t2UXTB:
10694     // If we can use the 16-bit encoding and the user didn't explicitly
10695     // request the 32-bit variant, transform it here.
10696     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10697         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10698         Inst.getOperand(2).getImm() == 0 &&
10699         !HasWideQualifier) {
10700       unsigned NewOpc;
10701       switch (Inst.getOpcode()) {
10702       default: llvm_unreachable("Illegal opcode!");
10703       case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
10704       case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
10705       case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
10706       case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
10707       }
10708       // The operands aren't the same for thumb1 (no rotate operand).
10709       MCInst TmpInst;
10710       TmpInst.setOpcode(NewOpc);
10711       TmpInst.addOperand(Inst.getOperand(0));
10712       TmpInst.addOperand(Inst.getOperand(1));
10713       TmpInst.addOperand(Inst.getOperand(3));
10714       TmpInst.addOperand(Inst.getOperand(4));
10715       Inst = TmpInst;
10716       return true;
10717     }
10718     break;
10719 
10720   case ARM::MOVsi: {
10721     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
10722     // rrx shifts and asr/lsr of #32 are encoded as 0
10723     if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
10724       return false;
10725     if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
10726       // Shifting by zero is accepted as a vanilla 'MOVr'
10727       MCInst TmpInst;
10728       TmpInst.setOpcode(ARM::MOVr);
10729       TmpInst.addOperand(Inst.getOperand(0));
10730       TmpInst.addOperand(Inst.getOperand(1));
10731       TmpInst.addOperand(Inst.getOperand(3));
10732       TmpInst.addOperand(Inst.getOperand(4));
10733       TmpInst.addOperand(Inst.getOperand(5));
10734       Inst = TmpInst;
10735       return true;
10736     }
10737     return false;
10738   }
10739   case ARM::ANDrsi:
10740   case ARM::ORRrsi:
10741   case ARM::EORrsi:
10742   case ARM::BICrsi:
10743   case ARM::SUBrsi:
10744   case ARM::ADDrsi: {
10745     unsigned newOpc;
10746     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
10747     if (SOpc == ARM_AM::rrx) return false;
10748     switch (Inst.getOpcode()) {
10749     default: llvm_unreachable("unexpected opcode!");
10750     case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
10751     case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
10752     case ARM::EORrsi: newOpc = ARM::EORrr; break;
10753     case ARM::BICrsi: newOpc = ARM::BICrr; break;
10754     case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
10755     case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
10756     }
10757     // If the shift is by zero, use the non-shifted instruction definition.
10758     // The exception is for right shifts, where 0 == 32
10759     if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
10760         !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
10761       MCInst TmpInst;
10762       TmpInst.setOpcode(newOpc);
10763       TmpInst.addOperand(Inst.getOperand(0));
10764       TmpInst.addOperand(Inst.getOperand(1));
10765       TmpInst.addOperand(Inst.getOperand(2));
10766       TmpInst.addOperand(Inst.getOperand(4));
10767       TmpInst.addOperand(Inst.getOperand(5));
10768       TmpInst.addOperand(Inst.getOperand(6));
10769       Inst = TmpInst;
10770       return true;
10771     }
10772     return false;
10773   }
10774   case ARM::ITasm:
10775   case ARM::t2IT: {
10776     // Set up the IT block state according to the IT instruction we just
10777     // matched.
10778     assert(!inITBlock() && "nested IT blocks?!");
10779     startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()),
10780                          Inst.getOperand(1).getImm());
10781     break;
10782   }
10783   case ARM::t2LSLrr:
10784   case ARM::t2LSRrr:
10785   case ARM::t2ASRrr:
10786   case ARM::t2SBCrr:
10787   case ARM::t2RORrr:
10788   case ARM::t2BICrr:
10789     // Assemblers should use the narrow encodings of these instructions when permissible.
10790     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10791          isARMLowRegister(Inst.getOperand(2).getReg())) &&
10792         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10793         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10794         !HasWideQualifier) {
10795       unsigned NewOpc;
10796       switch (Inst.getOpcode()) {
10797         default: llvm_unreachable("unexpected opcode");
10798         case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
10799         case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
10800         case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
10801         case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
10802         case ARM::t2RORrr: NewOpc = ARM::tROR; break;
10803         case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
10804       }
10805       MCInst TmpInst;
10806       TmpInst.setOpcode(NewOpc);
10807       TmpInst.addOperand(Inst.getOperand(0));
10808       TmpInst.addOperand(Inst.getOperand(5));
10809       TmpInst.addOperand(Inst.getOperand(1));
10810       TmpInst.addOperand(Inst.getOperand(2));
10811       TmpInst.addOperand(Inst.getOperand(3));
10812       TmpInst.addOperand(Inst.getOperand(4));
10813       Inst = TmpInst;
10814       return true;
10815     }
10816     return false;
10817 
10818   case ARM::t2ANDrr:
10819   case ARM::t2EORrr:
10820   case ARM::t2ADCrr:
10821   case ARM::t2ORRrr:
10822     // Assemblers should use the narrow encodings of these instructions when permissible.
10823     // These instructions are special in that they are commutable, so shorter encodings
10824     // are available more often.
10825     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10826          isARMLowRegister(Inst.getOperand(2).getReg())) &&
10827         (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
10828          Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
10829         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10830         !HasWideQualifier) {
10831       unsigned NewOpc;
10832       switch (Inst.getOpcode()) {
10833         default: llvm_unreachable("unexpected opcode");
10834         case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
10835         case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
10836         case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
10837         case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
10838       }
10839       MCInst TmpInst;
10840       TmpInst.setOpcode(NewOpc);
10841       TmpInst.addOperand(Inst.getOperand(0));
10842       TmpInst.addOperand(Inst.getOperand(5));
10843       if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
10844         TmpInst.addOperand(Inst.getOperand(1));
10845         TmpInst.addOperand(Inst.getOperand(2));
10846       } else {
10847         TmpInst.addOperand(Inst.getOperand(2));
10848         TmpInst.addOperand(Inst.getOperand(1));
10849       }
10850       TmpInst.addOperand(Inst.getOperand(3));
10851       TmpInst.addOperand(Inst.getOperand(4));
10852       Inst = TmpInst;
10853       return true;
10854     }
10855     return false;
10856   case ARM::MVE_VPST:
10857   case ARM::MVE_VPTv16i8:
10858   case ARM::MVE_VPTv8i16:
10859   case ARM::MVE_VPTv4i32:
10860   case ARM::MVE_VPTv16u8:
10861   case ARM::MVE_VPTv8u16:
10862   case ARM::MVE_VPTv4u32:
10863   case ARM::MVE_VPTv16s8:
10864   case ARM::MVE_VPTv8s16:
10865   case ARM::MVE_VPTv4s32:
10866   case ARM::MVE_VPTv4f32:
10867   case ARM::MVE_VPTv8f16:
10868   case ARM::MVE_VPTv16i8r:
10869   case ARM::MVE_VPTv8i16r:
10870   case ARM::MVE_VPTv4i32r:
10871   case ARM::MVE_VPTv16u8r:
10872   case ARM::MVE_VPTv8u16r:
10873   case ARM::MVE_VPTv4u32r:
10874   case ARM::MVE_VPTv16s8r:
10875   case ARM::MVE_VPTv8s16r:
10876   case ARM::MVE_VPTv4s32r:
10877   case ARM::MVE_VPTv4f32r:
10878   case ARM::MVE_VPTv8f16r: {
10879     assert(!inVPTBlock() && "Nested VPT blocks are not allowed");
10880     MCOperand &MO = Inst.getOperand(0);
10881     VPTState.Mask = MO.getImm();
10882     VPTState.CurPosition = 0;
10883     break;
10884   }
10885   }
10886   return false;
10887 }
10888 
10889 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
10890   // 16-bit thumb arithmetic instructions either require or preclude the 'S'
10891   // suffix depending on whether they're in an IT block or not.
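        // Illustrative example (editor's note, not from the original source):
        // in Thumb2, "adds r0, r0, r1" outside an IT block and "add r0, r0, r1"
        // inside one are accepted, while "add" outside an IT block and "adds"
        // inside one are rejected (Match_RequiresITBlock and
        // Match_RequiresNotITBlock respectively, below).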
10892   unsigned Opc = Inst.getOpcode();
10893   const MCInstrDesc &MCID = MII.get(Opc);
10894   if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
10895     assert(MCID.hasOptionalDef() &&
10896            "optionally flag setting instruction missing optional def operand");
10897     assert(MCID.NumOperands == Inst.getNumOperands() &&
10898            "operand count mismatch!");
10899     // Find the optional-def operand (cc_out).
10900     unsigned OpNo;
10901     for (OpNo = 0;
10902          OpNo < MCID.NumOperands && !MCID.operands()[OpNo].isOptionalDef();
10903          ++OpNo)
10904       ;
10905     // If we're parsing Thumb1, reject it completely.
10906     if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
10907       return Match_RequiresFlagSetting;
10908     // If we're parsing Thumb2, which form is legal depends on whether we're
10909     // in an IT block.
10910     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
10911         !inITBlock())
10912       return Match_RequiresITBlock;
10913     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
10914         inITBlock())
10915       return Match_RequiresNotITBlock;
10916     // LSL with zero immediate is not allowed in an IT block
10917     if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
10918       return Match_RequiresNotITBlock;
10919   } else if (isThumbOne()) {
10920     // Some high-register-supporting Thumb1 encodings only allow both registers
10921     // to be from r0-r7 when in Thumb2.
10922     if (Opc == ARM::tADDhirr && !hasV6MOps() &&
10923         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10924         isARMLowRegister(Inst.getOperand(2).getReg()))
10925       return Match_RequiresThumb2;
10926     // Others only require ARMv6 or later.
10927     else if (Opc == ARM::tMOVr && !hasV6Ops() &&
10928              isARMLowRegister(Inst.getOperand(0).getReg()) &&
10929              isARMLowRegister(Inst.getOperand(1).getReg()))
10930       return Match_RequiresV6;
10931   }
10932 
10933   // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
10934   // than the loop below can handle, so it uses the GPRnopc register class and
10935   // we do SP handling here.
10936   if (Opc == ARM::t2MOVr && !hasV8Ops())
10937   {
10938     // SP as both source and destination is not allowed
10939     if (Inst.getOperand(0).getReg() == ARM::SP &&
10940         Inst.getOperand(1).getReg() == ARM::SP)
10941       return Match_RequiresV8;
10942     // When flags-setting SP as either source or destination is not allowed
10943     if (Inst.getOperand(4).getReg() == ARM::CPSR &&
10944         (Inst.getOperand(0).getReg() == ARM::SP ||
10945          Inst.getOperand(1).getReg() == ARM::SP))
10946       return Match_RequiresV8;
10947   }
10948 
10949   switch (Inst.getOpcode()) {
10950   case ARM::VMRS:
10951   case ARM::VMSR:
10952   case ARM::VMRS_FPCXTS:
10953   case ARM::VMRS_FPCXTNS:
10954   case ARM::VMSR_FPCXTS:
10955   case ARM::VMSR_FPCXTNS:
10956   case ARM::VMRS_FPSCR_NZCVQC:
10957   case ARM::VMSR_FPSCR_NZCVQC:
10958   case ARM::FMSTAT:
10959   case ARM::VMRS_VPR:
10960   case ARM::VMRS_P0:
10961   case ARM::VMSR_VPR:
10962   case ARM::VMSR_P0:
10963     // Use of SP for VMRS/VMSR is only allowed in ARM mode, with the exception
10964     // of ARMv8-A, where it is also accepted in Thumb.
10965     if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP &&
10966         (isThumb() && !hasV8Ops()))
10967       return Match_InvalidOperand;
10968     break;
10969   case ARM::t2TBB:
10970   case ARM::t2TBH:
10971     // Rn = sp is only allowed with ARMv8-A
10972     if (!hasV8Ops() && (Inst.getOperand(0).getReg() == ARM::SP))
10973       return Match_RequiresV8;
10974     break;
10975   default:
10976     break;
10977   }
10978 
10979   for (unsigned I = 0; I < MCID.NumOperands; ++I)
10980     if (MCID.operands()[I].RegClass == ARM::rGPRRegClassID) {
10981       // rGPRRegClass excludes PC, and also excluded SP before ARMv8
10982       const auto &Op = Inst.getOperand(I);
10983       if (!Op.isReg()) {
10984         // This can happen in awkward cases with tied operands, e.g. a
10985         // writeback load/store with a complex addressing mode in
10986         // which there's an output operand corresponding to the
10987         // updated written-back base register: the Tablegen-generated
10988         // AsmMatcher will have written a placeholder operand to that
10989         // slot in the form of an immediate 0, because it can't
10990         // generate the register part of the complex addressing-mode
10991         // operand ahead of time.
10992         continue;
10993       }
10994 
10995       unsigned Reg = Op.getReg();
10996       if ((Reg == ARM::SP) && !hasV8Ops())
10997         return Match_RequiresV8;
10998       else if (Reg == ARM::PC)
10999         return Match_InvalidOperand;
11000     }
11001 
11002   return Match_Success;
11003 }
11004 
11005 namespace llvm {
11006 
11007 template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
11008   return true; // In an assembly source, no need to second-guess
11009 }
11010 
11011 } // end namespace llvm
11012 
11013 // Returns true if Inst is unpredictable if it is in an IT block, but is not
11014 // the last instruction in the block.
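      // Illustrative example (editor's note, not from the original source): a
      // conditional branch such as "beq label" sitting in the middle of an IT
      // block is unpredictable, so when building implicit IT blocks such an
      // instruction must be the final one in the block.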
11015 bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
11016   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11017 
11018   // All branch & call instructions terminate IT blocks with the exception of
11019   // SVC.
11020   if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
11021       MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
11022     return true;
11023 
11024   // Any arithmetic instruction which writes to the PC also terminates the IT
11025   // block.
11026   if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI))
11027     return true;
11028 
11029   return false;
11030 }
11031 
11032 unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
11033                                           SmallVectorImpl<NearMissInfo> &NearMisses,
11034                                           bool MatchingInlineAsm,
11035                                           bool &EmitInITBlock,
11036                                           MCStreamer &Out) {
11037   // If we can't use an implicit IT block here, just match as normal.
11038   if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
11039     return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
11040 
11041   // Try to match the instruction in an extension of the current IT block (if
11042   // there is one).
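        // Illustrative example (editor's note, not from the original source):
        // with implicit IT enabled, "addeq r0, r1" followed by "addne r2, r3"
        // first opens an implicit "it eq" block; the second instruction is then
        // folded in by inverting the block's condition, yielding "ite eq".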
11043   if (inImplicitITBlock()) {
11044     extendImplicitITBlock(ITState.Cond);
11045     if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
11046             Match_Success) {
11047       // The match succeeded, but we still have to check that the instruction is
11048       // valid in this implicit IT block.
11049       const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11050       if (MCID.isPredicable()) {
11051         ARMCC::CondCodes InstCond =
11052             (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11053                 .getImm();
11054         ARMCC::CondCodes ITCond = currentITCond();
11055         if (InstCond == ITCond) {
11056           EmitInITBlock = true;
11057           return Match_Success;
11058         } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
11059           invertCurrentITCondition();
11060           EmitInITBlock = true;
11061           return Match_Success;
11062         }
11063       }
11064     }
11065     rewindImplicitITPosition();
11066   }
11067 
11068   // Finish the current IT block, and try to match outside any IT block.
11069   flushPendingInstructions(Out);
11070   unsigned PlainMatchResult =
11071       MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
11072   if (PlainMatchResult == Match_Success) {
11073     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11074     if (MCID.isPredicable()) {
11075       ARMCC::CondCodes InstCond =
11076           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11077               .getImm();
11078       // Some forms of the branch instruction have their own condition code
11079       // fields, so can be conditionally executed without an IT block.
11080       if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
11081         EmitInITBlock = false;
11082         return Match_Success;
11083       }
11084       if (InstCond == ARMCC::AL) {
11085         EmitInITBlock = false;
11086         return Match_Success;
11087       }
11088     } else {
11089       EmitInITBlock = false;
11090       return Match_Success;
11091     }
11092   }
11093 
11094   // Try to match in a new IT block. The matcher doesn't check the actual
11095   // condition, so we create an IT block with a dummy condition, and fix it up
11096   // once we know the actual condition.
11097   startImplicitITBlock();
11098   if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
11099       Match_Success) {
11100     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11101     if (MCID.isPredicable()) {
11102       ITState.Cond =
11103           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11104               .getImm();
11105       EmitInITBlock = true;
11106       return Match_Success;
11107     }
11108   }
11109   discardImplicitITBlock();
11110 
11111   // If none of these succeed, return the error we got when trying to match
11112   // outside any IT blocks.
11113   EmitInITBlock = false;
11114   return PlainMatchResult;
11115 }
11116 
11117 static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
11118                                          unsigned VariantID = 0);
11119 
11120 static const char *getSubtargetFeatureName(uint64_t Val);
11121 bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
11122                                            OperandVector &Operands,
11123                                            MCStreamer &Out, uint64_t &ErrorInfo,
11124                                            bool MatchingInlineAsm) {
11125   MCInst Inst;
11126   unsigned MatchResult;
11127   bool PendConditionalInstruction = false;
11128 
11129   SmallVector<NearMissInfo, 4> NearMisses;
11130   MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
11131                                  PendConditionalInstruction, Out);
11132 
11133   switch (MatchResult) {
11134   case Match_Success:
11135     LLVM_DEBUG(dbgs() << "Parsed as: ";
11136                Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
11137                dbgs() << "\n");
11138 
11139     // Context sensitive operand constraints aren't handled by the matcher,
11140     // so check them here.
11141     if (validateInstruction(Inst, Operands)) {
11142       // Still progress the IT block, otherwise one wrong condition causes
11143       // nasty cascading errors.
11144       forwardITPosition();
11145       forwardVPTPosition();
11146       return true;
11147     }
11148 
11149     {
11150       // Some instructions need post-processing to, for example, tweak which
11151       // encoding is selected. Loop on it while changes happen so the
11152       // individual transformations can chain off each other. E.g.,
11153       // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
11154       while (processInstruction(Inst, Operands, Out))
11155         LLVM_DEBUG(dbgs() << "Changed to: ";
11156                    Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
11157                    dbgs() << "\n");
11158     }
11159 
11160     // Only move forward at the very end so that everything in validate
11161     // and process gets a consistent answer about whether we're in an IT
11162     // block.
11163     forwardITPosition();
11164     forwardVPTPosition();
11165 
11166     // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
11167     // doesn't actually encode.
11168     if (Inst.getOpcode() == ARM::ITasm)
11169       return false;
11170 
11171     Inst.setLoc(IDLoc);
11172     if (PendConditionalInstruction) {
11173       PendingConditionalInsts.push_back(Inst);
11174       if (isITBlockFull() || isITBlockTerminator(Inst))
11175         flushPendingInstructions(Out);
11176     } else {
11177       Out.emitInstruction(Inst, getSTI());
11178     }
11179     return false;
11180   case Match_NearMisses:
11181     ReportNearMisses(NearMisses, IDLoc, Operands);
11182     return true;
11183   case Match_MnemonicFail: {
11184     FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
11185     std::string Suggestion = ARMMnemonicSpellCheck(
11186       ((ARMOperand &)*Operands[0]).getToken(), FBS);
11187     return Error(IDLoc, "invalid instruction" + Suggestion,
11188                  ((ARMOperand &)*Operands[0]).getLocRange());
11189   }
11190   }
11191 
11192   llvm_unreachable("Implement any new match types added!");
11193 }
11194 
11195 /// parseDirective parses the ARM-specific directives
11196 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
11197   const MCContext::Environment Format = getContext().getObjectFileType();
11198   bool IsMachO = Format == MCContext::IsMachO;
11199   bool IsCOFF = Format == MCContext::IsCOFF;
11200 
11201   std::string IDVal = DirectiveID.getIdentifier().lower();
11202   if (IDVal == ".word")
11203     parseLiteralValues(4, DirectiveID.getLoc());
11204   else if (IDVal == ".short" || IDVal == ".hword")
11205     parseLiteralValues(2, DirectiveID.getLoc());
11206   else if (IDVal == ".thumb")
11207     parseDirectiveThumb(DirectiveID.getLoc());
11208   else if (IDVal == ".arm")
11209     parseDirectiveARM(DirectiveID.getLoc());
11210   else if (IDVal == ".thumb_func")
11211     parseDirectiveThumbFunc(DirectiveID.getLoc());
11212   else if (IDVal == ".code")
11213     parseDirectiveCode(DirectiveID.getLoc());
11214   else if (IDVal == ".syntax")
11215     parseDirectiveSyntax(DirectiveID.getLoc());
11216   else if (IDVal == ".unreq")
11217     parseDirectiveUnreq(DirectiveID.getLoc());
11218   else if (IDVal == ".fnend")
11219     parseDirectiveFnEnd(DirectiveID.getLoc());
11220   else if (IDVal == ".cantunwind")
11221     parseDirectiveCantUnwind(DirectiveID.getLoc());
11222   else if (IDVal == ".personality")
11223     parseDirectivePersonality(DirectiveID.getLoc());
11224   else if (IDVal == ".handlerdata")
11225     parseDirectiveHandlerData(DirectiveID.getLoc());
11226   else if (IDVal == ".setfp")
11227     parseDirectiveSetFP(DirectiveID.getLoc());
11228   else if (IDVal == ".pad")
11229     parseDirectivePad(DirectiveID.getLoc());
11230   else if (IDVal == ".save")
11231     parseDirectiveRegSave(DirectiveID.getLoc(), false);
11232   else if (IDVal == ".vsave")
11233     parseDirectiveRegSave(DirectiveID.getLoc(), true);
11234   else if (IDVal == ".ltorg" || IDVal == ".pool")
11235     parseDirectiveLtorg(DirectiveID.getLoc());
11236   else if (IDVal == ".even")
11237     parseDirectiveEven(DirectiveID.getLoc());
11238   else if (IDVal == ".personalityindex")
11239     parseDirectivePersonalityIndex(DirectiveID.getLoc());
11240   else if (IDVal == ".unwind_raw")
11241     parseDirectiveUnwindRaw(DirectiveID.getLoc());
11242   else if (IDVal == ".movsp")
11243     parseDirectiveMovSP(DirectiveID.getLoc());
11244   else if (IDVal == ".arch_extension")
11245     parseDirectiveArchExtension(DirectiveID.getLoc());
11246   else if (IDVal == ".align")
11247     return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
11248   else if (IDVal == ".thumb_set")
11249     parseDirectiveThumbSet(DirectiveID.getLoc());
11250   else if (IDVal == ".inst")
11251     parseDirectiveInst(DirectiveID.getLoc());
11252   else if (IDVal == ".inst.n")
11253     parseDirectiveInst(DirectiveID.getLoc(), 'n');
11254   else if (IDVal == ".inst.w")
11255     parseDirectiveInst(DirectiveID.getLoc(), 'w');
11256   else if (!IsMachO && !IsCOFF) {
11257     if (IDVal == ".arch")
11258       parseDirectiveArch(DirectiveID.getLoc());
11259     else if (IDVal == ".cpu")
11260       parseDirectiveCPU(DirectiveID.getLoc());
11261     else if (IDVal == ".eabi_attribute")
11262       parseDirectiveEabiAttr(DirectiveID.getLoc());
11263     else if (IDVal == ".fpu")
11264       parseDirectiveFPU(DirectiveID.getLoc());
11265     else if (IDVal == ".fnstart")
11266       parseDirectiveFnStart(DirectiveID.getLoc());
11267     else if (IDVal == ".object_arch")
11268       parseDirectiveObjectArch(DirectiveID.getLoc());
11269     else if (IDVal == ".tlsdescseq")
11270       parseDirectiveTLSDescSeq(DirectiveID.getLoc());
11271     else
11272       return true;
11273   } else if (IsCOFF) {
11274     if (IDVal == ".seh_stackalloc")
11275       parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/false);
11276     else if (IDVal == ".seh_stackalloc_w")
11277       parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/true);
11278     else if (IDVal == ".seh_save_regs")
11279       parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/false);
11280     else if (IDVal == ".seh_save_regs_w")
11281       parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/true);
11282     else if (IDVal == ".seh_save_sp")
11283       parseDirectiveSEHSaveSP(DirectiveID.getLoc());
11284     else if (IDVal == ".seh_save_fregs")
11285       parseDirectiveSEHSaveFRegs(DirectiveID.getLoc());
11286     else if (IDVal == ".seh_save_lr")
11287       parseDirectiveSEHSaveLR(DirectiveID.getLoc());
11288     else if (IDVal == ".seh_endprologue")
11289       parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/false);
11290     else if (IDVal == ".seh_endprologue_fragment")
11291       parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/true);
11292     else if (IDVal == ".seh_nop")
11293       parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/false);
11294     else if (IDVal == ".seh_nop_w")
11295       parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/true);
11296     else if (IDVal == ".seh_startepilogue")
11297       parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/false);
11298     else if (IDVal == ".seh_startepilogue_cond")
11299       parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/true);
11300     else if (IDVal == ".seh_endepilogue")
11301       parseDirectiveSEHEpilogEnd(DirectiveID.getLoc());
11302     else if (IDVal == ".seh_custom")
11303       parseDirectiveSEHCustom(DirectiveID.getLoc());
11304     else
11305       return true;
11306   } else
11307     return true;
11308   return false;
11309 }
11310 
11311 /// parseLiteralValues
11312 ///  ::= .hword expression [, expression]*
11313 ///  ::= .short expression [, expression]*
11314 ///  ::= .word expression [, expression]*
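      // Illustrative usage (editor's note, not from the original source), where
      // "extsym" is a hypothetical symbol:
      //   .word  0x12345678, extsym+4
      //   .hword 0xbeef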
11315 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
11316   auto parseOne = [&]() -> bool {
11317     const MCExpr *Value;
11318     if (getParser().parseExpression(Value))
11319       return true;
11320     getParser().getStreamer().emitValue(Value, Size, L);
11321     return false;
11322   };
11323   return (parseMany(parseOne));
11324 }
11325 
11326 /// parseDirectiveThumb
11327 ///  ::= .thumb
11328 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
11329   if (parseEOL() || check(!hasThumb(), L, "target does not support Thumb mode"))
11330     return true;
11331 
11332   if (!isThumb())
11333     SwitchMode();
11334 
11335   getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11336   getParser().getStreamer().emitCodeAlignment(Align(2), &getSTI(), 0);
11337   return false;
11338 }
11339 
11340 /// parseDirectiveARM
11341 ///  ::= .arm
11342 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
11343   if (parseEOL() || check(!hasARM(), L, "target does not support ARM mode"))
11344     return true;
11345 
11346   if (isThumb())
11347     SwitchMode();
11348   getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11349   getParser().getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
11350   return false;
11351 }
11352 
11353 void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) {
11354   // We need to flush the current implicit IT block on a label, because it is
11355   // not legal to branch into an IT block.
11356   flushPendingInstructions(getStreamer());
11357 }
11358 
11359 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
11360   if (NextSymbolIsThumb) {
11361     getParser().getStreamer().emitThumbFunc(Symbol);
11362     NextSymbolIsThumb = false;
11363   }
11364 }
11365 
11366 /// parseDirectiveThumbFunc
11367 ///  ::= .thumb_func symbol_name
11368 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
11369   MCAsmParser &Parser = getParser();
11370   const auto Format = getContext().getObjectFileType();
11371   bool IsMachO = Format == MCContext::IsMachO;
11372 
11373   // Darwin asm allows an optional function name after the .thumb_func
11374   // directive; ELF doesn't.
11375 
11376   if (IsMachO) {
11377     if (Parser.getTok().is(AsmToken::Identifier) ||
11378         Parser.getTok().is(AsmToken::String)) {
11379       MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
11380           Parser.getTok().getIdentifier());
11381       getParser().getStreamer().emitThumbFunc(Func);
11382       Parser.Lex();
11383       if (parseEOL())
11384         return true;
11385       return false;
11386     }
11387   }
11388 
11389   if (parseEOL())
11390     return true;
11391 
11392   // .thumb_func implies .thumb
11393   if (!isThumb())
11394     SwitchMode();
11395 
11396   getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11397 
11398   NextSymbolIsThumb = true;
11399   return false;
11400 }
11401 
11402 /// parseDirectiveSyntax
11403 ///  ::= .syntax unified | divided
11404 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
11405   MCAsmParser &Parser = getParser();
11406   const AsmToken &Tok = Parser.getTok();
11407   if (Tok.isNot(AsmToken::Identifier)) {
11408     Error(L, "unexpected token in .syntax directive");
11409     return false;
11410   }
11411 
11412   StringRef Mode = Tok.getString();
11413   Parser.Lex();
11414   if (check(Mode == "divided" || Mode == "DIVIDED", L,
11415             "'.syntax divided' arm assembly not supported") ||
11416       check(Mode != "unified" && Mode != "UNIFIED", L,
11417             "unrecognized syntax mode in .syntax directive") ||
11418       parseEOL())
11419     return true;
11420 
11421   // TODO tell the MC streamer the mode
11422   // getParser().getStreamer().Emit???();
11423   return false;
11424 }
11425 
11426 /// parseDirectiveCode
11427 ///  ::= .code 16 | 32
11428 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
11429   MCAsmParser &Parser = getParser();
11430   const AsmToken &Tok = Parser.getTok();
11431   if (Tok.isNot(AsmToken::Integer))
11432     return Error(L, "unexpected token in .code directive");
11433   int64_t Val = Parser.getTok().getIntVal();
11434   if (Val != 16 && Val != 32) {
11435     Error(L, "invalid operand to .code directive");
11436     return false;
11437   }
11438   Parser.Lex();
11439 
11440   if (parseEOL())
11441     return true;
11442 
11443   if (Val == 16) {
11444     if (!hasThumb())
11445       return Error(L, "target does not support Thumb mode");
11446 
11447     if (!isThumb())
11448       SwitchMode();
11449     getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11450   } else {
11451     if (!hasARM())
11452       return Error(L, "target does not support ARM mode");
11453 
11454     if (isThumb())
11455       SwitchMode();
11456     getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11457   }
11458 
11459   return false;
11460 }
11461 
11462 /// parseDirectiveReq
11463 ///  ::= name .req registername
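      // Illustrative usage (editor's note, not from the original source):
      //   fpreg .req r11
      // After this, "mov fpreg, sp" assembles as "mov r11, sp"; ".unreq fpreg"
      // removes the alias again.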
11464 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
11465   MCAsmParser &Parser = getParser();
11466   Parser.Lex(); // Eat the '.req' token.
11467   MCRegister Reg;
11468   SMLoc SRegLoc, ERegLoc;
11469   if (check(parseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
11470             "register name expected") ||
11471       parseEOL())
11472     return true;
11473 
11474   if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
11475     return Error(SRegLoc,
11476                  "redefinition of '" + Name + "' does not match original.");
11477 
11478   return false;
11479 }
11480 
11481 /// parseDirectiveUnreq
11482 ///  ::= .unreq registername
11483 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
11484   MCAsmParser &Parser = getParser();
11485   if (Parser.getTok().isNot(AsmToken::Identifier))
11486     return Error(L, "unexpected input in .unreq directive.");
11487   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
11488   Parser.Lex(); // Eat the identifier.
11489   return parseEOL();
11490 }
11491 
11492 // After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
11493 // before, if supported by the new target, or emit mapping symbols for the mode
11494 // switch.
11495 void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
11496   if (WasThumb != isThumb()) {
11497     if (WasThumb && hasThumb()) {
11498       // Stay in Thumb mode
11499       SwitchMode();
11500     } else if (!WasThumb && hasARM()) {
11501       // Stay in ARM mode
11502       SwitchMode();
11503     } else {
11504       // Mode switch forced, because the new arch doesn't support the old mode.
11505       getParser().getStreamer().emitAssemblerFlag(isThumb() ? MCAF_Code16
11506                                                             : MCAF_Code32);
11507       // Warn about the implicit mode switch. GAS does not switch modes here,
11508       // but instead stays in the old mode, reporting an error on any following
11509       // instructions as the mode does not exist on the target.
11510       Warning(Loc, Twine("new target does not support ") +
11511                        (WasThumb ? "thumb" : "arm") + " mode, switching to " +
11512                        (!WasThumb ? "thumb" : "arm") + " mode");
11513     }
11514   }
11515 }
11516 
11517 /// parseDirectiveArch
11518 ///  ::= .arch token
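      // Illustrative usage (editor's note, not from the original source):
      // ".arch armv7-a" replaces the current feature set with that
      // architecture's defaults; a later ".arch" or ".cpu" can switch again.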
11519 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
11520   StringRef Arch = getParser().parseStringToEndOfStatement().trim();
11521   ARM::ArchKind ID = ARM::parseArch(Arch);
11522 
11523   if (ID == ARM::ArchKind::INVALID)
11524     return Error(L, "Unknown arch name");
11525 
11526   bool WasThumb = isThumb();
11527   Triple T;
11528   MCSubtargetInfo &STI = copySTI();
11529   STI.setDefaultFeatures("", /*TuneCPU*/ "",
11530                          ("+" + ARM::getArchName(ID)).str());
11531   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11532   FixModeAfterArchChange(WasThumb, L);
11533 
11534   getTargetStreamer().emitArch(ID);
11535   return false;
11536 }
11537 
11538 /// parseDirectiveEabiAttr
11539 ///  ::= .eabi_attribute int, int [, "str"]
11540 ///  ::= .eabi_attribute Tag_name, int [, "str"]
11541 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
11542   MCAsmParser &Parser = getParser();
11543   int64_t Tag;
11544   SMLoc TagLoc;
11545   TagLoc = Parser.getTok().getLoc();
11546   if (Parser.getTok().is(AsmToken::Identifier)) {
11547     StringRef Name = Parser.getTok().getIdentifier();
11548     std::optional<unsigned> Ret = ELFAttrs::attrTypeFromString(
11549         Name, ARMBuildAttrs::getARMAttributeTags());
11550     if (!Ret) {
11551       Error(TagLoc, "attribute name not recognised: " + Name);
11552       return false;
11553     }
11554     Tag = *Ret;
11555     Parser.Lex();
11556   } else {
11557     const MCExpr *AttrExpr;
11558 
11559     TagLoc = Parser.getTok().getLoc();
11560     if (Parser.parseExpression(AttrExpr))
11561       return true;
11562 
11563     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
11564     if (check(!CE, TagLoc, "expected numeric constant"))
11565       return true;
11566 
11567     Tag = CE->getValue();
11568   }
11569 
11570   if (Parser.parseComma())
11571     return true;
11572 
11573   StringRef StringValue = "";
11574   bool IsStringValue = false;
11575 
11576   int64_t IntegerValue = 0;
11577   bool IsIntegerValue = false;
11578 
11579   if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
11580     IsStringValue = true;
11581   else if (Tag == ARMBuildAttrs::compatibility) {
11582     IsStringValue = true;
11583     IsIntegerValue = true;
11584   } else if (Tag < 32 || Tag % 2 == 0)
11585     IsIntegerValue = true;
11586   else if (Tag % 2 == 1)
11587     IsStringValue = true;
11588   else
11589     llvm_unreachable("invalid tag type");
11590 
11591   if (IsIntegerValue) {
11592     const MCExpr *ValueExpr;
11593     SMLoc ValueExprLoc = Parser.getTok().getLoc();
11594     if (Parser.parseExpression(ValueExpr))
11595       return true;
11596 
11597     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
11598     if (!CE)
11599       return Error(ValueExprLoc, "expected numeric constant");
11600     IntegerValue = CE->getValue();
11601   }
11602 
11603   if (Tag == ARMBuildAttrs::compatibility) {
11604     if (Parser.parseComma())
11605       return true;
11606   }
11607 
11608   std::string EscapedValue;
11609   if (IsStringValue) {
11610     if (Parser.getTok().isNot(AsmToken::String))
11611       return Error(Parser.getTok().getLoc(), "bad string constant");
11612 
11613     if (Tag == ARMBuildAttrs::also_compatible_with) {
11614       if (Parser.parseEscapedString(EscapedValue))
11615         return Error(Parser.getTok().getLoc(), "bad escaped string constant");
11616 
11617       StringValue = EscapedValue;
11618     } else {
11619       StringValue = Parser.getTok().getStringContents();
11620       Parser.Lex();
11621     }
11622   }
11623 
11624   if (Parser.parseEOL())
11625     return true;
11626 
11627   if (IsIntegerValue && IsStringValue) {
11628     assert(Tag == ARMBuildAttrs::compatibility);
11629     getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
11630   } else if (IsIntegerValue)
11631     getTargetStreamer().emitAttribute(Tag, IntegerValue);
11632   else if (IsStringValue)
11633     getTargetStreamer().emitTextAttribute(Tag, StringValue);
11634   return false;
11635 }
11636 
11637 /// parseDirectiveCPU
11638 ///  ::= .cpu str
11639 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
11640   StringRef CPU = getParser().parseStringToEndOfStatement().trim();
11641   getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
11642 
11643   // FIXME: This is using table-gen data, but should be moved to
11644   // ARMTargetParser once that is table-gen'd.
11645   if (!getSTI().isCPUStringValid(CPU))
11646     return Error(L, "Unknown CPU name");
11647 
11648   bool WasThumb = isThumb();
11649   MCSubtargetInfo &STI = copySTI();
11650   STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
11651   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11652   FixModeAfterArchChange(WasThumb, L);
11653 
11654   return false;
11655 }
11656 
11657 /// parseDirectiveFPU
11658 ///  ::= .fpu str
11659 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
11660   SMLoc FPUNameLoc = getTok().getLoc();
11661   StringRef FPU = getParser().parseStringToEndOfStatement().trim();
11662 
11663   ARM::FPUKind ID = ARM::parseFPU(FPU);
11664   std::vector<StringRef> Features;
11665   if (!ARM::getFPUFeatures(ID, Features))
11666     return Error(FPUNameLoc, "Unknown FPU name");
11667 
11668   MCSubtargetInfo &STI = copySTI();
11669   for (auto Feature : Features)
11670     STI.ApplyFeatureFlag(Feature);
11671   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11672 
11673   getTargetStreamer().emitFPU(ID);
11674   return false;
11675 }
11676 
11677 /// parseDirectiveFnStart
11678 ///  ::= .fnstart
11679 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
11680   if (parseEOL())
11681     return true;
11682 
11683   if (UC.hasFnStart()) {
11684     Error(L, ".fnstart starts before the end of previous one");
11685     UC.emitFnStartLocNotes();
11686     return true;
11687   }
11688 
11689   // Reset the unwind directives parser state
11690   UC.reset();
11691 
11692   getTargetStreamer().emitFnStart();
11693 
11694   UC.recordFnStart(L);
11695   return false;
11696 }
11697 
11698 /// parseDirectiveFnEnd
11699 ///  ::= .fnend
11700 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
11701   if (parseEOL())
11702     return true;
11703   // Check the ordering of unwind directives
11704   if (!UC.hasFnStart())
11705     return Error(L, ".fnstart must precede .fnend directive");
11706 
11707   // Reset the unwind directives parser state
11708   getTargetStreamer().emitFnEnd();
11709 
11710   UC.reset();
11711   return false;
11712 }
11713 
11714 /// parseDirectiveCantUnwind
11715 ///  ::= .cantunwind
11716 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
11717   if (parseEOL())
11718     return true;
11719 
11720   UC.recordCantUnwind(L);
11721   // Check the ordering of unwind directives
11722   if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
11723     return true;
11724 
11725   if (UC.hasHandlerData()) {
11726     Error(L, ".cantunwind can't be used with .handlerdata directive");
11727     UC.emitHandlerDataLocNotes();
11728     return true;
11729   }
11730   if (UC.hasPersonality()) {
11731     Error(L, ".cantunwind can't be used with .personality directive");
11732     UC.emitPersonalityLocNotes();
11733     return true;
11734   }
11735 
11736   getTargetStreamer().emitCantUnwind();
11737   return false;
11738 }
11739 
11740 /// parseDirectivePersonality
11741 ///  ::= .personality name
11742 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
11743   MCAsmParser &Parser = getParser();
11744   bool HasExistingPersonality = UC.hasPersonality();
11745 
11746   // Parse the name of the personality routine
11747   if (Parser.getTok().isNot(AsmToken::Identifier))
11748     return Error(L, "unexpected input in .personality directive.");
11749   StringRef Name(Parser.getTok().getIdentifier());
11750   Parser.Lex();
11751 
11752   if (parseEOL())
11753     return true;
11754 
11755   UC.recordPersonality(L);
11756 
11757   // Check the ordering of unwind directives
11758   if (!UC.hasFnStart())
11759     return Error(L, ".fnstart must precede .personality directive");
11760   if (UC.cantUnwind()) {
11761     Error(L, ".personality can't be used with .cantunwind directive");
11762     UC.emitCantUnwindLocNotes();
11763     return true;
11764   }
11765   if (UC.hasHandlerData()) {
11766     Error(L, ".personality must precede .handlerdata directive");
11767     UC.emitHandlerDataLocNotes();
11768     return true;
11769   }
11770   if (HasExistingPersonality) {
11771     Error(L, "multiple personality directives");
11772     UC.emitPersonalityLocNotes();
11773     return true;
11774   }
11775 
11776   MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
11777   getTargetStreamer().emitPersonality(PR);
11778   return false;
11779 }
11780 
11781 /// parseDirectiveHandlerData
11782 ///  ::= .handlerdata
11783 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
11784   if (parseEOL())
11785     return true;
11786 
11787   UC.recordHandlerData(L);
11788   // Check the ordering of unwind directives
11789   if (!UC.hasFnStart())
11790     return Error(L, ".fnstart must precede .personality directive");
11791   if (UC.cantUnwind()) {
11792     Error(L, ".handlerdata can't be used with .cantunwind directive");
11793     UC.emitCantUnwindLocNotes();
11794     return true;
11795   }
11796 
11797   getTargetStreamer().emitHandlerData();
11798   return false;
11799 }
11800 
11801 /// parseDirectiveSetFP
11802 ///  ::= .setfp fpreg, spreg [, offset]
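      // Illustrative usage (editor's note, not from the original source):
      //   .setfp r7, sp, #8
      // records for the unwinder that the frame pointer r7 was established as
      // sp plus 8 in the prologue.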
11803 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
11804   MCAsmParser &Parser = getParser();
11805   // Check the ordering of unwind directives
11806   if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
11807       check(UC.hasHandlerData(), L,
11808             ".setfp must precede .handlerdata directive"))
11809     return true;
11810 
11811   // Parse fpreg
11812   SMLoc FPRegLoc = Parser.getTok().getLoc();
11813   int FPReg = tryParseRegister();
11814 
11815   if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
11816       Parser.parseComma())
11817     return true;
11818 
11819   // Parse spreg
11820   SMLoc SPRegLoc = Parser.getTok().getLoc();
11821   int SPReg = tryParseRegister();
11822   if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
11823       check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
11824             "register should be either $sp or the latest fp register"))
11825     return true;
11826 
11827   // Update the frame pointer register
11828   UC.saveFPReg(FPReg);
11829 
11830   // Parse offset
11831   int64_t Offset = 0;
11832   if (Parser.parseOptionalToken(AsmToken::Comma)) {
11833     if (Parser.getTok().isNot(AsmToken::Hash) &&
11834         Parser.getTok().isNot(AsmToken::Dollar))
11835       return Error(Parser.getTok().getLoc(), "'#' expected");
11836     Parser.Lex(); // skip hash token.
11837 
11838     const MCExpr *OffsetExpr;
11839     SMLoc ExLoc = Parser.getTok().getLoc();
11840     SMLoc EndLoc;
11841     if (getParser().parseExpression(OffsetExpr, EndLoc))
11842       return Error(ExLoc, "malformed setfp offset");
11843     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
11844     if (check(!CE, ExLoc, "setfp offset must be an immediate"))
11845       return true;
11846     Offset = CE->getValue();
11847   }
11848 
11849   if (Parser.parseEOL())
11850     return true;
11851 
11852   getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
11853                                 static_cast<unsigned>(SPReg), Offset);
11854   return false;
11855 }
11856 
11857 /// parseDirectivePad
11858 ///  ::= .pad offset
11859 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
11860   MCAsmParser &Parser = getParser();
11861   // Check the ordering of unwind directives
11862   if (!UC.hasFnStart())
11863     return Error(L, ".fnstart must precede .pad directive");
11864   if (UC.hasHandlerData())
11865     return Error(L, ".pad must precede .handlerdata directive");
11866 
11867   // Parse the offset
11868   if (Parser.getTok().isNot(AsmToken::Hash) &&
11869       Parser.getTok().isNot(AsmToken::Dollar))
11870     return Error(Parser.getTok().getLoc(), "'#' expected");
11871   Parser.Lex(); // skip hash token.
11872 
11873   const MCExpr *OffsetExpr;
11874   SMLoc ExLoc = Parser.getTok().getLoc();
11875   SMLoc EndLoc;
11876   if (getParser().parseExpression(OffsetExpr, EndLoc))
11877     return Error(ExLoc, "malformed pad offset");
11878   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
11879   if (!CE)
11880     return Error(ExLoc, "pad offset must be an immediate");
11881 
11882   if (parseEOL())
11883     return true;
11884 
11885   getTargetStreamer().emitPad(CE->getValue());
11886   return false;
11887 }
11888 
11889 /// parseDirectiveRegSave
11890 ///  ::= .save  { registers }
11891 ///  ::= .vsave { registers }
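      // Illustrative usage (editor's note, not from the original source):
      //   .save  {r4-r7, lr}
      //   .vsave {d8-d11}
      // recording the GPRs and D registers pushed in the prologue for the
      // unwind opcodes emitted at .fnend.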
11892 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
11893   // Check the ordering of unwind directives
11894   if (!UC.hasFnStart())
11895     return Error(L, ".fnstart must precede .save or .vsave directives");
11896   if (UC.hasHandlerData())
11897     return Error(L, ".save or .vsave must precede .handlerdata directive");
11898 
11899   // RAII object to make sure parsed operands are deleted.
11900   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
11901 
11902   // Parse the register list
11903   if (parseRegisterList(Operands, true, true) || parseEOL())
11904     return true;
11905   ARMOperand &Op = (ARMOperand &)*Operands[0];
11906   if (!IsVector && !Op.isRegList())
11907     return Error(L, ".save expects GPR registers");
11908   if (IsVector && !Op.isDPRRegList())
11909     return Error(L, ".vsave expects DPR registers");
11910 
11911   getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
11912   return false;
11913 }
11914 
11915 /// parseDirectiveInst
11916 ///  ::= .inst opcode [, ...]
11917 ///  ::= .inst.n opcode [, ...]
11918 ///  ::= .inst.w opcode [, ...]
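      // Illustrative usage (editor's note, not from the original source): in
      // Thumb mode ".inst 0xbf00" emits the 16-bit NOP encoding (values below
      // 0xe800 are guessed to be narrow below), while ".inst.w 0xf3af8000"
      // emits the 32-bit NOP.W encoding.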
11919 bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
11920   int Width = 4;
11921 
11922   if (isThumb()) {
11923     switch (Suffix) {
11924     case 'n':
11925       Width = 2;
11926       break;
11927     case 'w':
11928       break;
11929     default:
11930       Width = 0;
11931       break;
11932     }
11933   } else {
11934     if (Suffix)
11935       return Error(Loc, "width suffixes are invalid in ARM mode");
11936   }
11937 
11938   auto parseOne = [&]() -> bool {
11939     const MCExpr *Expr;
11940     if (getParser().parseExpression(Expr))
11941       return true;
11942     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
11943     if (!Value) {
11944       return Error(Loc, "expected constant expression");
11945     }
11946 
11947     char CurSuffix = Suffix;
11948     switch (Width) {
11949     case 2:
11950       if (Value->getValue() > 0xffff)
11951         return Error(Loc, "inst.n operand is too big, use inst.w instead");
11952       break;
11953     case 4:
11954       if (Value->getValue() > 0xffffffff)
11955         return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
11956                               " operand is too big");
11957       break;
11958     case 0:
11959       // Thumb mode, no width indicated. Guess from the opcode, if possible.
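            // A halfword below 0xe800 can only be a complete 16-bit encoding,
            // and a 32-bit value whose upper halfword is 0xe800 or above can
            // only be a 32-bit encoding; anything in between is ambiguous.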
11960       if (Value->getValue() < 0xe800)
11961         CurSuffix = 'n';
11962       else if (Value->getValue() >= 0xe8000000)
11963         CurSuffix = 'w';
11964       else
11965         return Error(Loc, "cannot determine Thumb instruction size, "
11966                           "use inst.n/inst.w instead");
11967       break;
11968     default:
11969       llvm_unreachable("only supported widths are 2 and 4");
11970     }
11971 
11972     getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
11973     forwardITPosition();
11974     forwardVPTPosition();
11975     return false;
11976   };
11977 
11978   if (parseOptionalToken(AsmToken::EndOfStatement))
11979     return Error(Loc, "expected expression following directive");
11980   if (parseMany(parseOne))
11981     return true;
11982   return false;
11983 }
11984 
11985 /// parseDirectiveLtorg
11986 ///  ::= .ltorg | .pool
11987 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
11988   if (parseEOL())
11989     return true;
11990   getTargetStreamer().emitCurrentConstantPool();
11991   return false;
11992 }
11993 
11994 bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
11995   const MCSection *Section = getStreamer().getCurrentSectionOnly();
11996 
11997   if (parseEOL())
11998     return true;
11999 
12000   if (!Section) {
12001     getStreamer().initSections(false, getSTI());
12002     Section = getStreamer().getCurrentSectionOnly();
12003   }
12004 
12005   assert(Section && "must have section to emit alignment");
12006   if (Section->useCodeAlign())
12007     getStreamer().emitCodeAlignment(Align(2), &getSTI());
12008   else
12009     getStreamer().emitValueToAlignment(Align(2));
12010 
12011   return false;
12012 }
12013 
12014 /// parseDirectivePersonalityIndex
12015 ///   ::= .personalityindex index
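      ///
      /// Illustrative use: ".personalityindex 0" selects the
      /// __aeabi_unwind_cpp_pr0 personality routine.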
12016 bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
12017   MCAsmParser &Parser = getParser();
12018   bool HasExistingPersonality = UC.hasPersonality();
12019 
12020   const MCExpr *IndexExpression;
12021   SMLoc IndexLoc = Parser.getTok().getLoc();
12022   if (Parser.parseExpression(IndexExpression) || parseEOL()) {
12023     return true;
12024   }
12025 
12026   UC.recordPersonalityIndex(L);
12027 
12028   if (!UC.hasFnStart()) {
12029     return Error(L, ".fnstart must precede .personalityindex directive");
12030   }
12031   if (UC.cantUnwind()) {
12032     Error(L, ".personalityindex cannot be used with .cantunwind");
12033     UC.emitCantUnwindLocNotes();
12034     return true;
12035   }
12036   if (UC.hasHandlerData()) {
12037     Error(L, ".personalityindex must precede .handlerdata directive");
12038     UC.emitHandlerDataLocNotes();
12039     return true;
12040   }
12041   if (HasExistingPersonality) {
12042     Error(L, "multiple personality directives");
12043     UC.emitPersonalityLocNotes();
12044     return true;
12045   }
12046 
12047   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
12048   if (!CE)
12049     return Error(IndexLoc, "index must be a constant number");
12050   if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
12051     return Error(IndexLoc,
12052                  "personality routine index should be in range [0-3]");
12053 
12054   getTargetStreamer().emitPersonalityIndex(CE->getValue());
12055   return false;
12056 }
12057 
12058 /// parseDirectiveUnwindRaw
12059 ///   ::= .unwind_raw offset, opcode [, opcode...]
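      ///
      /// Illustrative use: ".unwind_raw 0, 0xb0" emits the raw EHABI "finish"
      /// opcode with no additional stack adjustment.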
12060 bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
12061   MCAsmParser &Parser = getParser();
12062   int64_t StackOffset;
12063   const MCExpr *OffsetExpr;
12064   SMLoc OffsetLoc = getLexer().getLoc();
12065 
12066   if (!UC.hasFnStart())
12067     return Error(L, ".fnstart must precede .unwind_raw directives");
12068   if (getParser().parseExpression(OffsetExpr))
12069     return Error(OffsetLoc, "expected expression");
12070 
12071   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12072   if (!CE)
12073     return Error(OffsetLoc, "offset must be a constant");
12074 
12075   StackOffset = CE->getValue();
12076 
12077   if (Parser.parseComma())
12078     return true;
12079 
12080   SmallVector<uint8_t, 16> Opcodes;
12081 
12082   auto parseOne = [&]() -> bool {
12083     const MCExpr *OE = nullptr;
12084     SMLoc OpcodeLoc = getLexer().getLoc();
12085     if (check(getLexer().is(AsmToken::EndOfStatement) ||
12086                   Parser.parseExpression(OE),
12087               OpcodeLoc, "expected opcode expression"))
12088       return true;
12089     const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
12090     if (!OC)
12091       return Error(OpcodeLoc, "opcode value must be a constant");
12092     const int64_t Opcode = OC->getValue();
12093     if (Opcode & ~0xff)
12094       return Error(OpcodeLoc, "invalid opcode");
12095     Opcodes.push_back(uint8_t(Opcode));
12096     return false;
12097   };
12098 
12099   // Must have at least 1 element
12100   SMLoc OpcodeLoc = getLexer().getLoc();
12101   if (parseOptionalToken(AsmToken::EndOfStatement))
12102     return Error(OpcodeLoc, "expected opcode expression");
12103   if (parseMany(parseOne))
12104     return true;
12105 
12106   getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
12107   return false;
12108 }
12109 
12110 /// parseDirectiveTLSDescSeq
12111 ///   ::= .tlsdescseq tls-variable
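      ///
      /// Illustrative use: ".tlsdescseq var" annotates the inlined TLS
      /// descriptor sequence for the TLS variable "var".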
12112 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
12113   MCAsmParser &Parser = getParser();
12114 
12115   if (getLexer().isNot(AsmToken::Identifier))
12116     return TokError("expected variable after '.tlsdescseq' directive");
12117 
12118   const MCSymbolRefExpr *SRE =
12119     MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
12120                             MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
12121   Lex();
12122 
12123   if (parseEOL())
12124     return true;
12125 
12126   getTargetStreamer().annotateTLSDescriptorSequence(SRE);
12127   return false;
12128 }
12129 
12130 /// parseDirectiveMovSP
12131 ///  ::= .movsp reg [, #offset]
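      ///
      /// Illustrative use: ".movsp r7" tells the unwinder that r7 now holds
      /// the value of the stack pointer.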
12132 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
12133   MCAsmParser &Parser = getParser();
12134   if (!UC.hasFnStart())
12135     return Error(L, ".fnstart must precede .movsp directives");
12136   if (UC.getFPReg() != ARM::SP)
12137     return Error(L, "unexpected .movsp directive");
12138 
12139   SMLoc SPRegLoc = Parser.getTok().getLoc();
12140   int SPReg = tryParseRegister();
12141   if (SPReg == -1)
12142     return Error(SPRegLoc, "register expected");
12143   if (SPReg == ARM::SP || SPReg == ARM::PC)
12144     return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
12145 
12146   int64_t Offset = 0;
12147   if (Parser.parseOptionalToken(AsmToken::Comma)) {
12148     if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
12149       return true;
12150 
12151     const MCExpr *OffsetExpr;
12152     SMLoc OffsetLoc = Parser.getTok().getLoc();
12153 
12154     if (Parser.parseExpression(OffsetExpr))
12155       return Error(OffsetLoc, "malformed offset expression");
12156 
12157     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12158     if (!CE)
12159       return Error(OffsetLoc, "offset must be an immediate constant");
12160 
12161     Offset = CE->getValue();
12162   }
12163 
12164   if (parseEOL())
12165     return true;
12166 
12167   getTargetStreamer().emitMovSP(SPReg, Offset);
12168   UC.saveFPReg(SPReg);
12169 
12170   return false;
12171 }
12172 
12173 /// parseDirectiveObjectArch
12174 ///   ::= .object_arch name
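      ///
      /// Illustrative use: ".object_arch armv4" overrides the architecture
      /// recorded in the object's build attributes without changing which
      /// instructions the assembler accepts.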
12175 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
12176   MCAsmParser &Parser = getParser();
12177   if (getLexer().isNot(AsmToken::Identifier))
12178     return Error(getLexer().getLoc(), "unexpected token");
12179 
12180   StringRef Arch = Parser.getTok().getString();
12181   SMLoc ArchLoc = Parser.getTok().getLoc();
12182   Lex();
12183 
12184   ARM::ArchKind ID = ARM::parseArch(Arch);
12185 
12186   if (ID == ARM::ArchKind::INVALID)
12187     return Error(ArchLoc, "unknown architecture '" + Arch + "'");
12188   if (parseToken(AsmToken::EndOfStatement))
12189     return true;
12190 
12191   getTargetStreamer().emitObjectArch(ID);
12192   return false;
12193 }
12194 
12195 /// parseDirectiveAlign
12196 ///   ::= .align
12197 bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
12198   // NOTE: if this is not the end of the statement, fall back to the target-
12199   // agnostic handling for this directive, which will handle it correctly.
12200   if (parseOptionalToken(AsmToken::EndOfStatement)) {
12201     // '.align' is handled target-specifically here to mean 2**2-byte alignment.
12202     const MCSection *Section = getStreamer().getCurrentSectionOnly();
12203     assert(Section && "must have section to emit alignment");
12204     if (Section->useCodeAlign())
12205       getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
12206     else
12207       getStreamer().emitValueToAlignment(Align(4), 0, 1, 0);
12208     return false;
12209   }
12210   return true;
12211 }
12212 
12213 /// parseDirectiveThumbSet
12214 ///  ::= .thumb_set name, value
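      ///
      /// Behaves like ".set name, value", but additionally marks the resulting
      /// symbol as a Thumb function entry point, e.g. ".thumb_set alias, impl".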
12215 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
12216   MCAsmParser &Parser = getParser();
12217 
12218   StringRef Name;
12219   if (check(Parser.parseIdentifier(Name),
12220             "expected identifier after '.thumb_set'") ||
12221       Parser.parseComma())
12222     return true;
12223 
12224   MCSymbol *Sym;
12225   const MCExpr *Value;
12226   if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
12227                                                Parser, Sym, Value))
12228     return true;
12229 
12230   getTargetStreamer().emitThumbSet(Sym, Value);
12231   return false;
12232 }
12233 
12234 /// parseDirectiveSEHAllocStack
12235 /// ::= .seh_stackalloc
12236 /// ::= .seh_stackalloc_w
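      ///
      /// Illustrative use: ".seh_stackalloc 16" records a 16-byte stack
      /// allocation in the Windows on ARM unwind information.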
12237 bool ARMAsmParser::parseDirectiveSEHAllocStack(SMLoc L, bool Wide) {
12238   int64_t Size;
12239   if (parseImmExpr(Size))
12240     return true;
12241   getTargetStreamer().emitARMWinCFIAllocStack(Size, Wide);
12242   return false;
12243 }
12244 
12245 /// parseDirectiveSEHSaveRegs
12246 /// ::= .seh_save_regs
12247 /// ::= .seh_save_regs_w
12248 bool ARMAsmParser::parseDirectiveSEHSaveRegs(SMLoc L, bool Wide) {
12249   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
12250 
12251   if (parseRegisterList(Operands) || parseEOL())
12252     return true;
12253   ARMOperand &Op = (ARMOperand &)*Operands[0];
12254   if (!Op.isRegList())
12255     return Error(L, ".seh_save_regs{_w} expects GPR registers");
12256   const SmallVectorImpl<unsigned> &RegList = Op.getRegList();
12257   uint32_t Mask = 0;
12258   for (size_t i = 0; i < RegList.size(); ++i) {
12259     unsigned Reg = MRI->getEncodingValue(RegList[i]);
12260     if (Reg == 15) // pc -> lr
12261       Reg = 14;
12262     if (Reg == 13)
12263       return Error(L, ".seh_save_regs{_w} can't include SP");
12264     assert(Reg < 16U && "Register out of range");
12265     unsigned Bit = (1u << Reg);
12266     Mask |= Bit;
12267   }
12268   if (!Wide && (Mask & 0x1f00) != 0)
12269     return Error(L,
12270                  ".seh_save_regs cannot save R8-R12, needs .seh_save_regs_w");
12271   getTargetStreamer().emitARMWinCFISaveRegMask(Mask, Wide);
12272   return false;
12273 }
12274 
12275 /// parseDirectiveSEHSaveSP
12276 /// ::= .seh_save_sp
12277 bool ARMAsmParser::parseDirectiveSEHSaveSP(SMLoc L) {
12278   int Reg = tryParseRegister();
12279   if (Reg == -1 || !MRI->getRegClass(ARM::GPRRegClassID).contains(Reg))
12280     return Error(L, "expected GPR");
12281   unsigned Index = MRI->getEncodingValue(Reg);
12282   if (Index > 14 || Index == 13)
12283     return Error(L, "invalid register for .seh_save_sp");
12284   getTargetStreamer().emitARMWinCFISaveSP(Index);
12285   return false;
12286 }
12287 
12288 /// parseDirectiveSEHSaveFRegs
12289 /// ::= .seh_save_fregs
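      ///
      /// Illustrative use: ".seh_save_fregs {d8-d15}"; the saved registers must
      /// form a contiguous range lying entirely within d0-d15 or d16-d31.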
12290 bool ARMAsmParser::parseDirectiveSEHSaveFRegs(SMLoc L) {
12291   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
12292 
12293   if (parseRegisterList(Operands) || parseEOL())
12294     return true;
12295   ARMOperand &Op = (ARMOperand &)*Operands[0];
12296   if (!Op.isDPRRegList())
12297     return Error(L, ".seh_save_fregs expects DPR registers");
12298   const SmallVectorImpl<unsigned> &RegList = Op.getRegList();
12299   uint32_t Mask = 0;
12300   for (size_t i = 0; i < RegList.size(); ++i) {
12301     unsigned Reg = MRI->getEncodingValue(RegList[i]);
12302     assert(Reg < 32U && "Register out of range");
12303     unsigned Bit = (1u << Reg);
12304     Mask |= Bit;
12305   }
12306 
12307   if (Mask == 0)
12308     return Error(L, ".seh_save_fregs missing registers");
12309 
12310   unsigned First = 0;
12311   while ((Mask & 1) == 0) {
12312     First++;
12313     Mask >>= 1;
12314   }
12315   if (((Mask + 1) & Mask) != 0)
12316     return Error(L,
12317                  ".seh_save_fregs must take a contiguous range of registers");
12318   unsigned Last = First;
12319   while ((Mask & 2) != 0) {
12320     Last++;
12321     Mask >>= 1;
12322   }
12323   if (First < 16 && Last >= 16)
12324     return Error(L, ".seh_save_fregs must be all d0-d15 or d16-d31");
12325   getTargetStreamer().emitARMWinCFISaveFRegs(First, Last);
12326   return false;
12327 }
12328 
12329 /// parseDirectiveSEHSaveLR
12330 /// ::= .seh_save_lr
12331 bool ARMAsmParser::parseDirectiveSEHSaveLR(SMLoc L) {
12332   int64_t Offset;
12333   if (parseImmExpr(Offset))
12334     return true;
12335   getTargetStreamer().emitARMWinCFISaveLR(Offset);
12336   return false;
12337 }
12338 
12339 /// parseDirectiveSEHPrologEnd
12340 /// ::= .seh_endprologue
12341 /// ::= .seh_endprologue_fragment
12342 bool ARMAsmParser::parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment) {
12343   getTargetStreamer().emitARMWinCFIPrologEnd(Fragment);
12344   return false;
12345 }
12346 
12347 /// parseDirectiveSEHNop
12348 /// ::= .seh_nop
12349 /// ::= .seh_nop_w
12350 bool ARMAsmParser::parseDirectiveSEHNop(SMLoc L, bool Wide) {
12351   getTargetStreamer().emitARMWinCFINop(Wide);
12352   return false;
12353 }
12354 
12355 /// parseDirectiveSEHEpilogStart
12356 /// ::= .seh_startepilogue
12357 /// ::= .seh_startepilogue_cond
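      ///
      /// Illustrative use: ".seh_startepilogue_cond ne" starts an epilogue that
      /// only executes when the NE condition holds.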
12358 bool ARMAsmParser::parseDirectiveSEHEpilogStart(SMLoc L, bool Condition) {
12359   unsigned CC = ARMCC::AL;
12360   if (Condition) {
12361     MCAsmParser &Parser = getParser();
12362     SMLoc S = Parser.getTok().getLoc();
12363     const AsmToken &Tok = Parser.getTok();
12364     if (!Tok.is(AsmToken::Identifier))
12365       return Error(S, ".seh_startepilogue_cond missing condition");
12366     CC = ARMCondCodeFromString(Tok.getString());
12367     if (CC == ~0U)
12368       return Error(S, "invalid condition");
12369     Parser.Lex(); // Eat the token.
12370   }
12371 
12372   getTargetStreamer().emitARMWinCFIEpilogStart(CC);
12373   return false;
12374 }
12375 
12376 /// parseDirectiveSEHEpilogEnd
12377 /// ::= .seh_endepilogue
12378 bool ARMAsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
12379   getTargetStreamer().emitARMWinCFIEpilogEnd();
12380   return false;
12381 }
12382 
12383 /// parseDirectiveSEHCustom
12384 /// ::= .seh_custom
12385 bool ARMAsmParser::parseDirectiveSEHCustom(SMLoc L) {
12386   unsigned Opcode = 0;
12387   do {
12388     int64_t Byte;
12389     if (parseImmExpr(Byte))
12390       return true;
12391     if (Byte > 0xff || Byte < 0)
12392       return Error(L, "Invalid byte value in .seh_custom");
12393     if (Opcode > 0x00ffffff)
12394       return Error(L, "Too many bytes in .seh_custom");
12395     // Store the bytes as one big-endian number in Opcode. In a multi-byte
12396     // opcode sequence, the first byte can't be zero.
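          // For example, the operands 0xaa, 0xbb are packed as Opcode == 0xaabb.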
12397     Opcode = (Opcode << 8) | Byte;
12398   } while (parseOptionalToken(AsmToken::Comma));
12399   getTargetStreamer().emitARMWinCFICustom(Opcode);
12400   return false;
12401 }
12402 
12403 /// Force static initialization.
12404 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMAsmParser() {
12405   RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
12406   RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
12407   RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
12408   RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
12409 }
12410 
12411 #define GET_REGISTER_MATCHER
12412 #define GET_SUBTARGET_FEATURE_NAME
12413 #define GET_MATCHER_IMPLEMENTATION
12414 #define GET_MNEMONIC_SPELL_CHECKER
12415 #include "ARMGenAsmMatcher.inc"
12416 
12417 // Some diagnostics need to vary with subtarget features, so they are handled
12418 // here. For example, the DPR class has either 16 or 32 registers, depending
12419 // on the FPU available.
12420 const char *
12421 ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
12422   switch (MatchError) {
12423   // rGPR contains sp starting with ARMv8.
12424   case Match_rGPR:
12425     return hasV8Ops() ? "operand must be a register in range [r0, r14]"
12426                       : "operand must be a register in range [r0, r12] or r14";
12427   // DPR contains 16 registers for some FPUs, and 32 for others.
12428   case Match_DPR:
12429     return hasD32() ? "operand must be a register in range [d0, d31]"
12430                     : "operand must be a register in range [d0, d15]";
12431   case Match_DPR_RegList:
12432     return hasD32() ? "operand must be a list of registers in range [d0, d31]"
12433                     : "operand must be a list of registers in range [d0, d15]";
12434 
12435   // For all other diags, use the static string from tablegen.
12436   default:
12437     return getMatchKindDiag(MatchError);
12438   }
12439 }
12440 
12441 // Process the list of near-misses, throwing away ones we don't want to report
12442 // to the user, and converting the rest to a source location and string that
12443 // should be reported.
12444 void
12445 ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
12446                                SmallVectorImpl<NearMissMessage> &NearMissesOut,
12447                                SMLoc IDLoc, OperandVector &Operands) {
12448   // TODO: If operand didn't match, sub in a dummy one and run target
12449   // predicate, so that we can avoid reporting near-misses that are invalid?
12450   // TODO: Many operand types don't have SuperClasses set, so we report
12451   // redundant ones.
12452   // TODO: Some operands are superclasses of registers (e.g.
12453   // MCK_RegShiftedImm), we don't have any way to represent that currently.
12454   // TODO: This is not all ARM-specific, can some of it be factored out?
12455 
12456   // Record some information about near-misses that we have already seen, so
12457   // that we can avoid reporting redundant ones. For example, if there are
12458   // variants of an instruction that take 8- and 16-bit immediates, we want
12459   // to only report the widest one.
12460   std::multimap<unsigned, unsigned> OperandMissesSeen;
12461   SmallSet<FeatureBitset, 4> FeatureMissesSeen;
12462   bool ReportedTooFewOperands = false;
12463 
12464   // Process the near-misses in reverse order, so that we see more general ones
12465   // first, and so can avoid emitting more specific ones.
12466   for (NearMissInfo &I : reverse(NearMissesIn)) {
12467     switch (I.getKind()) {
12468     case NearMissInfo::NearMissOperand: {
12469       SMLoc OperandLoc =
12470           ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
12471       const char *OperandDiag =
12472           getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());
12473 
12474       // If we have already emitted a message for a superclass, don't also report
12475       // the sub-class. We consider all operand classes that we don't have a
12476   // specialised diagnostic for to be equal for the purpose of this check,
12477       // so that we don't report the generic error multiple times on the same
12478       // operand.
12479       unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
12480       auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
12481       if (std::any_of(PrevReports.first, PrevReports.second,
12482                       [DupCheckMatchClass](
12483                           const std::pair<unsigned, unsigned> Pair) {
12484             if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
12485               return Pair.second == DupCheckMatchClass;
12486             else
12487               return isSubclass((MatchClassKind)DupCheckMatchClass,
12488                                 (MatchClassKind)Pair.second);
12489           }))
12490         break;
12491       OperandMissesSeen.insert(
12492           std::make_pair(I.getOperandIndex(), DupCheckMatchClass));
12493 
12494       NearMissMessage Message;
12495       Message.Loc = OperandLoc;
12496       if (OperandDiag) {
12497         Message.Message = OperandDiag;
12498       } else if (I.getOperandClass() == InvalidMatchClass) {
12499         Message.Message = "too many operands for instruction";
12500       } else {
12501         Message.Message = "invalid operand for instruction";
12502         LLVM_DEBUG(
12503             dbgs() << "Missing diagnostic string for operand class "
12504                    << getMatchClassName((MatchClassKind)I.getOperandClass())
12505                    << I.getOperandClass() << ", error " << I.getOperandError()
12506                    << ", opcode " << MII.getName(I.getOpcode()) << "\n");
12507       }
12508       NearMissesOut.emplace_back(Message);
12509       break;
12510     }
12511     case NearMissInfo::NearMissFeature: {
12512       const FeatureBitset &MissingFeatures = I.getFeatures();
12513       // Don't report the same set of features twice.
12514       if (FeatureMissesSeen.count(MissingFeatures))
12515         break;
12516       FeatureMissesSeen.insert(MissingFeatures);
12517 
12518       // Special case: don't report a feature set which includes arm-mode for
12519       // targets that don't have ARM mode.
12520       if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
12521         break;
12522       // Don't report any near-misses that both require switching instruction
12523       // set, and adding other subtarget features.
12524       if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
12525           MissingFeatures.count() > 1)
12526         break;
12527       if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
12528           MissingFeatures.count() > 1)
12529         break;
12530       if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
12531           (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
12532                                              Feature_IsThumbBit})).any())
12533         break;
12534       if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
12535         break;
12536 
12537       NearMissMessage Message;
12538       Message.Loc = IDLoc;
12539       raw_svector_ostream OS(Message.Message);
12540 
12541       OS << "instruction requires:";
12542       for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
12543         if (MissingFeatures.test(i))
12544           OS << ' ' << getSubtargetFeatureName(i);
12545 
12546       NearMissesOut.emplace_back(Message);
12547 
12548       break;
12549     }
12550     case NearMissInfo::NearMissPredicate: {
12551       NearMissMessage Message;
12552       Message.Loc = IDLoc;
12553       switch (I.getPredicateError()) {
12554       case Match_RequiresNotITBlock:
12555         Message.Message = "flag setting instruction only valid outside IT block";
12556         break;
12557       case Match_RequiresITBlock:
12558         Message.Message = "instruction only valid inside IT block";
12559         break;
12560       case Match_RequiresV6:
12561         Message.Message = "instruction variant requires ARMv6 or later";
12562         break;
12563       case Match_RequiresThumb2:
12564         Message.Message = "instruction variant requires Thumb2";
12565         break;
12566       case Match_RequiresV8:
12567         Message.Message = "instruction variant requires ARMv8 or later";
12568         break;
12569       case Match_RequiresFlagSetting:
12570         Message.Message = "no flag-preserving variant of this instruction available";
12571         break;
12572       case Match_InvalidOperand:
12573         Message.Message = "invalid operand for instruction";
12574         break;
12575       default:
12576         llvm_unreachable("Unhandled target predicate error");
12577         break;
12578       }
12579       NearMissesOut.emplace_back(Message);
12580       break;
12581     }
12582     case NearMissInfo::NearMissTooFewOperands: {
12583       if (!ReportedTooFewOperands) {
12584         SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
12585         NearMissesOut.emplace_back(NearMissMessage{
12586             EndLoc, StringRef("too few operands for instruction")});
12587         ReportedTooFewOperands = true;
12588       }
12589       break;
12590     }
12591     case NearMissInfo::NoNearMiss:
12592       // This should never leave the matcher.
12593       llvm_unreachable("not a near-miss");
12594       break;
12595     }
12596   }
12597 }
12598 
12599 void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
12600                                     SMLoc IDLoc, OperandVector &Operands) {
12601   SmallVector<NearMissMessage, 4> Messages;
12602   FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
12603 
12604   if (Messages.size() == 0) {
12605     // No near-misses were found, so the best we can do is "invalid
12606     // instruction".
12607     Error(IDLoc, "invalid instruction");
12608   } else if (Messages.size() == 1) {
12609     // One near miss was found, report it as the sole error.
12610     Error(Messages[0].Loc, Messages[0].Message);
12611   } else {
12612     // More than one near miss, so report a generic "invalid instruction"
12613     // error, followed by notes for each of the near-misses.
12614     Error(IDLoc, "invalid instruction, any one of the following would fix this:");
12615     for (auto &M : Messages) {
12616       Note(M.Loc, M.Message);
12617     }
12618   }
12619 }
12620 
12621 bool ARMAsmParser::enableArchExtFeature(StringRef Name, SMLoc &ExtLoc) {
12622   // FIXME: This structure should be moved inside ARMTargetParser
12623   // when we start to table-generate them, and we can use the ARM
12624   // flags below, that were generated by table-gen.
12625   static const struct {
12626     const uint64_t Kind;
12627     const FeatureBitset ArchCheck;
12628     const FeatureBitset Features;
12629   } Extensions[] = {
12630       {ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC}},
12631       {ARM::AEK_AES,
12632        {Feature_HasV8Bit},
12633        {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12634       {ARM::AEK_SHA2,
12635        {Feature_HasV8Bit},
12636        {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12637       {ARM::AEK_CRYPTO,
12638        {Feature_HasV8Bit},
12639        {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12640       {(ARM::AEK_DSP | ARM::AEK_SIMD | ARM::AEK_FP),
12641        {Feature_HasV8_1MMainlineBit},
12642        {ARM::HasMVEFloatOps}},
12643       {ARM::AEK_FP,
12644        {Feature_HasV8Bit},
12645        {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12646       {(ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
12647        {Feature_HasV7Bit, Feature_IsNotMClassBit},
12648        {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
12649       {ARM::AEK_MP,
12650        {Feature_HasV7Bit, Feature_IsNotMClassBit},
12651        {ARM::FeatureMP}},
12652       {ARM::AEK_SIMD,
12653        {Feature_HasV8Bit},
12654        {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12655       {ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone}},
12656       // FIXME: Only available in A-class, isel not predicated
12657       {ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization}},
12658       {ARM::AEK_FP16,
12659        {Feature_HasV8_2aBit},
12660        {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
12661       {ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS}},
12662       {ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB}},
12663       {ARM::AEK_PACBTI, {Feature_HasV8_1MMainlineBit}, {ARM::FeaturePACBTI}},
12664       // FIXME: Unsupported extensions.
12665       {ARM::AEK_OS, {}, {}},
12666       {ARM::AEK_IWMMXT, {}, {}},
12667       {ARM::AEK_IWMMXT2, {}, {}},
12668       {ARM::AEK_MAVERICK, {}, {}},
12669       {ARM::AEK_XSCALE, {}, {}},
12670   };
12671   bool EnableFeature = true;
12672   if (Name.starts_with_insensitive("no")) {
12673     EnableFeature = false;
12674     Name = Name.substr(2);
12675   }
12676   uint64_t FeatureKind = ARM::parseArchExt(Name);
12677   if (FeatureKind == ARM::AEK_INVALID)
12678     return Error(ExtLoc, "unknown architectural extension: " + Name);
12679 
12680   for (const auto &Extension : Extensions) {
12681     if (Extension.Kind != FeatureKind)
12682       continue;
12683 
12684     if (Extension.Features.none())
12685       return Error(ExtLoc, "unsupported architectural extension: " + Name);
12686 
12687     if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
12688       return Error(ExtLoc, "architectural extension '" + Name +
12689                                "' is not "
12690                                "allowed for the current base architecture");
12691 
12692     MCSubtargetInfo &STI = copySTI();
12693     if (EnableFeature) {
12694       STI.SetFeatureBitsTransitively(Extension.Features);
12695     } else {
12696       STI.ClearFeatureBitsTransitively(Extension.Features);
12697     }
12698     FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
12699     setAvailableFeatures(Features);
12700     return true;
12701   }
12702   return false;
12703 }
12704 
12705 /// parseDirectiveArchExtension
12706 ///   ::= .arch_extension [no]feature
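      ///
      /// Illustrative use: ".arch_extension crc" enables the CRC instructions,
      /// and ".arch_extension nocrc" disables them again.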
12707 bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
12708 
12709   MCAsmParser &Parser = getParser();
12710 
12711   if (getLexer().isNot(AsmToken::Identifier))
12712     return Error(getLexer().getLoc(), "expected architecture extension name");
12713 
12714   StringRef Name = Parser.getTok().getString();
12715   SMLoc ExtLoc = Parser.getTok().getLoc();
12716   Lex();
12717 
12718   if (parseEOL())
12719     return true;
12720 
12721   if (Name == "nocrypto") {
12722     enableArchExtFeature("nosha2", ExtLoc);
12723     enableArchExtFeature("noaes", ExtLoc);
12724   }
12725 
12726   if (enableArchExtFeature(Name, ExtLoc))
12727     return false;
12728 
12729   return Error(ExtLoc, "unknown architectural extension: " + Name);
12730 }
12731 
12732 // Define this matcher function after the auto-generated include so we
12733 // have the match class enum definitions.
12734 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
12735                                                   unsigned Kind) {
12736   ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
12737   // If the kind is a token for a literal immediate, check if our asm
12738   // operand matches. This is for InstAliases which have a fixed-value
12739   // immediate in the syntax.
12740   switch (Kind) {
12741   default: break;
12742   case MCK__HASH_0:
12743     if (Op.isImm())
12744       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12745         if (CE->getValue() == 0)
12746           return Match_Success;
12747     break;
12748   case MCK__HASH_8:
12749     if (Op.isImm())
12750       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12751         if (CE->getValue() == 8)
12752           return Match_Success;
12753     break;
12754   case MCK__HASH_16:
12755     if (Op.isImm())
12756       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12757         if (CE->getValue() == 16)
12758           return Match_Success;
12759     break;
12760   case MCK_ModImm:
12761     if (Op.isImm()) {
12762       const MCExpr *SOExpr = Op.getImm();
12763       int64_t Value;
12764       if (!SOExpr->evaluateAsAbsolute(Value))
12765         return Match_Success;
12766       assert((Value >= std::numeric_limits<int32_t>::min() &&
12767               Value <= std::numeric_limits<uint32_t>::max()) &&
12768              "expression value must be representable in 32 bits");
12769     }
12770     break;
12771   case MCK_rGPR:
12772     if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
12773       return Match_Success;
12774     return Match_rGPR;
12775   case MCK_GPRPair:
12776     if (Op.isReg() &&
12777         MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
12778       return Match_Success;
12779     break;
12780   }
12781   return Match_InvalidOperand;
12782 }
12783 
12784 bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
12785                                            StringRef ExtraToken) {
12786   if (!hasMVE())
12787     return false;
12788 
12789   if (MS.isVPTPredicableCDEInstr(Mnemonic) ||
12790       (Mnemonic.starts_with("vldrh") && Mnemonic != "vldrhi") ||
12791       (Mnemonic.starts_with("vmov") &&
12792        !(ExtraToken == ".f16" || ExtraToken == ".32" || ExtraToken == ".16" ||
12793          ExtraToken == ".8")) ||
12794       (Mnemonic.starts_with("vrint") && Mnemonic != "vrintr") ||
12795       (Mnemonic.starts_with("vstrh") && Mnemonic != "vstrhi"))
12796     return true;
12797 
12798   const char *predicable_prefixes[] = {
12799       "vabav",      "vabd",     "vabs",      "vadc",       "vadd",
12800       "vaddlv",     "vaddv",    "vand",      "vbic",       "vbrsr",
12801       "vcadd",      "vcls",     "vclz",      "vcmla",      "vcmp",
12802       "vcmul",      "vctp",     "vcvt",      "vddup",      "vdup",
12803       "vdwdup",     "veor",     "vfma",      "vfmas",      "vfms",
12804       "vhadd",      "vhcadd",   "vhsub",     "vidup",      "viwdup",
12805       "vldrb",      "vldrd",    "vldrw",     "vmax",       "vmaxa",
12806       "vmaxav",     "vmaxnm",   "vmaxnma",   "vmaxnmav",   "vmaxnmv",
12807       "vmaxv",      "vmin",     "vminav",    "vminnm",     "vminnmav",
12808       "vminnmv",    "vminv",    "vmla",      "vmladav",    "vmlaldav",
12809       "vmlalv",     "vmlas",    "vmlav",     "vmlsdav",    "vmlsldav",
12810       "vmovlb",     "vmovlt",   "vmovnb",    "vmovnt",     "vmul",
12811       "vmvn",       "vneg",     "vorn",      "vorr",       "vpnot",
12812       "vpsel",      "vqabs",    "vqadd",     "vqdmladh",   "vqdmlah",
12813       "vqdmlash",   "vqdmlsdh", "vqdmulh",   "vqdmull",    "vqmovn",
12814       "vqmovun",    "vqneg",    "vqrdmladh", "vqrdmlah",   "vqrdmlash",
12815       "vqrdmlsdh",  "vqrdmulh", "vqrshl",    "vqrshrn",    "vqrshrun",
12816       "vqshl",      "vqshrn",   "vqshrun",   "vqsub",      "vrev16",
12817       "vrev32",     "vrev64",   "vrhadd",    "vrmlaldavh", "vrmlalvh",
12818       "vrmlsldavh", "vrmulh",   "vrshl",     "vrshr",      "vrshrn",
12819       "vsbc",       "vshl",     "vshlc",     "vshll",      "vshr",
12820       "vshrn",      "vsli",     "vsri",      "vstrb",      "vstrd",
12821       "vstrw",      "vsub"};
12822 
12823   return std::any_of(
12824       std::begin(predicable_prefixes), std::end(predicable_prefixes),
12825       [&Mnemonic](const char *prefix) { return Mnemonic.starts_with(prefix); });
12826 }
12827