//===- X86Operand.h - Parsed X86 machine instruction ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
#define LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H

#include "MCTargetDesc/X86IntelInstPrinter.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86AsmParserCommon.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <memory>

namespace llvm {

/// X86Operand - Instances of this class represent a parsed X86 machine
/// instruction.
struct X86Operand final : public MCParsedAsmOperand {
  enum KindTy { Token, Register, Immediate, Memory, Prefix, DXRegister } Kind;

  SMLoc StartLoc, EndLoc;
  SMLoc OffsetOfLoc;
  StringRef SymName;
  void *OpDecl;
  bool AddressOf;

  /// This is used for inline asm, which may specify both a base reg and an
  /// index reg for a MemOp (e.g. ARR[eax + ecx*4]), so no extra reg can be
  /// used for the MemOp.
  bool UseUpRegs = false;

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    MCRegister RegNo;
  };

  struct PrefOp {
    unsigned Prefixes;
  };

  struct ImmOp {
    const MCExpr *Val;
    bool LocalRef;
  };

  struct MemOp {
    MCRegister SegReg;
    const MCExpr *Disp;
    MCRegister BaseReg;
    MCRegister DefaultBaseReg;
    MCRegister IndexReg;
    unsigned Scale;
    unsigned Size;
    unsigned ModeSize;

    /// If the memory operand is unsized and there are multiple instruction
    /// matches, prefer the one with this size.
    unsigned FrontendSize;

    /// If false, then this operand must be a memory operand for an indirect
    /// branch instruction. Otherwise, this operand may belong to either a
    /// direct or indirect branch instruction.
    bool MaybeDirectBranchDest;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct ImmOp Imm;
    struct MemOp Mem;
    struct PrefOp Pref;
  };

  X86Operand(KindTy K, SMLoc Start, SMLoc End)
      : Kind(K), StartLoc(Start), EndLoc(End), OpDecl(nullptr),
        AddressOf(false) {}

  StringRef getSymName() override { return SymName; }
  void *getOpDecl() override { return OpDecl; }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getOffsetOfLoc - Get the location of the offset operator.
  SMLoc getOffsetOfLoc() const override { return OffsetOfLoc; }

  void print(raw_ostream &OS) const override {
    auto PrintImmValue = [&](const MCExpr *Val, const char *VName) {
      if (Val->getKind() == MCExpr::Constant) {
        if (auto Imm = cast<MCConstantExpr>(Val)->getValue())
          OS << VName << Imm;
      } else if (Val->getKind() == MCExpr::SymbolRef) {
        if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val)) {
          const MCSymbol &Sym = SRE->getSymbol();
          if (const char *SymNameStr = Sym.getName().data())
            OS << VName << SymNameStr;
        }
      }
    };

    switch (Kind) {
    case Token:
      OS << Tok.Data;
      break;
    case Register:
      OS << "Reg:" << X86IntelInstPrinter::getRegisterName(Reg.RegNo);
      break;
    case DXRegister:
      OS << "DXReg";
      break;
    case Immediate:
      PrintImmValue(Imm.Val, "Imm:");
      break;
    case Prefix:
      OS << "Prefix:" << Pref.Prefixes;
      break;
    case Memory:
      OS << "Memory: ModeSize=" << Mem.ModeSize;
      if (Mem.Size)
        OS << ",Size=" << Mem.Size;
      if (Mem.BaseReg)
        OS << ",BaseReg=" << X86IntelInstPrinter::getRegisterName(Mem.BaseReg);
      if (Mem.IndexReg)
        OS << ",IndexReg="
           << X86IntelInstPrinter::getRegisterName(Mem.IndexReg);
      if (Mem.Scale)
        OS << ",Scale=" << Mem.Scale;
      if (Mem.Disp)
        PrintImmValue(Mem.Disp, ",Disp=");
      if (Mem.SegReg)
        OS << ",SegReg=" << X86IntelInstPrinter::getRegisterName(Mem.SegReg);
      break;
    }
  }

  StringRef getToken() const {
    assert(Kind == Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }
  void setTokenValue(StringRef Value) {
    assert(Kind == Token && "Invalid access!");
    Tok.Data = Value.data();
    Tok.Length = Value.size();
  }

  MCRegister getReg() const override {
    assert(Kind == Register && "Invalid access!");
    return Reg.RegNo;
  }

  unsigned getPrefix() const {
    assert(Kind == Prefix && "Invalid access!");
    return Pref.Prefixes;
  }

  const MCExpr *getImm() const {
    assert(Kind == Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getMemDisp() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Disp;
  }
  MCRegister getMemSegReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.SegReg;
  }
  MCRegister getMemBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.BaseReg;
  }
  MCRegister getMemDefaultBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.DefaultBaseReg;
  }
  MCRegister getMemIndexReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg;
  }
  unsigned getMemScale() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Scale;
  }
  unsigned getMemModeSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.ModeSize;
  }
  unsigned getMemFrontendSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.FrontendSize;
  }
  bool isMaybeDirectBranchDest() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.MaybeDirectBranchDest;
  }

  bool isToken() const override { return Kind == Token; }

  bool isImm() const override { return Kind == Immediate; }

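  // The isImmSExti*i* predicates check whether an immediate can use the short
  // sign-extended encoding (e.g. an 8-bit immediate sign-extended to the
  // 16/32/64-bit operation width). Non-constant expressions are accepted and
  // left for relaxation/relocation to resolve.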
  bool isImmSExti16i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti16i8Value(CE->getValue());
  }
  bool isImmSExti32i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti32i8Value(CE->getValue());
  }
  bool isImmSExti64i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i8Value(CE->getValue());
  }
  bool isImmSExti64i32() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i32Value(CE->getValue());
  }

  bool isImmUnsignedi4() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, reject it. The immediate byte is shared
    // with a register encoding. We can't have it affected by a relocation.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    return isImmUnsignedi4Value(CE->getValue());
  }

  bool isImmUnsignedi8() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return true;
    return isImmUnsignedi8Value(CE->getValue());
  }

  bool isOffsetOfLocal() const override { return isImm() && Imm.LocalRef; }

  bool needAddressOf() const override { return AddressOf; }

  bool isMem() const override { return Kind == Memory; }
  bool isMemUnsized() const {
    return Kind == Memory && Mem.Size == 0;
  }
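  // An unsized memory operand (Mem.Size == 0) is accepted by every sized
  // isMemN predicate below; the matcher disambiguates the possible forms
  // later.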
  bool isMem8() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 8);
  }
  bool isMem16() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 16);
  }
  bool isMem32() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 32);
  }
  bool isMem64() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 64);
  }
  bool isMem80() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 80);
  }
  bool isMem128() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 128);
  }
  bool isMem256() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 256);
  }
  bool isMem512() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 512);
  }

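  // RIP/EIP-relative addresses cannot be encoded with a SIB byte, so they are
  // excluded here.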
  bool isSibMem() const {
    return isMem() && Mem.BaseReg != X86::RIP && Mem.BaseReg != X86::EIP;
  }

  bool isMemIndexReg(unsigned LowR, unsigned HighR) const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg >= LowR && Mem.IndexReg <= HighR;
  }

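  // VSIB memory operands (gather/scatter): the index register must be a
  // vector register. The plain _RC128/_RC256 forms only allow
  // XMM0-XMM15/YMM0-YMM15; the *X and _RC512 forms also allow the
  // EVEX-extended registers.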
  bool isMem32_RC128() const {
    return isMem32() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem64_RC128() const {
    return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem32_RC256() const {
    return isMem32() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }
  bool isMem64_RC256() const {
    return isMem64() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }

  bool isMem32_RC128X() const {
    return isMem32() && X86II::isXMMReg(Mem.IndexReg);
  }
  bool isMem64_RC128X() const {
    return isMem64() && X86II::isXMMReg(Mem.IndexReg);
  }
  bool isMem32_RC256X() const {
    return isMem32() && X86II::isYMMReg(Mem.IndexReg);
  }
  bool isMem64_RC256X() const {
    return isMem64() && X86II::isYMMReg(Mem.IndexReg);
  }
  bool isMem32_RC512() const {
    return isMem32() && X86II::isZMMReg(Mem.IndexReg);
  }
  bool isMem64_RC512() const {
    return isMem64() && X86II::isZMMReg(Mem.IndexReg);
  }

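  // 512-bit memory operands whose base (and, for the 32/64-bit forms, index)
  // registers must come from the named general-purpose register class.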
  bool isMem512_GR16() const {
    if (!isMem512())
      return false;
    if (getMemBaseReg() &&
        !X86MCRegisterClasses[X86::GR16RegClassID].contains(getMemBaseReg()))
      return false;
    return true;
  }
  bool isMem512_GR32() const {
    if (!isMem512())
      return false;
    if (getMemBaseReg() &&
        !X86MCRegisterClasses[X86::GR32RegClassID].contains(getMemBaseReg()) &&
        getMemBaseReg() != X86::EIP)
      return false;
    if (getMemIndexReg() &&
        !X86MCRegisterClasses[X86::GR32RegClassID].contains(getMemIndexReg()) &&
        getMemIndexReg() != X86::EIZ)
      return false;
    return true;
  }
  bool isMem512_GR64() const {
    if (!isMem512())
      return false;
    if (getMemBaseReg() &&
        !X86MCRegisterClasses[X86::GR64RegClassID].contains(getMemBaseReg()) &&
        getMemBaseReg() != X86::RIP)
      return false;
    if (getMemIndexReg() &&
        !X86MCRegisterClasses[X86::GR64RegClassID].contains(getMemIndexReg()) &&
        getMemIndexReg() != X86::RIZ)
      return false;
    return true;
  }

  bool isAbsMem() const {
    return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
           !getMemIndexReg() && getMemScale() == 1 && isMaybeDirectBranchDest();
  }

  bool isAVX512RC() const { return isImm(); }

  bool isAbsMem16() const {
    return isAbsMem() && Mem.ModeSize == 16;
  }

  bool isMemUseUpRegs() const override { return UseUpRegs; }

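  // String-instruction "source index" operands: the implicit [SI]/[ESI]/[RSI]
  // form with no index register, scale 1, and a zero displacement.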
  bool isSrcIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
      (getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI ||
       getMemBaseReg() == X86::SI) && isa<MCConstantExpr>(getMemDisp()) &&
      cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isSrcIdx8() const {
    return isMem8() && isSrcIdx();
  }
  bool isSrcIdx16() const {
    return isMem16() && isSrcIdx();
  }
  bool isSrcIdx32() const {
    return isMem32() && isSrcIdx();
  }
  bool isSrcIdx64() const {
    return isMem64() && isSrcIdx();
  }

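  // String-instruction "destination index" operands: the implicit
  // [DI]/[EDI]/[RDI] form, optionally with an explicit ES segment override.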
  bool isDstIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
           (!getMemSegReg() || getMemSegReg() == X86::ES) &&
           (getMemBaseReg() == X86::RDI || getMemBaseReg() == X86::EDI ||
            getMemBaseReg() == X86::DI) &&
           isa<MCConstantExpr>(getMemDisp()) &&
           cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isDstIdx8() const {
    return isMem8() && isDstIdx();
  }
  bool isDstIdx16() const {
    return isMem16() && isDstIdx();
  }
  bool isDstIdx32() const {
    return isMem32() && isDstIdx();
  }
  bool isDstIdx64() const {
    return isMem64() && isDstIdx();
  }

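  // Memory-offset (moffs) operands: a bare displacement with no base or index
  // register, as used by the accumulator forms of MOV.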
  bool isMemOffs() const {
    return Kind == Memory && !getMemBaseReg() && !getMemIndexReg() &&
      getMemScale() == 1;
  }

  bool isMemOffs16_8() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs16_16() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs16_32() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_8() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs32_16() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs32_32() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_64() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 64);
  }
  bool isMemOffs64_8() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs64_16() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs64_32() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs64_64() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 64);
  }

  bool isPrefix() const { return Kind == Prefix; }
  bool isReg() const override { return Kind == Register; }
  bool isDXReg() const { return Kind == DXRegister; }

  bool isGR32orGR64() const {
    return Kind == Register &&
      (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isGR16orGR32orGR64() const {
    return Kind == Register &&
      (X86MCRegisterClasses[X86::GR16RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isVectorReg() const {
    return Kind == Register &&
           (X86MCRegisterClasses[X86::VR64RegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR128XRegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR256XRegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR512RegClassID].contains(getReg()));
  }

  bool isVK1Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK1RegClassID].contains(getReg());
  }

  bool isVK2Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK2RegClassID].contains(getReg());
  }

  bool isVK4Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK4RegClassID].contains(getReg());
  }

  bool isVK8Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK8RegClassID].contains(getReg());
  }

  bool isVK16Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK16RegClassID].contains(getReg());
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 32);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addGR16orGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR32RegClassID].contains(RegNo) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 16);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addAVX512RCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

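  // Emit the K0_K1..K6_K7 pair register that contains the parsed mask
  // register.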
  void addMaskPairOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister Reg = getReg();
    switch (Reg.id()) {
    case X86::K0:
    case X86::K1:
      Reg = X86::K0_K1;
      break;
    case X86::K2:
    case X86::K3:
      Reg = X86::K2_K3;
      break;
    case X86::K4:
    case X86::K5:
      Reg = X86::K4_K5;
      break;
    case X86::K6:
    case X86::K7:
      Reg = X86::K6_K7;
      break;
    }
    Inst.addOperand(MCOperand::createReg(Reg));
  }

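  // AMX tile pairs: a single TMM register is emitted as the even/odd
  // TMM0_TMM1..TMM6_TMM7 pair register containing it.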
  bool isTILEPair() const {
    return Kind == Register &&
           X86MCRegisterClasses[X86::TILERegClassID].contains(getReg());
  }

  void addTILEPairOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Reg = getReg();
    switch (Reg) {
    default:
      llvm_unreachable("Invalid tile register!");
    case X86::TMM0:
    case X86::TMM1:
      Reg = X86::TMM0_TMM1;
      break;
    case X86::TMM2:
    case X86::TMM3:
      Reg = X86::TMM2_TMM3;
      break;
    case X86::TMM4:
    case X86::TMM5:
      Reg = X86::TMM4_TMM5;
      break;
    case X86::TMM6:
    case X86::TMM7:
      Reg = X86::TMM6_TMM7;
      break;
    }
    Inst.addOperand(MCOperand::createReg(Reg));
  }

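  // A full memory reference is emitted as five operands, in order: base
  // register, scale, index register, displacement, segment register.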
  void addMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 5) && "Invalid number of operands!");
    if (getMemBaseReg())
      Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    else
      Inst.addOperand(MCOperand::createReg(getMemDefaultBaseReg()));
    Inst.addOperand(MCOperand::createImm(getMemScale()));
    Inst.addOperand(MCOperand::createReg(getMemIndexReg()));
    addExpr(Inst, getMemDisp());
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addAbsMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
  }

  void addSrcIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addDstIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
  }

  void addMemOffsOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

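  // Factory methods used by the assembly parser to construct operands.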
  static std::unique_ptr<X86Operand> CreateToken(StringRef Str, SMLoc Loc) {
    SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
    auto Res = std::make_unique<X86Operand>(Token, Loc, EndLoc);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateReg(MCRegister Reg, SMLoc StartLoc, SMLoc EndLoc,
            bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(),
            StringRef SymName = StringRef(), void *OpDecl = nullptr) {
    auto Res = std::make_unique<X86Operand>(Register, StartLoc, EndLoc);
    Res->Reg.RegNo = Reg;
    Res->AddressOf = AddressOf;
    Res->OffsetOfLoc = OffsetOfLoc;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateDXReg(SMLoc StartLoc, SMLoc EndLoc) {
    return std::make_unique<X86Operand>(DXRegister, StartLoc, EndLoc);
  }

  static std::unique_ptr<X86Operand>
  CreatePrefix(unsigned Prefixes, SMLoc StartLoc, SMLoc EndLoc) {
    auto Res = std::make_unique<X86Operand>(Prefix, StartLoc, EndLoc);
    Res->Pref.Prefixes = Prefixes;
    return Res;
  }

  static std::unique_ptr<X86Operand> CreateImm(const MCExpr *Val,
                                               SMLoc StartLoc, SMLoc EndLoc,
                                               StringRef SymName = StringRef(),
                                               void *OpDecl = nullptr,
                                               bool GlobalRef = true) {
    auto Res = std::make_unique<X86Operand>(Immediate, StartLoc, EndLoc);
    Res->Imm.Val      = Val;
    Res->Imm.LocalRef = !GlobalRef;
    Res->SymName      = SymName;
    Res->OpDecl       = OpDecl;
    Res->AddressOf    = true;
    return Res;
  }

  /// Create an absolute memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
            unsigned Size = 0, StringRef SymName = StringRef(),
            void *OpDecl = nullptr, unsigned FrontendSize = 0,
            bool UseUpRegs = false, bool MaybeDirectBranchDest = true) {
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg = MCRegister();
    Res->Mem.Disp     = Disp;
    Res->Mem.BaseReg = MCRegister();
    Res->Mem.DefaultBaseReg = MCRegister();
    Res->Mem.IndexReg = MCRegister();
    Res->Mem.Scale    = 1;
    Res->Mem.Size     = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->Mem.MaybeDirectBranchDest = MaybeDirectBranchDest;
    Res->UseUpRegs = UseUpRegs;
    Res->SymName      = SymName;
    Res->OpDecl       = OpDecl;
    Res->AddressOf    = false;
    return Res;
  }

  /// Create a generalized memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, MCRegister SegReg, const MCExpr *Disp,
            MCRegister BaseReg, MCRegister IndexReg, unsigned Scale,
            SMLoc StartLoc, SMLoc EndLoc, unsigned Size = 0,
            MCRegister DefaultBaseReg = MCRegister(),
            StringRef SymName = StringRef(), void *OpDecl = nullptr,
            unsigned FrontendSize = 0, bool UseUpRegs = false,
            bool MaybeDirectBranchDest = true) {
    // We should never just have a displacement; that should be parsed as an
    // absolute memory operand.
    assert((SegReg || BaseReg || IndexReg || DefaultBaseReg) &&
           "Invalid memory operand!");

    // The scale should always be one of {1,2,4,8}.
    assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
           "Invalid scale!");
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg   = SegReg;
    Res->Mem.Disp     = Disp;
    Res->Mem.BaseReg  = BaseReg;
    Res->Mem.DefaultBaseReg = DefaultBaseReg;
    Res->Mem.IndexReg = IndexReg;
    Res->Mem.Scale    = Scale;
    Res->Mem.Size     = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->Mem.MaybeDirectBranchDest = MaybeDirectBranchDest;
    Res->UseUpRegs = UseUpRegs;
    Res->SymName      = SymName;
    Res->OpDecl       = OpDecl;
    Res->AddressOf    = false;
    return Res;
  }
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H