//===- X86Operand.h - Parsed X86 machine instruction ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
#define LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H

#include "MCTargetDesc/X86IntelInstPrinter.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86AsmParserCommon.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <memory>

namespace llvm {

/// X86Operand - Instances of this class represent one operand of a parsed
/// X86 machine instruction.
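///
/// Operands are created with the static Create* factories below; the assembly
/// parser typically collects them along these lines (illustrative only):
/// \code
///   Operands.push_back(X86Operand::CreateReg(X86::EAX, Start, End));
/// \endcode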
struct X86Operand final : public MCParsedAsmOperand {
  enum KindTy { Token, Register, Immediate, Memory, Prefix, DXRegister } Kind;

  SMLoc StartLoc, EndLoc;
  SMLoc OffsetOfLoc;
  StringRef SymName;
  void *OpDecl;
  bool AddressOf;

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    unsigned RegNo;
  };

  struct PrefOp {
    unsigned Prefixes;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct MemOp {
    unsigned SegReg;
    const MCExpr *Disp;
    unsigned BaseReg;
    unsigned IndexReg;
    unsigned Scale;
    unsigned Size;
    unsigned ModeSize;

    /// If the memory operand is unsized and there are multiple instruction
    /// matches, prefer the one with this size.
    unsigned FrontendSize;
  };

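  // The payload below is a discriminated union selected by Kind; a DXRegister
  // operand carries no payload of its own.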
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct ImmOp Imm;
    struct MemOp Mem;
    struct PrefOp Pref;
  };

  X86Operand(KindTy K, SMLoc Start, SMLoc End)
      : Kind(K), StartLoc(Start), EndLoc(End) {}

  StringRef getSymName() override { return SymName; }
  void *getOpDecl() override { return OpDecl; }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getOffsetOfLoc - Get the location of the offset operator.
  SMLoc getOffsetOfLoc() const override { return OffsetOfLoc; }

  void print(raw_ostream &OS) const override {

    auto PrintImmValue = [&](const MCExpr *Val, const char *VName) {
      if (Val->getKind() == MCExpr::Constant) {
        if (auto Imm = cast<MCConstantExpr>(Val)->getValue())
          OS << VName << Imm;
      } else if (Val->getKind() == MCExpr::SymbolRef) {
        if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val)) {
          const MCSymbol &Sym = SRE->getSymbol();
          if (auto SymName = Sym.getName().data())
            OS << VName << SymName;
        }
      }
    };

    switch (Kind) {
    case Token:
      OS << Tok.Data;
      break;
    case Register:
      OS << "Reg:" << X86IntelInstPrinter::getRegisterName(Reg.RegNo);
      break;
    case DXRegister:
      OS << "DXReg";
      break;
    case Immediate:
      PrintImmValue(Imm.Val, "Imm:");
      break;
    case Prefix:
      OS << "Prefix:" << Pref.Prefixes;
      break;
    case Memory:
      OS << "Memory: ModeSize=" << Mem.ModeSize;
      if (Mem.Size)
        OS << ",Size=" << Mem.Size;
      if (Mem.BaseReg)
        OS << ",BaseReg=" << X86IntelInstPrinter::getRegisterName(Mem.BaseReg);
      if (Mem.IndexReg)
        OS << ",IndexReg="
           << X86IntelInstPrinter::getRegisterName(Mem.IndexReg);
      if (Mem.Scale)
        OS << ",Scale=" << Mem.Scale;
      if (Mem.Disp)
        PrintImmValue(Mem.Disp, ",Disp=");
      if (Mem.SegReg)
        OS << ",SegReg=" << X86IntelInstPrinter::getRegisterName(Mem.SegReg);
      break;
    }
  }

  StringRef getToken() const {
    assert(Kind == Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }
  void setTokenValue(StringRef Value) {
    assert(Kind == Token && "Invalid access!");
    Tok.Data = Value.data();
    Tok.Length = Value.size();
  }

  unsigned getReg() const override {
    assert(Kind == Register && "Invalid access!");
    return Reg.RegNo;
  }

  unsigned getPrefix() const {
    assert(Kind == Prefix && "Invalid access!");
    return Pref.Prefixes;
  }

  const MCExpr *getImm() const {
    assert(Kind == Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getMemDisp() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Disp;
  }
  unsigned getMemSegReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.SegReg;
  }
  unsigned getMemBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.BaseReg;
  }
  unsigned getMemIndexReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg;
  }
  unsigned getMemScale() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Scale;
  }
  unsigned getMemModeSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.ModeSize;
  }
  unsigned getMemFrontendSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.FrontendSize;
  }

  bool isToken() const override { return Kind == Token; }

  bool isImm() const override { return Kind == Immediate; }

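  // The isImmSExti<N>i8 / isImmSExti64i32 predicates check that a constant
  // immediate still has the intended value after being truncated to the
  // smaller encoding and sign-extended back to N bits. Non-constant
  // expressions are accepted and left for relaxation/fixups to resolve.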
  bool isImmSExti16i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti16i8Value(CE->getValue());
  }
  bool isImmSExti32i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti32i8Value(CE->getValue());
  }
  bool isImmSExti64i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i8Value(CE->getValue());
  }
  bool isImmSExti64i32() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i32Value(CE->getValue());
  }

  bool isImmUnsignedi4() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, reject it. The immediate byte is shared
    // with a register encoding. We can't have it affected by a relocation.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    return isImmUnsignedi4Value(CE->getValue());
  }

  bool isImmUnsignedi8() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return true;
    return isImmUnsignedi8Value(CE->getValue());
  }

  bool isOffsetOf() const override {
    return OffsetOfLoc.getPointer();
  }

  bool needAddressOf() const override {
    return AddressOf;
  }

  bool isMem() const override { return Kind == Memory; }
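  // Sized memory predicates: an unsized operand (Mem.Size == 0) is allowed to
  // match any width; FrontendSize is then used to break ties between matches.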
  bool isMemUnsized() const {
    return Kind == Memory && Mem.Size == 0;
  }
  bool isMem8() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 8);
  }
  bool isMem16() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 16);
  }
  bool isMem32() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 32);
  }
  bool isMem64() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 64);
  }
  bool isMem80() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 80);
  }
  bool isMem128() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 128);
  }
  bool isMem256() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 256);
  }
  bool isMem512() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 512);
  }
  bool isMemIndexReg(unsigned LowR, unsigned HighR) const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg >= LowR && Mem.IndexReg <= HighR;
  }

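  // Vector-indexed (VSIB) memory operands: the width check is combined with a
  // constraint on the register class supplying the index register.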
  bool isMem64_RC128() const {
    return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem128_RC128() const {
    return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem128_RC256() const {
    return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }
  bool isMem256_RC128() const {
    return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem256_RC256() const {
    return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }

  bool isMem64_RC128X() const {
    return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem128_RC128X() const {
    return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem128_RC256X() const {
    return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem256_RC128X() const {
    return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem256_RC256X() const {
    return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem256_RC512() const {
    return isMem256() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
  }
  bool isMem512_RC256X() const {
    return isMem512() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem512_RC512() const {
    return isMem512() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
  }

  bool isAbsMem() const {
    return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
      !getMemIndexReg() && getMemScale() == 1;
  }
  bool isAVX512RC() const {
    return isImm();
  }

  bool isAbsMem16() const {
    return isAbsMem() && Mem.ModeSize == 16;
  }

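  // String-instruction operands: a source index is [SI/ESI/RSI] and a
  // destination index is [DI/EDI/RDI] (optionally ES-segmented), always with
  // no index register, scale 1, and a zero displacement.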
  bool isSrcIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
      (getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI ||
       getMemBaseReg() == X86::SI) && isa<MCConstantExpr>(getMemDisp()) &&
      cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isSrcIdx8() const {
    return isMem8() && isSrcIdx();
  }
  bool isSrcIdx16() const {
    return isMem16() && isSrcIdx();
  }
  bool isSrcIdx32() const {
    return isMem32() && isSrcIdx();
  }
  bool isSrcIdx64() const {
    return isMem64() && isSrcIdx();
  }

  bool isDstIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
      (getMemSegReg() == 0 || getMemSegReg() == X86::ES) &&
      (getMemBaseReg() == X86::RDI || getMemBaseReg() == X86::EDI ||
       getMemBaseReg() == X86::DI) && isa<MCConstantExpr>(getMemDisp()) &&
      cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isDstIdx8() const {
    return isMem8() && isDstIdx();
  }
  bool isDstIdx16() const {
    return isMem16() && isDstIdx();
  }
  bool isDstIdx32() const {
    return isMem32() && isDstIdx();
  }
  bool isDstIdx64() const {
    return isMem64() && isDstIdx();
  }

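  // Memory-offset (moffs) operands: a bare displacement plus optional segment,
  // with no base or index register. The two numeric suffixes below encode the
  // address-mode size and the access size, respectively.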
  bool isMemOffs() const {
    return Kind == Memory && !getMemBaseReg() && !getMemIndexReg() &&
      getMemScale() == 1;
  }

  bool isMemOffs16_8() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs16_16() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs16_32() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_8() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs32_16() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs32_32() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_64() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 64);
  }
  bool isMemOffs64_8() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs64_16() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs64_32() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs64_64() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 64);
  }

  bool isPrefix() const { return Kind == Prefix; }
  bool isReg() const override { return Kind == Register; }
  bool isDXReg() const { return Kind == DXRegister; }

  bool isGR32orGR64() const {
    return Kind == Register &&
      (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
      X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isVK1Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK1RegClassID].contains(getReg());
  }

  bool isVK2Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK2RegClassID].contains(getReg());
  }

  bool isVK4Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK4RegClassID].contains(getReg());
  }

  bool isVK8Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK8RegClassID].contains(getReg());
  }

  bool isVK16Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK16RegClassID].contains(getReg());
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 32);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addAVX512RCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

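  // A mask-pair operand is written as a single K register in the assembly;
  // map it to the aligned K0_K1..K6_K7 pair register that contains it.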
  void addMaskPairOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Reg = getReg();
    switch (Reg) {
    case X86::K0:
    case X86::K1:
      Reg = X86::K0_K1;
      break;
    case X86::K2:
    case X86::K3:
      Reg = X86::K2_K3;
      break;
    case X86::K4:
    case X86::K5:
      Reg = X86::K4_K5;
      break;
    case X86::K6:
    case X86::K7:
      Reg = X86::K6_K7;
      break;
    }
    Inst.addOperand(MCOperand::createReg(Reg));
  }

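  // A full memory reference expands to five MCOperands, in the order the
  // encoder expects: base register, scale, index register, displacement,
  // segment register.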
  void addMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 5) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    Inst.addOperand(MCOperand::createImm(getMemScale()));
    Inst.addOperand(MCOperand::createReg(getMemIndexReg()));
    addExpr(Inst, getMemDisp());
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addAbsMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
  }

  void addSrcIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addDstIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
  }

  void addMemOffsOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

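  // Factory methods used by the assembly parser to construct operands of each
  // kind.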
  static std::unique_ptr<X86Operand> CreateToken(StringRef Str, SMLoc Loc) {
    SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
    auto Res = std::make_unique<X86Operand>(Token, Loc, EndLoc);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
            bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(),
            StringRef SymName = StringRef(), void *OpDecl = nullptr) {
    auto Res = std::make_unique<X86Operand>(Register, StartLoc, EndLoc);
    Res->Reg.RegNo = RegNo;
    Res->AddressOf = AddressOf;
    Res->OffsetOfLoc = OffsetOfLoc;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateDXReg(SMLoc StartLoc, SMLoc EndLoc) {
    return std::make_unique<X86Operand>(DXRegister, StartLoc, EndLoc);
  }

  static std::unique_ptr<X86Operand>
  CreatePrefix(unsigned Prefixes, SMLoc StartLoc, SMLoc EndLoc) {
    auto Res = std::make_unique<X86Operand>(Prefix, StartLoc, EndLoc);
    Res->Pref.Prefixes = Prefixes;
    return Res;
  }

  static std::unique_ptr<X86Operand> CreateImm(const MCExpr *Val,
                                               SMLoc StartLoc, SMLoc EndLoc) {
    auto Res = std::make_unique<X86Operand>(Immediate, StartLoc, EndLoc);
    Res->Imm.Val = Val;
    return Res;
  }

  /// Create an absolute memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
            unsigned Size = 0, StringRef SymName = StringRef(),
            void *OpDecl = nullptr, unsigned FrontendSize = 0) {
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg   = 0;
    Res->Mem.Disp     = Disp;
    Res->Mem.BaseReg  = 0;
    Res->Mem.IndexReg = 0;
    Res->Mem.Scale    = 1;
    Res->Mem.Size     = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->SymName      = SymName;
    Res->OpDecl       = OpDecl;
    Res->AddressOf    = false;
    return Res;
  }

  /// Create a generalized memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
            unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
            SMLoc EndLoc, unsigned Size = 0, StringRef SymName = StringRef(),
            void *OpDecl = nullptr, unsigned FrontendSize = 0) {
    // We should never have just a displacement; that should be parsed as an
    // absolute memory operand.
    assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");

    // The scale should always be one of {1,2,4,8}.
    assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
           "Invalid scale!");
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg   = SegReg;
    Res->Mem.Disp     = Disp;
    Res->Mem.BaseReg  = BaseReg;
    Res->Mem.IndexReg = IndexReg;
    Res->Mem.Scale    = Scale;
    Res->Mem.Size     = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->SymName      = SymName;
    Res->OpDecl       = OpDecl;
    Res->AddressOf    = false;
    return Res;
  }
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H