1 //===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the X86MCCodeEmitter class. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "MCTargetDesc/X86BaseInfo.h" 14 #include "MCTargetDesc/X86FixupKinds.h" 15 #include "MCTargetDesc/X86MCTargetDesc.h" 16 #include "llvm/ADT/SmallVector.h" 17 #include "llvm/MC/MCCodeEmitter.h" 18 #include "llvm/MC/MCContext.h" 19 #include "llvm/MC/MCExpr.h" 20 #include "llvm/MC/MCFixup.h" 21 #include "llvm/MC/MCInst.h" 22 #include "llvm/MC/MCInstrDesc.h" 23 #include "llvm/MC/MCInstrInfo.h" 24 #include "llvm/MC/MCRegisterInfo.h" 25 #include "llvm/MC/MCSubtargetInfo.h" 26 #include "llvm/MC/MCSymbol.h" 27 #include "llvm/Support/Casting.h" 28 #include "llvm/Support/ErrorHandling.h" 29 #include <cassert> 30 #include <cstdint> 31 #include <cstdlib> 32 33 using namespace llvm; 34 35 #define DEBUG_TYPE "mccodeemitter" 36 37 namespace { 38 39 enum PrefixKind { None, REX, REX2, XOP, VEX2, VEX3, EVEX }; 40 41 static void emitByte(uint8_t C, SmallVectorImpl<char> &CB) { CB.push_back(C); } 42 43 class X86OpcodePrefixHelper { 44 // REX (1 byte) 45 // +-----+ +------+ 46 // | 40H | | WRXB | 47 // +-----+ +------+ 48 49 // REX2 (2 bytes) 50 // +-----+ +-------------------+ 51 // | D5H | | M | R'X'B' | WRXB | 52 // +-----+ +-------------------+ 53 54 // XOP (3-byte) 55 // +-----+ +--------------+ +-------------------+ 56 // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp | 57 // +-----+ +--------------+ +-------------------+ 58 59 // VEX2 (2 bytes) 60 // +-----+ +-------------------+ 61 // | C5h | | R | vvvv | L | pp | 62 // +-----+ +-------------------+ 63 64 // VEX3 (3 bytes) 65 // +-----+ +--------------+ +-------------------+ 66 // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp | 67 // +-----+ +--------------+ +-------------------+ 68 69 // VEX_R: opcode externsion equivalent to REX.R in 70 // 1's complement (inverted) form 71 // 72 // 1: Same as REX_R=0 (must be 1 in 32-bit mode) 73 // 0: Same as REX_R=1 (64 bit mode only) 74 75 // VEX_X: equivalent to REX.X, only used when a 76 // register is used for index in SIB Byte. 77 // 78 // 1: Same as REX.X=0 (must be 1 in 32-bit mode) 79 // 0: Same as REX.X=1 (64-bit mode only) 80 81 // VEX_B: 82 // 1: Same as REX_B=0 (ignored in 32-bit mode) 83 // 0: Same as REX_B=1 (64 bit mode only) 84 85 // VEX_W: opcode specific (use like REX.W, or used for 86 // opcode extension, or ignored, depending on the opcode byte) 87 88 // VEX_5M (VEX m-mmmmm field): 89 // 90 // 0b00000: Reserved for future use 91 // 0b00001: implied 0F leading opcode 92 // 0b00010: implied 0F 38 leading opcode bytes 93 // 0b00011: implied 0F 3A leading opcode bytes 94 // 0b00100: Reserved for future use 95 // 0b00101: VEX MAP5 96 // 0b00110: VEX MAP6 97 // 0b00111: VEX MAP7 98 // 0b00111-0b11111: Reserved for future use 99 // 0b01000: XOP map select - 08h instructions with imm byte 100 // 0b01001: XOP map select - 09h instructions with no imm byte 101 // 0b01010: XOP map select - 0Ah instructions with imm dword 102 103 // VEX_4V (VEX vvvv field): a register specifier 104 // (in 1's complement form) or 1111 if unused. 105 106 // VEX_PP: opcode extension providing equivalent 107 // functionality of a SIMD prefix 108 // 0b00: None 109 // 0b01: 66 110 // 0b10: F3 111 // 0b11: F2 112 113 // EVEX (4 bytes) 114 // +-----+ +---------------+ +-------------------+ +------------------------+ 115 // | 62h | | RXBR' | B'mmm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa | 116 // +-----+ +---------------+ +-------------------+ +------------------------+ 117 118 // EVEX_L2/VEX_L (Vector Length): 119 // L2 L 120 // 0 0: scalar or 128-bit vector 121 // 0 1: 256-bit vector 122 // 1 0: 512-bit vector 123 124 // 32-Register Support in 64-bit Mode Using EVEX with Embedded REX/REX2 Bits: 125 // 126 // +----------+---------+--------+-----------+---------+--------------+ 127 // | | 4 | 3 | [2:0] | Type | Common Usage | 128 // +----------+---------+--------+-----------+---------+--------------+ 129 // | REG | EVEX_R' | EVEX_R | modrm.reg | GPR, VR | Dest or Src | 130 // | VVVV | EVEX_v' | EVEX.vvvv | GPR, VR | Dest or Src | 131 // | RM (VR) | EVEX_X | EVEX_B | modrm.r/m | VR | Dest or Src | 132 // | RM (GPR) | EVEX_B' | EVEX_B | modrm.r/m | GPR | Dest or Src | 133 // | BASE | EVEX_B' | EVEX_B | modrm.r/m | GPR | MA | 134 // | INDEX | EVEX_U | EVEX_X | sib.index | GPR | MA | 135 // | VIDX | EVEX_v' | EVEX_X | sib.index | VR | VSIB MA | 136 // +----------+---------+--------+-----------+---------+--------------+ 137 // 138 // * GPR - General-purpose register 139 // * VR - Vector register 140 // * VIDX - Vector index 141 // * VSIB - Vector SIB 142 // * MA - Memory addressing 143 144 private: 145 unsigned W : 1; 146 unsigned R : 1; 147 unsigned X : 1; 148 unsigned B : 1; 149 unsigned M : 1; 150 unsigned R2 : 1; 151 unsigned X2 : 1; 152 unsigned B2 : 1; 153 unsigned VEX_4V : 4; 154 unsigned VEX_L : 1; 155 unsigned VEX_PP : 2; 156 unsigned VEX_5M : 5; 157 unsigned EVEX_z : 1; 158 unsigned EVEX_L2 : 1; 159 unsigned EVEX_b : 1; 160 unsigned EVEX_V2 : 1; 161 unsigned EVEX_aaa : 3; 162 PrefixKind Kind = None; 163 const MCRegisterInfo &MRI; 164 165 unsigned getRegEncoding(const MCInst &MI, unsigned OpNum) const { 166 return MRI.getEncodingValue(MI.getOperand(OpNum).getReg()); 167 } 168 169 void setR(unsigned Encoding) { R = Encoding >> 3 & 1; } 170 void setR2(unsigned Encoding) { 171 R2 = Encoding >> 4 & 1; 172 assert((!R2 || (Kind <= REX2 || Kind == EVEX)) && "invalid setting"); 173 } 174 void setX(unsigned Encoding) { X = Encoding >> 3 & 1; } 175 void setX2(unsigned Encoding) { 176 assert((Kind <= REX2 || Kind == EVEX) && "invalid setting"); 177 X2 = Encoding >> 4 & 1; 178 } 179 void setB(unsigned Encoding) { B = Encoding >> 3 & 1; } 180 void setB2(unsigned Encoding) { 181 assert((Kind <= REX2 || Kind == EVEX) && "invalid setting"); 182 B2 = Encoding >> 4 & 1; 183 } 184 void set4V(unsigned Encoding) { VEX_4V = Encoding & 0xf; } 185 void setV2(unsigned Encoding) { EVEX_V2 = Encoding >> 4 & 1; } 186 187 public: 188 void setW(bool V) { W = V; } 189 void setR(const MCInst &MI, unsigned OpNum) { 190 setR(getRegEncoding(MI, OpNum)); 191 } 192 void setX(const MCInst &MI, unsigned OpNum, unsigned Shift = 3) { 193 MCRegister Reg = MI.getOperand(OpNum).getReg(); 194 // X is used to extend vector register only when shift is not 3. 195 if (Shift != 3 && X86II::isApxExtendedReg(Reg)) 196 return; 197 unsigned Encoding = MRI.getEncodingValue(Reg); 198 X = Encoding >> Shift & 1; 199 } 200 void setB(const MCInst &MI, unsigned OpNum) { 201 B = getRegEncoding(MI, OpNum) >> 3 & 1; 202 } 203 void set4V(const MCInst &MI, unsigned OpNum, bool IsImm = false) { 204 // OF, SF, ZF and CF reuse VEX_4V bits but are not reversed 205 if (IsImm) 206 set4V(~(MI.getOperand(OpNum).getImm())); 207 else 208 set4V(getRegEncoding(MI, OpNum)); 209 } 210 void setL(bool V) { VEX_L = V; } 211 void setPP(unsigned V) { VEX_PP = V; } 212 void set5M(unsigned V) { VEX_5M = V; } 213 void setR2(const MCInst &MI, unsigned OpNum) { 214 setR2(getRegEncoding(MI, OpNum)); 215 } 216 void setRR2(const MCInst &MI, unsigned OpNum) { 217 unsigned Encoding = getRegEncoding(MI, OpNum); 218 setR(Encoding); 219 setR2(Encoding); 220 } 221 void setM(bool V) { M = V; } 222 void setXX2(const MCInst &MI, unsigned OpNum) { 223 MCRegister Reg = MI.getOperand(OpNum).getReg(); 224 unsigned Encoding = MRI.getEncodingValue(Reg); 225 setX(Encoding); 226 // Index can be a vector register while X2 is used to extend GPR only. 227 if (Kind <= REX2 || X86II::isApxExtendedReg(Reg)) 228 setX2(Encoding); 229 } 230 void setBB2(const MCInst &MI, unsigned OpNum) { 231 MCRegister Reg = MI.getOperand(OpNum).getReg(); 232 unsigned Encoding = MRI.getEncodingValue(Reg); 233 setB(Encoding); 234 // Base can be a vector register while B2 is used to extend GPR only 235 if (Kind <= REX2 || X86II::isApxExtendedReg(Reg)) 236 setB2(Encoding); 237 } 238 void setZ(bool V) { EVEX_z = V; } 239 void setL2(bool V) { EVEX_L2 = V; } 240 void setEVEX_b(bool V) { EVEX_b = V; } 241 void setEVEX_U(bool V) { X2 = V; } 242 void setV2(const MCInst &MI, unsigned OpNum, bool HasVEX_4V) { 243 // Only needed with VSIB which don't use VVVV. 244 if (HasVEX_4V) 245 return; 246 MCRegister Reg = MI.getOperand(OpNum).getReg(); 247 if (X86II::isApxExtendedReg(Reg)) 248 return; 249 setV2(MRI.getEncodingValue(Reg)); 250 } 251 void set4VV2(const MCInst &MI, unsigned OpNum) { 252 unsigned Encoding = getRegEncoding(MI, OpNum); 253 set4V(Encoding); 254 setV2(Encoding); 255 } 256 void setAAA(const MCInst &MI, unsigned OpNum) { 257 EVEX_aaa = getRegEncoding(MI, OpNum); 258 } 259 void setNF(bool V) { EVEX_aaa |= V << 2; } 260 void setSC(const MCInst &MI, unsigned OpNum) { 261 unsigned Encoding = MI.getOperand(OpNum).getImm(); 262 EVEX_V2 = ~(Encoding >> 3) & 0x1; 263 EVEX_aaa = Encoding & 0x7; 264 } 265 266 X86OpcodePrefixHelper(const MCRegisterInfo &MRI) 267 : W(0), R(0), X(0), B(0), M(0), R2(0), X2(0), B2(0), VEX_4V(0), VEX_L(0), 268 VEX_PP(0), VEX_5M(0), EVEX_z(0), EVEX_L2(0), EVEX_b(0), EVEX_V2(0), 269 EVEX_aaa(0), MRI(MRI) {} 270 271 void setLowerBound(PrefixKind K) { Kind = K; } 272 273 PrefixKind determineOptimalKind() { 274 switch (Kind) { 275 case None: 276 // Not M bit here by intention b/c 277 // 1. No guarantee that REX2 is supported by arch w/o explict EGPR 278 // 2. REX2 is longer than 0FH 279 Kind = (R2 | X2 | B2) ? REX2 : (W | R | X | B) ? REX : None; 280 break; 281 case REX: 282 Kind = (R2 | X2 | B2) ? REX2 : REX; 283 break; 284 case REX2: 285 case XOP: 286 case VEX3: 287 case EVEX: 288 break; 289 case VEX2: 290 Kind = (W | X | B | (VEX_5M != 1)) ? VEX3 : VEX2; 291 break; 292 } 293 return Kind; 294 } 295 296 void emit(SmallVectorImpl<char> &CB) const { 297 uint8_t FirstPayload = 298 ((~R) & 0x1) << 7 | ((~X) & 0x1) << 6 | ((~B) & 0x1) << 5; 299 uint8_t LastPayload = ((~VEX_4V) & 0xf) << 3 | VEX_L << 2 | VEX_PP; 300 switch (Kind) { 301 case None: 302 return; 303 case REX: 304 emitByte(0x40 | W << 3 | R << 2 | X << 1 | B, CB); 305 return; 306 case REX2: 307 emitByte(0xD5, CB); 308 emitByte(M << 7 | R2 << 6 | X2 << 5 | B2 << 4 | W << 3 | R << 2 | X << 1 | 309 B, 310 CB); 311 return; 312 case VEX2: 313 emitByte(0xC5, CB); 314 emitByte(((~R) & 1) << 7 | LastPayload, CB); 315 return; 316 case VEX3: 317 case XOP: 318 emitByte(Kind == VEX3 ? 0xC4 : 0x8F, CB); 319 emitByte(FirstPayload | VEX_5M, CB); 320 emitByte(W << 7 | LastPayload, CB); 321 return; 322 case EVEX: 323 assert(VEX_5M && !(VEX_5M & 0x8) && "invalid mmm fields for EVEX!"); 324 emitByte(0x62, CB); 325 emitByte(FirstPayload | ((~R2) & 0x1) << 4 | B2 << 3 | VEX_5M, CB); 326 emitByte(W << 7 | ((~VEX_4V) & 0xf) << 3 | ((~X2) & 0x1) << 2 | VEX_PP, 327 CB); 328 emitByte(EVEX_z << 7 | EVEX_L2 << 6 | VEX_L << 5 | EVEX_b << 4 | 329 ((~EVEX_V2) & 0x1) << 3 | EVEX_aaa, 330 CB); 331 return; 332 } 333 } 334 }; 335 336 class X86MCCodeEmitter : public MCCodeEmitter { 337 const MCInstrInfo &MCII; 338 MCContext &Ctx; 339 340 public: 341 X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx) 342 : MCII(mcii), Ctx(ctx) {} 343 X86MCCodeEmitter(const X86MCCodeEmitter &) = delete; 344 X86MCCodeEmitter &operator=(const X86MCCodeEmitter &) = delete; 345 ~X86MCCodeEmitter() override = default; 346 347 void emitPrefix(const MCInst &MI, SmallVectorImpl<char> &CB, 348 const MCSubtargetInfo &STI) const; 349 350 void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB, 351 SmallVectorImpl<MCFixup> &Fixups, 352 const MCSubtargetInfo &STI) const override; 353 354 private: 355 unsigned getX86RegNum(const MCOperand &MO) const; 356 357 unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const; 358 359 void emitImmediate(const MCOperand &Disp, SMLoc Loc, unsigned ImmSize, 360 MCFixupKind FixupKind, uint64_t StartByte, 361 SmallVectorImpl<char> &CB, 362 SmallVectorImpl<MCFixup> &Fixups, int ImmOffset = 0) const; 363 364 void emitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld, 365 SmallVectorImpl<char> &CB) const; 366 367 void emitSIBByte(unsigned SS, unsigned Index, unsigned Base, 368 SmallVectorImpl<char> &CB) const; 369 370 void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField, 371 uint64_t TSFlags, PrefixKind Kind, uint64_t StartByte, 372 SmallVectorImpl<char> &CB, 373 SmallVectorImpl<MCFixup> &Fixups, 374 const MCSubtargetInfo &STI, 375 bool ForceSIB = false) const; 376 377 PrefixKind emitPrefixImpl(unsigned &CurOp, const MCInst &MI, 378 const MCSubtargetInfo &STI, 379 SmallVectorImpl<char> &CB) const; 380 381 PrefixKind emitVEXOpcodePrefix(int MemOperand, const MCInst &MI, 382 const MCSubtargetInfo &STI, 383 SmallVectorImpl<char> &CB) const; 384 385 void emitSegmentOverridePrefix(unsigned SegOperand, const MCInst &MI, 386 SmallVectorImpl<char> &CB) const; 387 388 PrefixKind emitOpcodePrefix(int MemOperand, const MCInst &MI, 389 const MCSubtargetInfo &STI, 390 SmallVectorImpl<char> &CB) const; 391 392 PrefixKind emitREXPrefix(int MemOperand, const MCInst &MI, 393 const MCSubtargetInfo &STI, 394 SmallVectorImpl<char> &CB) const; 395 }; 396 397 } // end anonymous namespace 398 399 static uint8_t modRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM) { 400 assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!"); 401 return RM | (RegOpcode << 3) | (Mod << 6); 402 } 403 404 static void emitConstant(uint64_t Val, unsigned Size, 405 SmallVectorImpl<char> &CB) { 406 // Output the constant in little endian byte order. 407 for (unsigned i = 0; i != Size; ++i) { 408 emitByte(Val & 255, CB); 409 Val >>= 8; 410 } 411 } 412 413 /// Determine if this immediate can fit in a disp8 or a compressed disp8 for 414 /// EVEX instructions. \p will be set to the value to pass to the ImmOffset 415 /// parameter of emitImmediate. 416 static bool isDispOrCDisp8(uint64_t TSFlags, int Value, int &ImmOffset) { 417 bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX; 418 419 unsigned CD8_Scale = 420 (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift; 421 CD8_Scale = CD8_Scale ? 1U << (CD8_Scale - 1) : 0U; 422 if (!HasEVEX || !CD8_Scale) 423 return isInt<8>(Value); 424 425 assert(isPowerOf2_32(CD8_Scale) && "Unexpected CD8 scale!"); 426 if (Value & (CD8_Scale - 1)) // Unaligned offset 427 return false; 428 429 int CDisp8 = Value / static_cast<int>(CD8_Scale); 430 if (!isInt<8>(CDisp8)) 431 return false; 432 433 // ImmOffset will be added to Value in emitImmediate leaving just CDisp8. 434 ImmOffset = CDisp8 - Value; 435 return true; 436 } 437 438 /// \returns the appropriate fixup kind to use for an immediate in an 439 /// instruction with the specified TSFlags. 440 static MCFixupKind getImmFixupKind(uint64_t TSFlags) { 441 unsigned Size = X86II::getSizeOfImm(TSFlags); 442 bool isPCRel = X86II::isImmPCRel(TSFlags); 443 444 if (X86II::isImmSigned(TSFlags)) { 445 switch (Size) { 446 default: 447 llvm_unreachable("Unsupported signed fixup size!"); 448 case 4: 449 return MCFixupKind(X86::reloc_signed_4byte); 450 } 451 } 452 return MCFixup::getKindForSize(Size, isPCRel); 453 } 454 455 enum GlobalOffsetTableExprKind { GOT_None, GOT_Normal, GOT_SymDiff }; 456 457 /// Check if this expression starts with _GLOBAL_OFFSET_TABLE_ and if it is 458 /// of the form _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on 459 /// ELF i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only simple case that 460 /// are know to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start of a 461 /// binary expression. 462 static GlobalOffsetTableExprKind 463 startsWithGlobalOffsetTable(const MCExpr *Expr) { 464 const MCExpr *RHS = nullptr; 465 if (Expr->getKind() == MCExpr::Binary) { 466 const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr); 467 Expr = BE->getLHS(); 468 RHS = BE->getRHS(); 469 } 470 471 if (Expr->getKind() != MCExpr::SymbolRef) 472 return GOT_None; 473 474 const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr); 475 const MCSymbol &S = Ref->getSymbol(); 476 if (S.getName() != "_GLOBAL_OFFSET_TABLE_") 477 return GOT_None; 478 if (RHS && RHS->getKind() == MCExpr::SymbolRef) 479 return GOT_SymDiff; 480 return GOT_Normal; 481 } 482 483 static bool hasSecRelSymbolRef(const MCExpr *Expr) { 484 if (Expr->getKind() == MCExpr::SymbolRef) { 485 const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr); 486 return Ref->getKind() == MCSymbolRefExpr::VK_SECREL; 487 } 488 return false; 489 } 490 491 static bool isPCRel32Branch(const MCInst &MI, const MCInstrInfo &MCII) { 492 unsigned Opcode = MI.getOpcode(); 493 const MCInstrDesc &Desc = MCII.get(Opcode); 494 if ((Opcode != X86::CALL64pcrel32 && Opcode != X86::JMP_4 && 495 Opcode != X86::JCC_4) || 496 getImmFixupKind(Desc.TSFlags) != FK_PCRel_4) 497 return false; 498 499 unsigned CurOp = X86II::getOperandBias(Desc); 500 const MCOperand &Op = MI.getOperand(CurOp); 501 if (!Op.isExpr()) 502 return false; 503 504 const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Op.getExpr()); 505 return Ref && Ref->getKind() == MCSymbolRefExpr::VK_None; 506 } 507 508 unsigned X86MCCodeEmitter::getX86RegNum(const MCOperand &MO) const { 509 return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7; 510 } 511 512 unsigned X86MCCodeEmitter::getX86RegEncoding(const MCInst &MI, 513 unsigned OpNum) const { 514 return Ctx.getRegisterInfo()->getEncodingValue(MI.getOperand(OpNum).getReg()); 515 } 516 517 void X86MCCodeEmitter::emitImmediate(const MCOperand &DispOp, SMLoc Loc, 518 unsigned Size, MCFixupKind FixupKind, 519 uint64_t StartByte, 520 SmallVectorImpl<char> &CB, 521 SmallVectorImpl<MCFixup> &Fixups, 522 int ImmOffset) const { 523 const MCExpr *Expr = nullptr; 524 if (DispOp.isImm()) { 525 // If this is a simple integer displacement that doesn't require a 526 // relocation, emit it now. 527 if (FixupKind != FK_PCRel_1 && FixupKind != FK_PCRel_2 && 528 FixupKind != FK_PCRel_4) { 529 emitConstant(DispOp.getImm() + ImmOffset, Size, CB); 530 return; 531 } 532 Expr = MCConstantExpr::create(DispOp.getImm(), Ctx); 533 } else { 534 Expr = DispOp.getExpr(); 535 } 536 537 // If we have an immoffset, add it to the expression. 538 if ((FixupKind == FK_Data_4 || FixupKind == FK_Data_8 || 539 FixupKind == MCFixupKind(X86::reloc_signed_4byte))) { 540 GlobalOffsetTableExprKind Kind = startsWithGlobalOffsetTable(Expr); 541 if (Kind != GOT_None) { 542 assert(ImmOffset == 0); 543 544 if (Size == 8) { 545 FixupKind = MCFixupKind(X86::reloc_global_offset_table8); 546 } else { 547 assert(Size == 4); 548 FixupKind = MCFixupKind(X86::reloc_global_offset_table); 549 } 550 551 if (Kind == GOT_Normal) 552 ImmOffset = static_cast<int>(CB.size() - StartByte); 553 } else if (Expr->getKind() == MCExpr::SymbolRef) { 554 if (hasSecRelSymbolRef(Expr)) { 555 FixupKind = MCFixupKind(FK_SecRel_4); 556 } 557 } else if (Expr->getKind() == MCExpr::Binary) { 558 const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr *>(Expr); 559 if (hasSecRelSymbolRef(Bin->getLHS()) || 560 hasSecRelSymbolRef(Bin->getRHS())) { 561 FixupKind = MCFixupKind(FK_SecRel_4); 562 } 563 } 564 } 565 566 // If the fixup is pc-relative, we need to bias the value to be relative to 567 // the start of the field, not the end of the field. 568 if (FixupKind == FK_PCRel_4 || 569 FixupKind == MCFixupKind(X86::reloc_riprel_4byte) || 570 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) || 571 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load_rex2) || 572 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) || 573 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex) || 574 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex2) || 575 FixupKind == MCFixupKind(X86::reloc_branch_4byte_pcrel) || 576 FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_evex)) { 577 ImmOffset -= 4; 578 // If this is a pc-relative load off _GLOBAL_OFFSET_TABLE_: 579 // leaq _GLOBAL_OFFSET_TABLE_(%rip), %r15 580 // this needs to be a GOTPC32 relocation. 581 if (startsWithGlobalOffsetTable(Expr) != GOT_None) 582 FixupKind = MCFixupKind(X86::reloc_global_offset_table); 583 } 584 585 if (FixupKind == FK_PCRel_2) 586 ImmOffset -= 2; 587 if (FixupKind == FK_PCRel_1) 588 ImmOffset -= 1; 589 590 if (ImmOffset) 591 Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx), 592 Ctx); 593 594 // Emit a symbolic constant as a fixup and 4 zeros. 595 Fixups.push_back(MCFixup::create(static_cast<uint32_t>(CB.size() - StartByte), 596 Expr, FixupKind, Loc)); 597 emitConstant(0, Size, CB); 598 } 599 600 void X86MCCodeEmitter::emitRegModRMByte(const MCOperand &ModRMReg, 601 unsigned RegOpcodeFld, 602 SmallVectorImpl<char> &CB) const { 603 emitByte(modRMByte(3, RegOpcodeFld, getX86RegNum(ModRMReg)), CB); 604 } 605 606 void X86MCCodeEmitter::emitSIBByte(unsigned SS, unsigned Index, unsigned Base, 607 SmallVectorImpl<char> &CB) const { 608 // SIB byte is in the same format as the modRMByte. 609 emitByte(modRMByte(SS, Index, Base), CB); 610 } 611 612 void X86MCCodeEmitter::emitMemModRMByte( 613 const MCInst &MI, unsigned Op, unsigned RegOpcodeField, uint64_t TSFlags, 614 PrefixKind Kind, uint64_t StartByte, SmallVectorImpl<char> &CB, 615 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI, 616 bool ForceSIB) const { 617 const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp); 618 const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg); 619 const MCOperand &Scale = MI.getOperand(Op + X86::AddrScaleAmt); 620 const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg); 621 MCRegister BaseReg = Base.getReg(); 622 623 // Handle %rip relative addressing. 624 if (BaseReg == X86::RIP || 625 BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode 626 assert(STI.hasFeature(X86::Is64Bit) && 627 "Rip-relative addressing requires 64-bit mode"); 628 assert(!IndexReg.getReg() && !ForceSIB && "Invalid rip-relative address"); 629 emitByte(modRMByte(0, RegOpcodeField, 5), CB); 630 631 unsigned Opcode = MI.getOpcode(); 632 unsigned FixupKind = [&]() { 633 // Enable relaxed relocation only for a MCSymbolRefExpr. We cannot use a 634 // relaxed relocation if an offset is present (e.g. x@GOTPCREL+4). 635 if (!(Disp.isExpr() && isa<MCSymbolRefExpr>(Disp.getExpr()))) 636 return X86::reloc_riprel_4byte; 637 638 // Certain loads for GOT references can be relocated against the symbol 639 // directly if the symbol ends up in the same linkage unit. 640 switch (Opcode) { 641 default: 642 return X86::reloc_riprel_4byte; 643 case X86::MOV64rm: 644 // movq loads is a subset of reloc_riprel_4byte_relax_rex/rex2. It is a 645 // special case because COFF and Mach-O don't support ELF's more 646 // flexible R_X86_64_REX_GOTPCRELX/R_X86_64_CODE_4_GOTPCRELX relaxation. 647 return Kind == REX2 ? X86::reloc_riprel_4byte_movq_load_rex2 648 : X86::reloc_riprel_4byte_movq_load; 649 case X86::ADC32rm: 650 case X86::ADD32rm: 651 case X86::AND32rm: 652 case X86::CMP32rm: 653 case X86::MOV32rm: 654 case X86::OR32rm: 655 case X86::SBB32rm: 656 case X86::SUB32rm: 657 case X86::TEST32mr: 658 case X86::XOR32rm: 659 case X86::CALL64m: 660 case X86::JMP64m: 661 case X86::TAILJMPm64: 662 case X86::TEST64mr: 663 case X86::ADC64rm: 664 case X86::ADD64rm: 665 case X86::AND64rm: 666 case X86::CMP64rm: 667 case X86::OR64rm: 668 case X86::SBB64rm: 669 case X86::SUB64rm: 670 case X86::XOR64rm: 671 case X86::LEA64r: 672 return Kind == REX2 ? X86::reloc_riprel_4byte_relax_rex2 673 : Kind == REX ? X86::reloc_riprel_4byte_relax_rex 674 : X86::reloc_riprel_4byte_relax; 675 case X86::ADD64rm_NF: 676 case X86::ADD64rm_ND: 677 case X86::ADD64mr_ND: 678 case X86::ADD64mr_NF_ND: 679 case X86::ADD64rm_NF_ND: 680 return X86::reloc_riprel_4byte_relax_evex; 681 } 682 }(); 683 684 // rip-relative addressing is actually relative to the *next* instruction. 685 // Since an immediate can follow the mod/rm byte for an instruction, this 686 // means that we need to bias the displacement field of the instruction with 687 // the size of the immediate field. If we have this case, add it into the 688 // expression to emit. 689 // Note: rip-relative addressing using immediate displacement values should 690 // not be adjusted, assuming it was the user's intent. 691 int ImmSize = !Disp.isImm() && X86II::hasImm(TSFlags) 692 ? X86II::getSizeOfImm(TSFlags) 693 : 0; 694 695 emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), StartByte, CB, 696 Fixups, -ImmSize); 697 return; 698 } 699 700 unsigned BaseRegNo = BaseReg ? getX86RegNum(Base) : -1U; 701 702 bool IsAdSize16 = STI.hasFeature(X86::Is32Bit) && 703 (TSFlags & X86II::AdSizeMask) == X86II::AdSize16; 704 705 // 16-bit addressing forms of the ModR/M byte have a different encoding for 706 // the R/M field and are far more limited in which registers can be used. 707 if (IsAdSize16 || X86_MC::is16BitMemOperand(MI, Op, STI)) { 708 if (BaseReg) { 709 // For 32-bit addressing, the row and column values in Table 2-2 are 710 // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with 711 // some special cases. And getX86RegNum reflects that numbering. 712 // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A, 713 // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only 714 // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order, 715 // while values 0-3 indicate the allowed combinations (base+index) of 716 // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI. 717 // 718 // R16Table[] is a lookup from the normal RegNo, to the row values from 719 // Table 2-1 for 16-bit addressing modes. Where zero means disallowed. 720 static const unsigned R16Table[] = {0, 0, 0, 7, 0, 6, 4, 5}; 721 unsigned RMfield = R16Table[BaseRegNo]; 722 723 assert(RMfield && "invalid 16-bit base register"); 724 725 if (IndexReg.getReg()) { 726 unsigned IndexReg16 = R16Table[getX86RegNum(IndexReg)]; 727 728 assert(IndexReg16 && "invalid 16-bit index register"); 729 // We must have one of SI/DI (4,5), and one of BP/BX (6,7). 730 assert(((IndexReg16 ^ RMfield) & 2) && 731 "invalid 16-bit base/index register combination"); 732 assert(Scale.getImm() == 1 && 733 "invalid scale for 16-bit memory reference"); 734 735 // Allow base/index to appear in either order (although GAS doesn't). 736 if (IndexReg16 & 2) 737 RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1); 738 else 739 RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1); 740 } 741 742 if (Disp.isImm() && isInt<8>(Disp.getImm())) { 743 if (Disp.getImm() == 0 && RMfield != 6) { 744 // There is no displacement; just the register. 745 emitByte(modRMByte(0, RegOpcodeField, RMfield), CB); 746 return; 747 } 748 // Use the [REG]+disp8 form, including for [BP] which cannot be encoded. 749 emitByte(modRMByte(1, RegOpcodeField, RMfield), CB); 750 emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups); 751 return; 752 } 753 // This is the [REG]+disp16 case. 754 emitByte(modRMByte(2, RegOpcodeField, RMfield), CB); 755 } else { 756 assert(!IndexReg.getReg() && "Unexpected index register!"); 757 // There is no BaseReg; this is the plain [disp16] case. 758 emitByte(modRMByte(0, RegOpcodeField, 6), CB); 759 } 760 761 // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases. 762 emitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, StartByte, CB, Fixups); 763 return; 764 } 765 766 // Check for presence of {disp8} or {disp32} pseudo prefixes. 767 bool UseDisp8 = MI.getFlags() & X86::IP_USE_DISP8; 768 bool UseDisp32 = MI.getFlags() & X86::IP_USE_DISP32; 769 770 // We only allow no displacement if no pseudo prefix is present. 771 bool AllowNoDisp = !UseDisp8 && !UseDisp32; 772 // Disp8 is allowed unless the {disp32} prefix is present. 773 bool AllowDisp8 = !UseDisp32; 774 775 // Determine whether a SIB byte is needed. 776 if (!ForceSIB && !X86II::needSIB(BaseReg, IndexReg.getReg(), 777 STI.hasFeature(X86::Is64Bit))) { 778 if (!BaseReg) { // [disp32] in X86-32 mode 779 emitByte(modRMByte(0, RegOpcodeField, 5), CB); 780 emitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, StartByte, CB, Fixups); 781 return; 782 } 783 784 // If the base is not EBP/ESP/R12/R13/R20/R21/R28/R29 and there is no 785 // displacement, use simple indirect register encoding, this handles 786 // addresses like [EAX]. The encoding for [EBP], [R13], [R20], [R21], [R28] 787 // or [R29] with no displacement means [disp32] so we handle it by emitting 788 // a displacement of 0 later. 789 if (BaseRegNo != N86::EBP) { 790 if (Disp.isImm() && Disp.getImm() == 0 && AllowNoDisp) { 791 emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), CB); 792 return; 793 } 794 795 // If the displacement is @tlscall, treat it as a zero. 796 if (Disp.isExpr()) { 797 auto *Sym = dyn_cast<MCSymbolRefExpr>(Disp.getExpr()); 798 if (Sym && Sym->getKind() == MCSymbolRefExpr::VK_TLSCALL) { 799 // This is exclusively used by call *a@tlscall(base). The relocation 800 // (R_386_TLSCALL or R_X86_64_TLSCALL) applies to the beginning. 801 Fixups.push_back(MCFixup::create(0, Sym, FK_NONE, MI.getLoc())); 802 emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), CB); 803 return; 804 } 805 } 806 } 807 808 // Otherwise, if the displacement fits in a byte, encode as [REG+disp8]. 809 // Including a compressed disp8 for EVEX instructions that support it. 810 // This also handles the 0 displacement for [EBP], [R13], [R21] or [R29]. We 811 // can't use disp8 if the {disp32} pseudo prefix is present. 812 if (Disp.isImm() && AllowDisp8) { 813 int ImmOffset = 0; 814 if (isDispOrCDisp8(TSFlags, Disp.getImm(), ImmOffset)) { 815 emitByte(modRMByte(1, RegOpcodeField, BaseRegNo), CB); 816 emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups, 817 ImmOffset); 818 return; 819 } 820 } 821 822 // Otherwise, emit the most general non-SIB encoding: [REG+disp32]. 823 // Displacement may be 0 for [EBP], [R13], [R21], [R29] case if {disp32} 824 // pseudo prefix prevented using disp8 above. 825 emitByte(modRMByte(2, RegOpcodeField, BaseRegNo), CB); 826 unsigned Opcode = MI.getOpcode(); 827 unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax 828 : X86::reloc_signed_4byte; 829 emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), StartByte, CB, 830 Fixups); 831 return; 832 } 833 834 // We need a SIB byte, so start by outputting the ModR/M byte first 835 assert(IndexReg.getReg() != X86::ESP && IndexReg.getReg() != X86::RSP && 836 "Cannot use ESP as index reg!"); 837 838 bool ForceDisp32 = false; 839 bool ForceDisp8 = false; 840 int ImmOffset = 0; 841 if (!BaseReg) { 842 // If there is no base register, we emit the special case SIB byte with 843 // MOD=0, BASE=5, to JUST get the index, scale, and displacement. 844 BaseRegNo = 5; 845 emitByte(modRMByte(0, RegOpcodeField, 4), CB); 846 ForceDisp32 = true; 847 } else if (Disp.isImm() && Disp.getImm() == 0 && AllowNoDisp && 848 // Base reg can't be EBP/RBP/R13/R21/R29 as that would end up with 849 // '5' as the base field, but that is the magic [*] nomenclature 850 // that indicates no base when mod=0. For these cases we'll emit a 851 // 0 displacement instead. 852 BaseRegNo != N86::EBP) { 853 // Emit no displacement ModR/M byte 854 emitByte(modRMByte(0, RegOpcodeField, 4), CB); 855 } else if (Disp.isImm() && AllowDisp8 && 856 isDispOrCDisp8(TSFlags, Disp.getImm(), ImmOffset)) { 857 // Displacement fits in a byte or matches an EVEX compressed disp8, use 858 // disp8 encoding. This also handles EBP/R13/R21/R29 base with 0 859 // displacement unless {disp32} pseudo prefix was used. 860 emitByte(modRMByte(1, RegOpcodeField, 4), CB); 861 ForceDisp8 = true; 862 } else { 863 // Otherwise, emit the normal disp32 encoding. 864 emitByte(modRMByte(2, RegOpcodeField, 4), CB); 865 ForceDisp32 = true; 866 } 867 868 // Calculate what the SS field value should be... 869 static const unsigned SSTable[] = {~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3}; 870 unsigned SS = SSTable[Scale.getImm()]; 871 872 unsigned IndexRegNo = IndexReg.getReg() ? getX86RegNum(IndexReg) : 4; 873 874 emitSIBByte(SS, IndexRegNo, BaseRegNo, CB); 875 876 // Do we need to output a displacement? 877 if (ForceDisp8) 878 emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups, 879 ImmOffset); 880 else if (ForceDisp32) 881 emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte), 882 StartByte, CB, Fixups); 883 } 884 885 /// Emit all instruction prefixes. 886 /// 887 /// \returns one of the REX, XOP, VEX2, VEX3, EVEX if any of them is used, 888 /// otherwise returns None. 889 PrefixKind X86MCCodeEmitter::emitPrefixImpl(unsigned &CurOp, const MCInst &MI, 890 const MCSubtargetInfo &STI, 891 SmallVectorImpl<char> &CB) const { 892 uint64_t TSFlags = MCII.get(MI.getOpcode()).TSFlags; 893 // Determine where the memory operand starts, if present. 894 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags); 895 // Emit segment override opcode prefix as needed. 896 if (MemoryOperand != -1) { 897 MemoryOperand += CurOp; 898 emitSegmentOverridePrefix(MemoryOperand + X86::AddrSegmentReg, MI, CB); 899 } 900 901 // Emit the repeat opcode prefix as needed. 902 unsigned Flags = MI.getFlags(); 903 if (TSFlags & X86II::REP || Flags & X86::IP_HAS_REPEAT) 904 emitByte(0xF3, CB); 905 if (Flags & X86::IP_HAS_REPEAT_NE) 906 emitByte(0xF2, CB); 907 908 // Emit the address size opcode prefix as needed. 909 if (X86_MC::needsAddressSizeOverride(MI, STI, MemoryOperand, TSFlags) || 910 Flags & X86::IP_HAS_AD_SIZE) 911 emitByte(0x67, CB); 912 913 uint64_t Form = TSFlags & X86II::FormMask; 914 switch (Form) { 915 default: 916 break; 917 case X86II::RawFrmDstSrc: { 918 // Emit segment override opcode prefix as needed (not for %ds). 919 if (MI.getOperand(2).getReg() != X86::DS) 920 emitSegmentOverridePrefix(2, MI, CB); 921 CurOp += 3; // Consume operands. 922 break; 923 } 924 case X86II::RawFrmSrc: { 925 // Emit segment override opcode prefix as needed (not for %ds). 926 if (MI.getOperand(1).getReg() != X86::DS) 927 emitSegmentOverridePrefix(1, MI, CB); 928 CurOp += 2; // Consume operands. 929 break; 930 } 931 case X86II::RawFrmDst: { 932 ++CurOp; // Consume operand. 933 break; 934 } 935 case X86II::RawFrmMemOffs: { 936 // Emit segment override opcode prefix as needed. 937 emitSegmentOverridePrefix(1, MI, CB); 938 break; 939 } 940 } 941 942 // REX prefix is optional, but if used must be immediately before the opcode 943 // Encoding type for this instruction. 944 return (TSFlags & X86II::EncodingMask) 945 ? emitVEXOpcodePrefix(MemoryOperand, MI, STI, CB) 946 : emitOpcodePrefix(MemoryOperand, MI, STI, CB); 947 } 948 949 // AVX instructions are encoded using an encoding scheme that combines 950 // prefix bytes, opcode extension field, operand encoding fields, and vector 951 // length encoding capability into a new prefix, referred to as VEX. 952 953 // The majority of the AVX-512 family of instructions (operating on 954 // 512/256/128-bit vector register operands) are encoded using a new prefix 955 // (called EVEX). 956 957 // XOP is a revised subset of what was originally intended as SSE5. It was 958 // changed to be similar but not overlapping with AVX. 959 960 /// Emit XOP, VEX2, VEX3 or EVEX prefix. 961 /// \returns the used prefix. 962 PrefixKind 963 X86MCCodeEmitter::emitVEXOpcodePrefix(int MemOperand, const MCInst &MI, 964 const MCSubtargetInfo &STI, 965 SmallVectorImpl<char> &CB) const { 966 const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); 967 uint64_t TSFlags = Desc.TSFlags; 968 969 assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX."); 970 971 #ifndef NDEBUG 972 unsigned NumOps = MI.getNumOperands(); 973 for (unsigned I = NumOps ? X86II::getOperandBias(Desc) : 0; I != NumOps; 974 ++I) { 975 const MCOperand &MO = MI.getOperand(I); 976 if (!MO.isReg()) 977 continue; 978 MCRegister Reg = MO.getReg(); 979 if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH) 980 report_fatal_error( 981 "Cannot encode high byte register in VEX/EVEX-prefixed instruction"); 982 } 983 #endif 984 985 X86OpcodePrefixHelper Prefix(*Ctx.getRegisterInfo()); 986 switch (TSFlags & X86II::EncodingMask) { 987 default: 988 break; 989 case X86II::XOP: 990 Prefix.setLowerBound(XOP); 991 break; 992 case X86II::VEX: 993 // VEX can be 2 byte or 3 byte, not determined yet if not explicit 994 Prefix.setLowerBound((MI.getFlags() & X86::IP_USE_VEX3) ? VEX3 : VEX2); 995 break; 996 case X86II::EVEX: 997 Prefix.setLowerBound(EVEX); 998 break; 999 } 1000 1001 Prefix.setW(TSFlags & X86II::REX_W); 1002 Prefix.setNF(TSFlags & X86II::EVEX_NF); 1003 1004 bool HasEVEX_K = TSFlags & X86II::EVEX_K; 1005 bool HasVEX_4V = TSFlags & X86II::VEX_4V; 1006 bool IsND = X86II::hasNewDataDest(TSFlags); // IsND implies HasVEX_4V 1007 bool HasEVEX_RC = TSFlags & X86II::EVEX_RC; 1008 1009 switch (TSFlags & X86II::OpMapMask) { 1010 default: 1011 llvm_unreachable("Invalid prefix!"); 1012 case X86II::TB: 1013 Prefix.set5M(0x1); // 0F 1014 break; 1015 case X86II::T8: 1016 Prefix.set5M(0x2); // 0F 38 1017 break; 1018 case X86II::TA: 1019 Prefix.set5M(0x3); // 0F 3A 1020 break; 1021 case X86II::XOP8: 1022 Prefix.set5M(0x8); 1023 break; 1024 case X86II::XOP9: 1025 Prefix.set5M(0x9); 1026 break; 1027 case X86II::XOPA: 1028 Prefix.set5M(0xA); 1029 break; 1030 case X86II::T_MAP4: 1031 Prefix.set5M(0x4); 1032 break; 1033 case X86II::T_MAP5: 1034 Prefix.set5M(0x5); 1035 break; 1036 case X86II::T_MAP6: 1037 Prefix.set5M(0x6); 1038 break; 1039 case X86II::T_MAP7: 1040 Prefix.set5M(0x7); 1041 break; 1042 } 1043 1044 Prefix.setL(TSFlags & X86II::VEX_L); 1045 Prefix.setL2(TSFlags & X86II::EVEX_L2); 1046 if ((TSFlags & X86II::EVEX_L2) && STI.hasFeature(X86::FeatureAVX512) && 1047 !STI.hasFeature(X86::FeatureEVEX512)) 1048 report_fatal_error("ZMM registers are not supported without EVEX512"); 1049 switch (TSFlags & X86II::OpPrefixMask) { 1050 case X86II::PD: 1051 Prefix.setPP(0x1); // 66 1052 break; 1053 case X86II::XS: 1054 Prefix.setPP(0x2); // F3 1055 break; 1056 case X86II::XD: 1057 Prefix.setPP(0x3); // F2 1058 break; 1059 } 1060 1061 Prefix.setZ(HasEVEX_K && (TSFlags & X86II::EVEX_Z)); 1062 Prefix.setEVEX_b(TSFlags & X86II::EVEX_B); 1063 Prefix.setEVEX_U(TSFlags & X86II::EVEX_U); 1064 1065 bool EncodeRC = false; 1066 uint8_t EVEX_rc = 0; 1067 1068 unsigned CurOp = X86II::getOperandBias(Desc); 1069 bool HasTwoConditionalOps = TSFlags & X86II::TwoConditionalOps; 1070 1071 switch (TSFlags & X86II::FormMask) { 1072 default: 1073 llvm_unreachable("Unexpected form in emitVEXOpcodePrefix!"); 1074 case X86II::MRMDestMem4VOp3CC: { 1075 // src1(ModR/M), MemAddr, src2(VEX_4V) 1076 Prefix.setRR2(MI, CurOp++); 1077 Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg); 1078 Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg); 1079 CurOp += X86::AddrNumOperands; 1080 Prefix.set4VV2(MI, CurOp++); 1081 break; 1082 } 1083 case X86II::MRM_C0: 1084 case X86II::RawFrm: 1085 break; 1086 case X86II::MRMDestMemCC: 1087 case X86II::MRMDestMemFSIB: 1088 case X86II::MRMDestMem: { 1089 // MRMDestMem instructions forms: 1090 // MemAddr, src1(ModR/M) 1091 // MemAddr, src1(VEX_4V), src2(ModR/M) 1092 // MemAddr, src1(ModR/M), imm8 1093 // 1094 // NDD: 1095 // dst(VEX_4V), MemAddr, src1(ModR/M) 1096 Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg); 1097 Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg); 1098 Prefix.setV2(MI, MemOperand + X86::AddrIndexReg, HasVEX_4V); 1099 1100 if (IsND) 1101 Prefix.set4VV2(MI, CurOp++); 1102 1103 CurOp += X86::AddrNumOperands; 1104 1105 if (HasEVEX_K) 1106 Prefix.setAAA(MI, CurOp++); 1107 1108 if (!IsND && HasVEX_4V) 1109 Prefix.set4VV2(MI, CurOp++); 1110 1111 Prefix.setRR2(MI, CurOp++); 1112 if (HasTwoConditionalOps) { 1113 Prefix.set4V(MI, CurOp++, /*IsImm=*/true); 1114 Prefix.setSC(MI, CurOp++); 1115 } 1116 break; 1117 } 1118 case X86II::MRMSrcMemCC: 1119 case X86II::MRMSrcMemFSIB: 1120 case X86II::MRMSrcMem: { 1121 // MRMSrcMem instructions forms: 1122 // src1(ModR/M), MemAddr 1123 // src1(ModR/M), src2(VEX_4V), MemAddr 1124 // src1(ModR/M), MemAddr, imm8 1125 // src1(ModR/M), MemAddr, src2(Imm[7:4]) 1126 // 1127 // FMA4: 1128 // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4]) 1129 // 1130 // NDD: 1131 // dst(VEX_4V), src1(ModR/M), MemAddr 1132 if (IsND) 1133 Prefix.set4VV2(MI, CurOp++); 1134 1135 Prefix.setRR2(MI, CurOp++); 1136 1137 if (HasEVEX_K) 1138 Prefix.setAAA(MI, CurOp++); 1139 1140 if (!IsND && HasVEX_4V) 1141 Prefix.set4VV2(MI, CurOp++); 1142 1143 Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg); 1144 Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg); 1145 Prefix.setV2(MI, MemOperand + X86::AddrIndexReg, HasVEX_4V); 1146 CurOp += X86::AddrNumOperands; 1147 if (HasTwoConditionalOps) { 1148 Prefix.set4V(MI, CurOp++, /*IsImm=*/true); 1149 Prefix.setSC(MI, CurOp++); 1150 } 1151 break; 1152 } 1153 case X86II::MRMSrcMem4VOp3: { 1154 // Instruction format for 4VOp3: 1155 // src1(ModR/M), MemAddr, src3(VEX_4V) 1156 Prefix.setRR2(MI, CurOp++); 1157 Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg); 1158 Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg); 1159 Prefix.set4VV2(MI, CurOp + X86::AddrNumOperands); 1160 break; 1161 } 1162 case X86II::MRMSrcMemOp4: { 1163 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M), 1164 Prefix.setR(MI, CurOp++); 1165 Prefix.set4V(MI, CurOp++); 1166 Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg); 1167 Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg); 1168 break; 1169 } 1170 case X86II::MRMXmCC: 1171 case X86II::MRM0m: 1172 case X86II::MRM1m: 1173 case X86II::MRM2m: 1174 case X86II::MRM3m: 1175 case X86II::MRM4m: 1176 case X86II::MRM5m: 1177 case X86II::MRM6m: 1178 case X86II::MRM7m: { 1179 // MRM[0-9]m instructions forms: 1180 // MemAddr 1181 // src1(VEX_4V), MemAddr 1182 if (HasVEX_4V) 1183 Prefix.set4VV2(MI, CurOp++); 1184 1185 if (HasEVEX_K) 1186 Prefix.setAAA(MI, CurOp++); 1187 1188 Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg); 1189 Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg); 1190 Prefix.setV2(MI, MemOperand + X86::AddrIndexReg, HasVEX_4V); 1191 CurOp += X86::AddrNumOperands + 1; // Skip first imm. 1192 if (HasTwoConditionalOps) { 1193 Prefix.set4V(MI, CurOp++, /*IsImm=*/true); 1194 Prefix.setSC(MI, CurOp++); 1195 } 1196 break; 1197 } 1198 case X86II::MRMSrcRegCC: 1199 case X86II::MRMSrcReg: { 1200 // MRMSrcReg instructions forms: 1201 // dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4]) 1202 // dst(ModR/M), src1(ModR/M) 1203 // dst(ModR/M), src1(ModR/M), imm8 1204 // 1205 // FMA4: 1206 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M), 1207 // 1208 // NDD: 1209 // dst(VEX_4V), src1(ModR/M.reg), src2(ModR/M) 1210 if (IsND) 1211 Prefix.set4VV2(MI, CurOp++); 1212 Prefix.setRR2(MI, CurOp++); 1213 1214 if (HasEVEX_K) 1215 Prefix.setAAA(MI, CurOp++); 1216 1217 if (!IsND && HasVEX_4V) 1218 Prefix.set4VV2(MI, CurOp++); 1219 1220 Prefix.setBB2(MI, CurOp); 1221 Prefix.setX(MI, CurOp, 4); 1222 ++CurOp; 1223 1224 if (HasTwoConditionalOps) { 1225 Prefix.set4V(MI, CurOp++, /*IsImm=*/true); 1226 Prefix.setSC(MI, CurOp++); 1227 } 1228 1229 if (TSFlags & X86II::EVEX_B) { 1230 if (HasEVEX_RC) { 1231 unsigned NumOps = Desc.getNumOperands(); 1232 unsigned RcOperand = NumOps - 1; 1233 assert(RcOperand >= CurOp); 1234 EVEX_rc = MI.getOperand(RcOperand).getImm(); 1235 assert(EVEX_rc <= 3 && "Invalid rounding control!"); 1236 } 1237 EncodeRC = true; 1238 } 1239 break; 1240 } 1241 case X86II::MRMSrcReg4VOp3: { 1242 // Instruction format for 4VOp3: 1243 // src1(ModR/M), src2(ModR/M), src3(VEX_4V) 1244 Prefix.setRR2(MI, CurOp++); 1245 Prefix.setBB2(MI, CurOp++); 1246 Prefix.set4VV2(MI, CurOp++); 1247 break; 1248 } 1249 case X86II::MRMSrcRegOp4: { 1250 // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M), 1251 Prefix.setR(MI, CurOp++); 1252 Prefix.set4V(MI, CurOp++); 1253 // Skip second register source (encoded in Imm[7:4]) 1254 ++CurOp; 1255 1256 Prefix.setB(MI, CurOp); 1257 Prefix.setX(MI, CurOp, 4); 1258 ++CurOp; 1259 break; 1260 } 1261 case X86II::MRMDestRegCC: 1262 case X86II::MRMDestReg: { 1263 // MRMDestReg instructions forms: 1264 // dst(ModR/M), src(ModR/M) 1265 // dst(ModR/M), src(ModR/M), imm8 1266 // dst(ModR/M), src1(VEX_4V), src2(ModR/M) 1267 // 1268 // NDD: 1269 // dst(VEX_4V), src1(ModR/M), src2(ModR/M) 1270 if (IsND) 1271 Prefix.set4VV2(MI, CurOp++); 1272 Prefix.setBB2(MI, CurOp); 1273 Prefix.setX(MI, CurOp, 4); 1274 ++CurOp; 1275 1276 if (HasEVEX_K) 1277 Prefix.setAAA(MI, CurOp++); 1278 1279 if (!IsND && HasVEX_4V) 1280 Prefix.set4VV2(MI, CurOp++); 1281 1282 Prefix.setRR2(MI, CurOp++); 1283 if (HasTwoConditionalOps) { 1284 Prefix.set4V(MI, CurOp++, /*IsImm=*/true); 1285 Prefix.setSC(MI, CurOp++); 1286 } 1287 if (TSFlags & X86II::EVEX_B) 1288 EncodeRC = true; 1289 break; 1290 } 1291 case X86II::MRMr0: { 1292 // MRMr0 instructions forms: 1293 // 11:rrr:000 1294 // dst(ModR/M) 1295 Prefix.setRR2(MI, CurOp++); 1296 break; 1297 } 1298 case X86II::MRMXrCC: 1299 case X86II::MRM0r: 1300 case X86II::MRM1r: 1301 case X86II::MRM2r: 1302 case X86II::MRM3r: 1303 case X86II::MRM4r: 1304 case X86II::MRM5r: 1305 case X86II::MRM6r: 1306 case X86II::MRM7r: { 1307 // MRM0r-MRM7r instructions forms: 1308 // dst(VEX_4V), src(ModR/M), imm8 1309 if (HasVEX_4V) 1310 Prefix.set4VV2(MI, CurOp++); 1311 1312 if (HasEVEX_K) 1313 Prefix.setAAA(MI, CurOp++); 1314 1315 Prefix.setBB2(MI, CurOp); 1316 Prefix.setX(MI, CurOp, 4); 1317 ++CurOp; 1318 if (HasTwoConditionalOps) { 1319 Prefix.set4V(MI, ++CurOp, /*IsImm=*/true); 1320 Prefix.setSC(MI, ++CurOp); 1321 } 1322 break; 1323 } 1324 } 1325 if (EncodeRC) { 1326 Prefix.setL(EVEX_rc & 0x1); 1327 Prefix.setL2(EVEX_rc & 0x2); 1328 } 1329 PrefixKind Kind = Prefix.determineOptimalKind(); 1330 Prefix.emit(CB); 1331 return Kind; 1332 } 1333 1334 /// Emit REX prefix which specifies 1335 /// 1) 64-bit instructions, 1336 /// 2) non-default operand size, and 1337 /// 3) use of X86-64 extended registers. 1338 /// 1339 /// \returns the used prefix (REX or None). 1340 PrefixKind X86MCCodeEmitter::emitREXPrefix(int MemOperand, const MCInst &MI, 1341 const MCSubtargetInfo &STI, 1342 SmallVectorImpl<char> &CB) const { 1343 if (!STI.hasFeature(X86::Is64Bit)) 1344 return None; 1345 X86OpcodePrefixHelper Prefix(*Ctx.getRegisterInfo()); 1346 const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); 1347 uint64_t TSFlags = Desc.TSFlags; 1348 Prefix.setW(TSFlags & X86II::REX_W); 1349 unsigned NumOps = MI.getNumOperands(); 1350 bool UsesHighByteReg = false; 1351 #ifndef NDEBUG 1352 bool HasRegOp = false; 1353 #endif 1354 unsigned CurOp = NumOps ? X86II::getOperandBias(Desc) : 0; 1355 for (unsigned i = CurOp; i != NumOps; ++i) { 1356 const MCOperand &MO = MI.getOperand(i); 1357 if (MO.isReg()) { 1358 #ifndef NDEBUG 1359 HasRegOp = true; 1360 #endif 1361 MCRegister Reg = MO.getReg(); 1362 if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH) 1363 UsesHighByteReg = true; 1364 // If it accesses SPL, BPL, SIL, or DIL, then it requires a REX prefix. 1365 if (X86II::isX86_64NonExtLowByteReg(Reg)) 1366 Prefix.setLowerBound(REX); 1367 } else if (MO.isExpr() && STI.getTargetTriple().isX32()) { 1368 // GOTTPOFF and TLSDESC relocations require a REX prefix to allow 1369 // linker optimizations: even if the instructions we see may not require 1370 // any prefix, they may be replaced by instructions that do. This is 1371 // handled as a special case here so that it also works for hand-written 1372 // assembly without the user needing to write REX, as with GNU as. 1373 const auto *Ref = dyn_cast<MCSymbolRefExpr>(MO.getExpr()); 1374 if (Ref && (Ref->getKind() == MCSymbolRefExpr::VK_GOTTPOFF || 1375 Ref->getKind() == MCSymbolRefExpr::VK_TLSDESC)) { 1376 Prefix.setLowerBound(REX); 1377 } 1378 } 1379 } 1380 if (MI.getFlags() & X86::IP_USE_REX) 1381 Prefix.setLowerBound(REX); 1382 if ((TSFlags & X86II::ExplicitOpPrefixMask) == X86II::ExplicitREX2Prefix || 1383 MI.getFlags() & X86::IP_USE_REX2) 1384 Prefix.setLowerBound(REX2); 1385 switch (TSFlags & X86II::FormMask) { 1386 default: 1387 assert(!HasRegOp && "Unexpected form in emitREXPrefix!"); 1388 break; 1389 case X86II::RawFrm: 1390 case X86II::RawFrmMemOffs: 1391 case X86II::RawFrmSrc: 1392 case X86II::RawFrmDst: 1393 case X86II::RawFrmDstSrc: 1394 break; 1395 case X86II::AddRegFrm: 1396 Prefix.setBB2(MI, CurOp++); 1397 break; 1398 case X86II::MRMSrcReg: 1399 case X86II::MRMSrcRegCC: 1400 Prefix.setRR2(MI, CurOp++); 1401 Prefix.setBB2(MI, CurOp++); 1402 break; 1403 case X86II::MRMSrcMem: 1404 case X86II::MRMSrcMemCC: 1405 Prefix.setRR2(MI, CurOp++); 1406 Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg); 1407 Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg); 1408 CurOp += X86::AddrNumOperands; 1409 break; 1410 case X86II::MRMDestReg: 1411 Prefix.setBB2(MI, CurOp++); 1412 Prefix.setRR2(MI, CurOp++); 1413 break; 1414 case X86II::MRMDestMem: 1415 Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg); 1416 Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg); 1417 CurOp += X86::AddrNumOperands; 1418 Prefix.setRR2(MI, CurOp++); 1419 break; 1420 case X86II::MRMXmCC: 1421 case X86II::MRMXm: 1422 case X86II::MRM0m: 1423 case X86II::MRM1m: 1424 case X86II::MRM2m: 1425 case X86II::MRM3m: 1426 case X86II::MRM4m: 1427 case X86II::MRM5m: 1428 case X86II::MRM6m: 1429 case X86II::MRM7m: 1430 Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg); 1431 Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg); 1432 break; 1433 case X86II::MRMXrCC: 1434 case X86II::MRMXr: 1435 case X86II::MRM0r: 1436 case X86II::MRM1r: 1437 case X86II::MRM2r: 1438 case X86II::MRM3r: 1439 case X86II::MRM4r: 1440 case X86II::MRM5r: 1441 case X86II::MRM6r: 1442 case X86II::MRM7r: 1443 Prefix.setBB2(MI, CurOp++); 1444 break; 1445 } 1446 Prefix.setM((TSFlags & X86II::OpMapMask) == X86II::TB); 1447 PrefixKind Kind = Prefix.determineOptimalKind(); 1448 if (Kind && UsesHighByteReg) 1449 report_fatal_error( 1450 "Cannot encode high byte register in REX-prefixed instruction"); 1451 Prefix.emit(CB); 1452 return Kind; 1453 } 1454 1455 /// Emit segment override opcode prefix as needed. 1456 void X86MCCodeEmitter::emitSegmentOverridePrefix( 1457 unsigned SegOperand, const MCInst &MI, SmallVectorImpl<char> &CB) const { 1458 // Check for explicit segment override on memory operand. 1459 if (MCRegister Reg = MI.getOperand(SegOperand).getReg()) 1460 emitByte(X86::getSegmentOverridePrefixForReg(Reg), CB); 1461 } 1462 1463 /// Emit all instruction prefixes prior to the opcode. 1464 /// 1465 /// \param MemOperand the operand # of the start of a memory operand if present. 1466 /// If not present, it is -1. 1467 /// 1468 /// \returns the used prefix (REX or None). 1469 PrefixKind X86MCCodeEmitter::emitOpcodePrefix(int MemOperand, const MCInst &MI, 1470 const MCSubtargetInfo &STI, 1471 SmallVectorImpl<char> &CB) const { 1472 const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); 1473 uint64_t TSFlags = Desc.TSFlags; 1474 1475 // Emit the operand size opcode prefix as needed. 1476 if ((TSFlags & X86II::OpSizeMask) == 1477 (STI.hasFeature(X86::Is16Bit) ? X86II::OpSize32 : X86II::OpSize16)) 1478 emitByte(0x66, CB); 1479 1480 // Emit the LOCK opcode prefix. 1481 if (TSFlags & X86II::LOCK || MI.getFlags() & X86::IP_HAS_LOCK) 1482 emitByte(0xF0, CB); 1483 1484 // Emit the NOTRACK opcode prefix. 1485 if (TSFlags & X86II::NOTRACK || MI.getFlags() & X86::IP_HAS_NOTRACK) 1486 emitByte(0x3E, CB); 1487 1488 switch (TSFlags & X86II::OpPrefixMask) { 1489 case X86II::PD: // 66 1490 emitByte(0x66, CB); 1491 break; 1492 case X86II::XS: // F3 1493 emitByte(0xF3, CB); 1494 break; 1495 case X86II::XD: // F2 1496 emitByte(0xF2, CB); 1497 break; 1498 } 1499 1500 // Handle REX prefix. 1501 assert((STI.hasFeature(X86::Is64Bit) || !(TSFlags & X86II::REX_W)) && 1502 "REX.W requires 64bit mode."); 1503 PrefixKind Kind = emitREXPrefix(MemOperand, MI, STI, CB); 1504 1505 // 0x0F escape code must be emitted just before the opcode. 1506 switch (TSFlags & X86II::OpMapMask) { 1507 case X86II::TB: // Two-byte opcode map 1508 // Encoded by M bit in REX2 1509 if (Kind == REX2) 1510 break; 1511 [[fallthrough]]; 1512 case X86II::T8: // 0F 38 1513 case X86II::TA: // 0F 3A 1514 case X86II::ThreeDNow: // 0F 0F, second 0F emitted by caller. 1515 emitByte(0x0F, CB); 1516 break; 1517 } 1518 1519 switch (TSFlags & X86II::OpMapMask) { 1520 case X86II::T8: // 0F 38 1521 emitByte(0x38, CB); 1522 break; 1523 case X86II::TA: // 0F 3A 1524 emitByte(0x3A, CB); 1525 break; 1526 } 1527 1528 return Kind; 1529 } 1530 1531 void X86MCCodeEmitter::emitPrefix(const MCInst &MI, SmallVectorImpl<char> &CB, 1532 const MCSubtargetInfo &STI) const { 1533 unsigned Opcode = MI.getOpcode(); 1534 const MCInstrDesc &Desc = MCII.get(Opcode); 1535 uint64_t TSFlags = Desc.TSFlags; 1536 1537 // Pseudo instructions don't get encoded. 1538 if (X86II::isPseudo(TSFlags)) 1539 return; 1540 1541 unsigned CurOp = X86II::getOperandBias(Desc); 1542 1543 emitPrefixImpl(CurOp, MI, STI, CB); 1544 } 1545 1546 void X86_MC::emitPrefix(MCCodeEmitter &MCE, const MCInst &MI, 1547 SmallVectorImpl<char> &CB, const MCSubtargetInfo &STI) { 1548 static_cast<X86MCCodeEmitter &>(MCE).emitPrefix(MI, CB, STI); 1549 } 1550 1551 void X86MCCodeEmitter::encodeInstruction(const MCInst &MI, 1552 SmallVectorImpl<char> &CB, 1553 SmallVectorImpl<MCFixup> &Fixups, 1554 const MCSubtargetInfo &STI) const { 1555 unsigned Opcode = MI.getOpcode(); 1556 const MCInstrDesc &Desc = MCII.get(Opcode); 1557 uint64_t TSFlags = Desc.TSFlags; 1558 1559 // Pseudo instructions don't get encoded. 1560 if (X86II::isPseudo(TSFlags)) 1561 return; 1562 1563 unsigned NumOps = Desc.getNumOperands(); 1564 unsigned CurOp = X86II::getOperandBias(Desc); 1565 1566 uint64_t StartByte = CB.size(); 1567 1568 PrefixKind Kind = emitPrefixImpl(CurOp, MI, STI, CB); 1569 1570 // It uses the VEX.VVVV field? 1571 bool HasVEX_4V = TSFlags & X86II::VEX_4V; 1572 bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg; 1573 1574 // It uses the EVEX.aaa field? 1575 bool HasEVEX_K = TSFlags & X86II::EVEX_K; 1576 bool HasEVEX_RC = TSFlags & X86II::EVEX_RC; 1577 1578 // Used if a register is encoded in 7:4 of immediate. 1579 unsigned I8RegNum = 0; 1580 1581 uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags); 1582 1583 if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow) 1584 BaseOpcode = 0x0F; // Weird 3DNow! encoding. 1585 1586 unsigned OpcodeOffset = 0; 1587 1588 bool IsND = X86II::hasNewDataDest(TSFlags); 1589 bool HasTwoConditionalOps = TSFlags & X86II::TwoConditionalOps; 1590 1591 uint64_t Form = TSFlags & X86II::FormMask; 1592 switch (Form) { 1593 default: 1594 errs() << "FORM: " << Form << "\n"; 1595 llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!"); 1596 case X86II::Pseudo: 1597 llvm_unreachable("Pseudo instruction shouldn't be emitted"); 1598 case X86II::RawFrmDstSrc: 1599 case X86II::RawFrmSrc: 1600 case X86II::RawFrmDst: 1601 case X86II::PrefixByte: 1602 emitByte(BaseOpcode, CB); 1603 break; 1604 case X86II::AddCCFrm: { 1605 // This will be added to the opcode in the fallthrough. 1606 OpcodeOffset = MI.getOperand(NumOps - 1).getImm(); 1607 assert(OpcodeOffset < 16 && "Unexpected opcode offset!"); 1608 --NumOps; // Drop the operand from the end. 1609 [[fallthrough]]; 1610 case X86II::RawFrm: 1611 emitByte(BaseOpcode + OpcodeOffset, CB); 1612 1613 if (!STI.hasFeature(X86::Is64Bit) || !isPCRel32Branch(MI, MCII)) 1614 break; 1615 1616 const MCOperand &Op = MI.getOperand(CurOp++); 1617 emitImmediate(Op, MI.getLoc(), X86II::getSizeOfImm(TSFlags), 1618 MCFixupKind(X86::reloc_branch_4byte_pcrel), StartByte, CB, 1619 Fixups); 1620 break; 1621 } 1622 case X86II::RawFrmMemOffs: 1623 emitByte(BaseOpcode, CB); 1624 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1625 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags), 1626 StartByte, CB, Fixups); 1627 ++CurOp; // skip segment operand 1628 break; 1629 case X86II::RawFrmImm8: 1630 emitByte(BaseOpcode, CB); 1631 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1632 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags), 1633 StartByte, CB, Fixups); 1634 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, StartByte, 1635 CB, Fixups); 1636 break; 1637 case X86II::RawFrmImm16: 1638 emitByte(BaseOpcode, CB); 1639 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1640 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags), 1641 StartByte, CB, Fixups); 1642 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, StartByte, 1643 CB, Fixups); 1644 break; 1645 1646 case X86II::AddRegFrm: 1647 emitByte(BaseOpcode + getX86RegNum(MI.getOperand(CurOp++)), CB); 1648 break; 1649 1650 case X86II::MRMDestReg: { 1651 emitByte(BaseOpcode, CB); 1652 unsigned SrcRegNum = CurOp + 1; 1653 1654 if (HasEVEX_K) // Skip writemask 1655 ++SrcRegNum; 1656 1657 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV) 1658 ++SrcRegNum; 1659 if (IsND) // Skip the NDD operand encoded in EVEX_VVVV 1660 ++CurOp; 1661 1662 emitRegModRMByte(MI.getOperand(CurOp), 1663 getX86RegNum(MI.getOperand(SrcRegNum)), CB); 1664 CurOp = SrcRegNum + 1; 1665 break; 1666 } 1667 case X86II::MRMDestRegCC: { 1668 unsigned FirstOp = CurOp++; 1669 unsigned SecondOp = CurOp++; 1670 unsigned CC = MI.getOperand(CurOp++).getImm(); 1671 emitByte(BaseOpcode + CC, CB); 1672 emitRegModRMByte(MI.getOperand(FirstOp), 1673 getX86RegNum(MI.getOperand(SecondOp)), CB); 1674 break; 1675 } 1676 case X86II::MRMDestMem4VOp3CC: { 1677 unsigned CC = MI.getOperand(8).getImm(); 1678 emitByte(BaseOpcode + CC, CB); 1679 unsigned SrcRegNum = CurOp + X86::AddrNumOperands; 1680 emitMemModRMByte(MI, CurOp + 1, getX86RegNum(MI.getOperand(0)), TSFlags, 1681 Kind, StartByte, CB, Fixups, STI, false); 1682 CurOp = SrcRegNum + 3; // skip reg, VEX_V4 and CC 1683 break; 1684 } 1685 case X86II::MRMDestMemFSIB: 1686 case X86II::MRMDestMem: { 1687 emitByte(BaseOpcode, CB); 1688 unsigned SrcRegNum = CurOp + X86::AddrNumOperands; 1689 1690 if (HasEVEX_K) // Skip writemask 1691 ++SrcRegNum; 1692 1693 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV) 1694 ++SrcRegNum; 1695 1696 if (IsND) // Skip new data destination 1697 ++CurOp; 1698 1699 bool ForceSIB = (Form == X86II::MRMDestMemFSIB); 1700 emitMemModRMByte(MI, CurOp, getX86RegNum(MI.getOperand(SrcRegNum)), TSFlags, 1701 Kind, StartByte, CB, Fixups, STI, ForceSIB); 1702 CurOp = SrcRegNum + 1; 1703 break; 1704 } 1705 case X86II::MRMDestMemCC: { 1706 unsigned MemOp = CurOp; 1707 CurOp = MemOp + X86::AddrNumOperands; 1708 unsigned RegOp = CurOp++; 1709 unsigned CC = MI.getOperand(CurOp++).getImm(); 1710 emitByte(BaseOpcode + CC, CB); 1711 emitMemModRMByte(MI, MemOp, getX86RegNum(MI.getOperand(RegOp)), TSFlags, 1712 Kind, StartByte, CB, Fixups, STI); 1713 break; 1714 } 1715 case X86II::MRMSrcReg: { 1716 emitByte(BaseOpcode, CB); 1717 unsigned SrcRegNum = CurOp + 1; 1718 1719 if (HasEVEX_K) // Skip writemask 1720 ++SrcRegNum; 1721 1722 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV) 1723 ++SrcRegNum; 1724 1725 if (IsND) // Skip new data destination 1726 ++CurOp; 1727 1728 emitRegModRMByte(MI.getOperand(SrcRegNum), 1729 getX86RegNum(MI.getOperand(CurOp)), CB); 1730 CurOp = SrcRegNum + 1; 1731 if (HasVEX_I8Reg) 1732 I8RegNum = getX86RegEncoding(MI, CurOp++); 1733 // do not count the rounding control operand 1734 if (HasEVEX_RC) 1735 --NumOps; 1736 break; 1737 } 1738 case X86II::MRMSrcReg4VOp3: { 1739 emitByte(BaseOpcode, CB); 1740 unsigned SrcRegNum = CurOp + 1; 1741 1742 emitRegModRMByte(MI.getOperand(SrcRegNum), 1743 getX86RegNum(MI.getOperand(CurOp)), CB); 1744 CurOp = SrcRegNum + 1; 1745 ++CurOp; // Encoded in VEX.VVVV 1746 break; 1747 } 1748 case X86II::MRMSrcRegOp4: { 1749 emitByte(BaseOpcode, CB); 1750 unsigned SrcRegNum = CurOp + 1; 1751 1752 // Skip 1st src (which is encoded in VEX_VVVV) 1753 ++SrcRegNum; 1754 1755 // Capture 2nd src (which is encoded in Imm[7:4]) 1756 assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg"); 1757 I8RegNum = getX86RegEncoding(MI, SrcRegNum++); 1758 1759 emitRegModRMByte(MI.getOperand(SrcRegNum), 1760 getX86RegNum(MI.getOperand(CurOp)), CB); 1761 CurOp = SrcRegNum + 1; 1762 break; 1763 } 1764 case X86II::MRMSrcRegCC: { 1765 if (IsND) // Skip new data destination 1766 ++CurOp; 1767 unsigned FirstOp = CurOp++; 1768 unsigned SecondOp = CurOp++; 1769 1770 unsigned CC = MI.getOperand(CurOp++).getImm(); 1771 emitByte(BaseOpcode + CC, CB); 1772 1773 emitRegModRMByte(MI.getOperand(SecondOp), 1774 getX86RegNum(MI.getOperand(FirstOp)), CB); 1775 break; 1776 } 1777 case X86II::MRMSrcMemFSIB: 1778 case X86II::MRMSrcMem: { 1779 unsigned FirstMemOp = CurOp + 1; 1780 1781 if (IsND) // Skip new data destination 1782 CurOp++; 1783 1784 if (HasEVEX_K) // Skip writemask 1785 ++FirstMemOp; 1786 1787 if (HasVEX_4V) 1788 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV). 1789 1790 emitByte(BaseOpcode, CB); 1791 1792 bool ForceSIB = (Form == X86II::MRMSrcMemFSIB); 1793 emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)), 1794 TSFlags, Kind, StartByte, CB, Fixups, STI, ForceSIB); 1795 CurOp = FirstMemOp + X86::AddrNumOperands; 1796 if (HasVEX_I8Reg) 1797 I8RegNum = getX86RegEncoding(MI, CurOp++); 1798 break; 1799 } 1800 case X86II::MRMSrcMem4VOp3: { 1801 unsigned FirstMemOp = CurOp + 1; 1802 1803 emitByte(BaseOpcode, CB); 1804 1805 emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)), 1806 TSFlags, Kind, StartByte, CB, Fixups, STI); 1807 CurOp = FirstMemOp + X86::AddrNumOperands; 1808 ++CurOp; // Encoded in VEX.VVVV. 1809 break; 1810 } 1811 case X86II::MRMSrcMemOp4: { 1812 unsigned FirstMemOp = CurOp + 1; 1813 1814 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV). 1815 1816 // Capture second register source (encoded in Imm[7:4]) 1817 assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg"); 1818 I8RegNum = getX86RegEncoding(MI, FirstMemOp++); 1819 1820 emitByte(BaseOpcode, CB); 1821 1822 emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)), 1823 TSFlags, Kind, StartByte, CB, Fixups, STI); 1824 CurOp = FirstMemOp + X86::AddrNumOperands; 1825 break; 1826 } 1827 case X86II::MRMSrcMemCC: { 1828 if (IsND) // Skip new data destination 1829 ++CurOp; 1830 unsigned RegOp = CurOp++; 1831 unsigned FirstMemOp = CurOp; 1832 CurOp = FirstMemOp + X86::AddrNumOperands; 1833 1834 unsigned CC = MI.getOperand(CurOp++).getImm(); 1835 emitByte(BaseOpcode + CC, CB); 1836 1837 emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(RegOp)), 1838 TSFlags, Kind, StartByte, CB, Fixups, STI); 1839 break; 1840 } 1841 1842 case X86II::MRMXrCC: { 1843 unsigned RegOp = CurOp++; 1844 1845 unsigned CC = MI.getOperand(CurOp++).getImm(); 1846 emitByte(BaseOpcode + CC, CB); 1847 emitRegModRMByte(MI.getOperand(RegOp), 0, CB); 1848 break; 1849 } 1850 1851 case X86II::MRMXr: 1852 case X86II::MRM0r: 1853 case X86II::MRM1r: 1854 case X86II::MRM2r: 1855 case X86II::MRM3r: 1856 case X86II::MRM4r: 1857 case X86II::MRM5r: 1858 case X86II::MRM6r: 1859 case X86II::MRM7r: 1860 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV). 1861 ++CurOp; 1862 if (HasEVEX_K) // Skip writemask 1863 ++CurOp; 1864 emitByte(BaseOpcode, CB); 1865 emitRegModRMByte(MI.getOperand(CurOp++), 1866 (Form == X86II::MRMXr) ? 0 : Form - X86II::MRM0r, CB); 1867 break; 1868 case X86II::MRMr0: 1869 emitByte(BaseOpcode, CB); 1870 emitByte(modRMByte(3, getX86RegNum(MI.getOperand(CurOp++)), 0), CB); 1871 break; 1872 1873 case X86II::MRMXmCC: { 1874 unsigned FirstMemOp = CurOp; 1875 CurOp = FirstMemOp + X86::AddrNumOperands; 1876 1877 unsigned CC = MI.getOperand(CurOp++).getImm(); 1878 emitByte(BaseOpcode + CC, CB); 1879 1880 emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, Kind, StartByte, CB, Fixups, 1881 STI); 1882 break; 1883 } 1884 1885 case X86II::MRMXm: 1886 case X86II::MRM0m: 1887 case X86II::MRM1m: 1888 case X86II::MRM2m: 1889 case X86II::MRM3m: 1890 case X86II::MRM4m: 1891 case X86II::MRM5m: 1892 case X86II::MRM6m: 1893 case X86II::MRM7m: 1894 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV). 1895 ++CurOp; 1896 if (HasEVEX_K) // Skip writemask 1897 ++CurOp; 1898 emitByte(BaseOpcode, CB); 1899 emitMemModRMByte(MI, CurOp, 1900 (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags, 1901 Kind, StartByte, CB, Fixups, STI); 1902 CurOp += X86::AddrNumOperands; 1903 break; 1904 1905 case X86II::MRM0X: 1906 case X86II::MRM1X: 1907 case X86II::MRM2X: 1908 case X86II::MRM3X: 1909 case X86II::MRM4X: 1910 case X86II::MRM5X: 1911 case X86II::MRM6X: 1912 case X86II::MRM7X: 1913 emitByte(BaseOpcode, CB); 1914 emitByte(0xC0 + ((Form - X86II::MRM0X) << 3), CB); 1915 break; 1916 1917 case X86II::MRM_C0: 1918 case X86II::MRM_C1: 1919 case X86II::MRM_C2: 1920 case X86II::MRM_C3: 1921 case X86II::MRM_C4: 1922 case X86II::MRM_C5: 1923 case X86II::MRM_C6: 1924 case X86II::MRM_C7: 1925 case X86II::MRM_C8: 1926 case X86II::MRM_C9: 1927 case X86II::MRM_CA: 1928 case X86II::MRM_CB: 1929 case X86II::MRM_CC: 1930 case X86II::MRM_CD: 1931 case X86II::MRM_CE: 1932 case X86II::MRM_CF: 1933 case X86II::MRM_D0: 1934 case X86II::MRM_D1: 1935 case X86II::MRM_D2: 1936 case X86II::MRM_D3: 1937 case X86II::MRM_D4: 1938 case X86II::MRM_D5: 1939 case X86II::MRM_D6: 1940 case X86II::MRM_D7: 1941 case X86II::MRM_D8: 1942 case X86II::MRM_D9: 1943 case X86II::MRM_DA: 1944 case X86II::MRM_DB: 1945 case X86II::MRM_DC: 1946 case X86II::MRM_DD: 1947 case X86II::MRM_DE: 1948 case X86II::MRM_DF: 1949 case X86II::MRM_E0: 1950 case X86II::MRM_E1: 1951 case X86II::MRM_E2: 1952 case X86II::MRM_E3: 1953 case X86II::MRM_E4: 1954 case X86II::MRM_E5: 1955 case X86II::MRM_E6: 1956 case X86II::MRM_E7: 1957 case X86II::MRM_E8: 1958 case X86II::MRM_E9: 1959 case X86II::MRM_EA: 1960 case X86II::MRM_EB: 1961 case X86II::MRM_EC: 1962 case X86II::MRM_ED: 1963 case X86II::MRM_EE: 1964 case X86II::MRM_EF: 1965 case X86II::MRM_F0: 1966 case X86II::MRM_F1: 1967 case X86II::MRM_F2: 1968 case X86II::MRM_F3: 1969 case X86II::MRM_F4: 1970 case X86II::MRM_F5: 1971 case X86II::MRM_F6: 1972 case X86II::MRM_F7: 1973 case X86II::MRM_F8: 1974 case X86II::MRM_F9: 1975 case X86II::MRM_FA: 1976 case X86II::MRM_FB: 1977 case X86II::MRM_FC: 1978 case X86II::MRM_FD: 1979 case X86II::MRM_FE: 1980 case X86II::MRM_FF: 1981 emitByte(BaseOpcode, CB); 1982 emitByte(0xC0 + Form - X86II::MRM_C0, CB); 1983 break; 1984 } 1985 1986 if (HasVEX_I8Reg) { 1987 // The last source register of a 4 operand instruction in AVX is encoded 1988 // in bits[7:4] of a immediate byte. 1989 assert(I8RegNum < 16 && "Register encoding out of range"); 1990 I8RegNum <<= 4; 1991 if (CurOp != NumOps) { 1992 unsigned Val = MI.getOperand(CurOp++).getImm(); 1993 assert(Val < 16 && "Immediate operand value out of range"); 1994 I8RegNum |= Val; 1995 } 1996 emitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1, 1997 StartByte, CB, Fixups); 1998 } else { 1999 // If there is a remaining operand, it must be a trailing immediate. Emit it 2000 // according to the right size for the instruction. Some instructions 2001 // (SSE4a extrq and insertq) have two trailing immediates. 2002 2003 // Skip two trainling conditional operands encoded in EVEX prefix 2004 unsigned RemaningOps = NumOps - CurOp - 2 * HasTwoConditionalOps; 2005 while (RemaningOps) { 2006 emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2007 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags), 2008 StartByte, CB, Fixups); 2009 --RemaningOps; 2010 } 2011 CurOp += 2 * HasTwoConditionalOps; 2012 } 2013 2014 if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow) 2015 emitByte(X86II::getBaseOpcodeFor(TSFlags), CB); 2016 2017 if (CB.size() - StartByte > 15) 2018 Ctx.reportError(MI.getLoc(), "instruction length exceeds the limit of 15"); 2019 #ifndef NDEBUG 2020 // FIXME: Verify. 2021 if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) { 2022 errs() << "Cannot encode all operands of: "; 2023 MI.dump(); 2024 errs() << '\n'; 2025 abort(); 2026 } 2027 #endif 2028 } 2029 2030 MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII, 2031 MCContext &Ctx) { 2032 return new X86MCCodeEmitter(MCII, Ctx); 2033 } 2034