//===-- X86MCTargetDesc.cpp - X86 Target Descriptions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides X86 specific target descriptions.
//
//===----------------------------------------------------------------------===//

#include "X86MCTargetDesc.h"
#include "TargetInfo/X86TargetInfo.h"
#include "X86ATTInstPrinter.h"
#include "X86BaseInfo.h"
#include "X86IntelInstPrinter.h"
#include "X86MCAsmInfo.h"
#include "X86TargetStreamer.h"
#include "llvm/ADT/APInt.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define GET_REGINFO_MC_DESC
#include "X86GenRegisterInfo.inc"

#define GET_INSTRINFO_MC_DESC
#define GET_INSTRINFO_MC_HELPERS
#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "X86GenInstrInfo.inc"

#define GET_SUBTARGETINFO_MC_DESC
#include "X86GenSubtargetInfo.inc"

std::string X86_MC::ParseX86Triple(const Triple &TT) {
  std::string FS;
  // SSE2 should default to enabled in 64-bit mode, but can be turned off
  // explicitly.
  if (TT.isArch64Bit())
    FS = "+64bit-mode,-32bit-mode,-16bit-mode,+sse2";
  else if (TT.getEnvironment() != Triple::CODE16)
    FS = "-64bit-mode,+32bit-mode,-16bit-mode";
  else
    FS = "-64bit-mode,-32bit-mode,+16bit-mode";

  return FS;
}
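// Select the DWARF register-numbering "flavour" for the given triple. x86-64
// uses a single numbering; for 32-bit x86, Darwin uses a distinct numbering
// in exception-handling frames, and everything else gets the generic one.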
unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
  if (TT.getArch() == Triple::x86_64)
    return DWARFFlavour::X86_64;

  if (TT.isOSDarwin())
    return isEH ? DWARFFlavour::X86_32_DarwinEH : DWARFFlavour::X86_32_Generic;
  if (TT.isOSCygMing())
    // Unsupported for now; fall back to the generic flavour.
    return DWARFFlavour::X86_32_Generic;
  return DWARFFlavour::X86_32_Generic;
}

bool X86_MC::hasLockPrefix(const MCInst &MI) {
  return MI.getFlags() & X86::IP_HAS_LOCK;
}

static bool isMemOperand(const MCInst &MI, unsigned Op, unsigned RegClassID) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
  const MCRegisterClass &RC = X86MCRegisterClasses[RegClassID];

  return (Base.isReg() && Base.getReg() && RC.contains(Base.getReg())) ||
         (Index.isReg() && Index.getReg() && RC.contains(Index.getReg()));
}

bool X86_MC::is16BitMemOperand(const MCInst &MI, unsigned Op,
                               const MCSubtargetInfo &STI) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);

  if (STI.hasFeature(X86::Is16Bit) && Base.isReg() && !Base.getReg() &&
      Index.isReg() && !Index.getReg())
    return true;
  return isMemOperand(MI, Op, X86::GR16RegClassID);
}

bool X86_MC::is32BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
  if (Base.isReg() && Base.getReg() == X86::EIP) {
    assert(Index.isReg() && !Index.getReg() && "Invalid eip-based address");
    return true;
  }
  if (Index.isReg() && Index.getReg() == X86::EIZ)
    return true;
  return isMemOperand(MI, Op, X86::GR32RegClassID);
}

#ifndef NDEBUG
bool X86_MC::is64BitMemOperand(const MCInst &MI, unsigned Op) {
  return isMemOperand(MI, Op, X86::GR64RegClassID);
}
#endif

bool X86_MC::needsAddressSizeOverride(const MCInst &MI,
                                      const MCSubtargetInfo &STI,
                                      int MemoryOperand, uint64_t TSFlags) {
  uint64_t AdSize = TSFlags & X86II::AdSizeMask;
  bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
  bool Is32BitMode = STI.hasFeature(X86::Is32Bit);
  bool Is64BitMode = STI.hasFeature(X86::Is64Bit);
  if ((Is16BitMode && AdSize == X86II::AdSize32) ||
      (Is32BitMode && AdSize == X86II::AdSize16) ||
      (Is64BitMode && AdSize == X86II::AdSize32))
    return true;
  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default:
    break;
  case X86II::RawFrmDstSrc: {
    MCRegister siReg = MI.getOperand(1).getReg();
    assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
            (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
            (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
           "SI and DI register sizes do not match");
    return (!Is32BitMode && siReg == X86::ESI) ||
           (Is32BitMode && siReg == X86::SI);
  }
  case X86II::RawFrmSrc: {
    MCRegister siReg = MI.getOperand(0).getReg();
    return (!Is32BitMode && siReg == X86::ESI) ||
           (Is32BitMode && siReg == X86::SI);
  }
  case X86II::RawFrmDst: {
    MCRegister siReg = MI.getOperand(0).getReg();
    return (!Is32BitMode && siReg == X86::EDI) ||
           (Is32BitMode && siReg == X86::DI);
  }
  }
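  // None of the explicit cases applied, so infer the answer from the memory
  // operand itself, if there is one: the override prefix is needed whenever
  // the address width implied by the operand's base/index registers differs
  // from the current mode's default address size.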
  if (MemoryOperand < 0)
    return false;

  if (STI.hasFeature(X86::Is64Bit)) {
    assert(!is16BitMemOperand(MI, MemoryOperand, STI));
    return is32BitMemOperand(MI, MemoryOperand);
  }
  if (STI.hasFeature(X86::Is32Bit)) {
    assert(!is64BitMemOperand(MI, MemoryOperand));
    return is16BitMemOperand(MI, MemoryOperand, STI);
  }
  assert(STI.hasFeature(X86::Is16Bit));
  assert(!is64BitMemOperand(MI, MemoryOperand));
  return !is16BitMemOperand(MI, MemoryOperand, STI);
}
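// Populate the LLVM-to-SEH and LLVM-to-CodeView register number tables. The
// SEH register number is simply the register's encoding value; the CodeView
// numbering is spelled out explicitly below.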
void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
  // FIXME: TableGen these.
  for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
    unsigned SEH = MRI->getEncodingValue(Reg);
    MRI->mapLLVMRegToSEHReg(Reg, SEH);
  }

  // Mapping from CodeView to MC register id.
  static const struct {
    codeview::RegisterId CVReg;
    MCPhysReg Reg;
  } RegMap[] = {
      {codeview::RegisterId::AL, X86::AL},
      {codeview::RegisterId::CL, X86::CL},
      {codeview::RegisterId::DL, X86::DL},
      {codeview::RegisterId::BL, X86::BL},
      {codeview::RegisterId::AH, X86::AH},
      {codeview::RegisterId::CH, X86::CH},
      {codeview::RegisterId::DH, X86::DH},
      {codeview::RegisterId::BH, X86::BH},
      {codeview::RegisterId::AX, X86::AX},
      {codeview::RegisterId::CX, X86::CX},
      {codeview::RegisterId::DX, X86::DX},
      {codeview::RegisterId::BX, X86::BX},
      {codeview::RegisterId::SP, X86::SP},
      {codeview::RegisterId::BP, X86::BP},
      {codeview::RegisterId::SI, X86::SI},
      {codeview::RegisterId::DI, X86::DI},
      {codeview::RegisterId::EAX, X86::EAX},
      {codeview::RegisterId::ECX, X86::ECX},
      {codeview::RegisterId::EDX, X86::EDX},
      {codeview::RegisterId::EBX, X86::EBX},
      {codeview::RegisterId::ESP, X86::ESP},
      {codeview::RegisterId::EBP, X86::EBP},
      {codeview::RegisterId::ESI, X86::ESI},
      {codeview::RegisterId::EDI, X86::EDI},

      {codeview::RegisterId::EFLAGS, X86::EFLAGS},

      {codeview::RegisterId::ST0, X86::ST0},
      {codeview::RegisterId::ST1, X86::ST1},
      {codeview::RegisterId::ST2, X86::ST2},
      {codeview::RegisterId::ST3, X86::ST3},
      {codeview::RegisterId::ST4, X86::ST4},
      {codeview::RegisterId::ST5, X86::ST5},
      {codeview::RegisterId::ST6, X86::ST6},
      {codeview::RegisterId::ST7, X86::ST7},

      {codeview::RegisterId::ST0, X86::FP0},
      {codeview::RegisterId::ST1, X86::FP1},
      {codeview::RegisterId::ST2, X86::FP2},
      {codeview::RegisterId::ST3, X86::FP3},
      {codeview::RegisterId::ST4, X86::FP4},
      {codeview::RegisterId::ST5, X86::FP5},
      {codeview::RegisterId::ST6, X86::FP6},
      {codeview::RegisterId::ST7, X86::FP7},

      {codeview::RegisterId::MM0, X86::MM0},
      {codeview::RegisterId::MM1, X86::MM1},
      {codeview::RegisterId::MM2, X86::MM2},
      {codeview::RegisterId::MM3, X86::MM3},
      {codeview::RegisterId::MM4, X86::MM4},
      {codeview::RegisterId::MM5, X86::MM5},
      {codeview::RegisterId::MM6, X86::MM6},
      {codeview::RegisterId::MM7, X86::MM7},

      {codeview::RegisterId::XMM0, X86::XMM0},
      {codeview::RegisterId::XMM1, X86::XMM1},
      {codeview::RegisterId::XMM2, X86::XMM2},
      {codeview::RegisterId::XMM3, X86::XMM3},
      {codeview::RegisterId::XMM4, X86::XMM4},
      {codeview::RegisterId::XMM5, X86::XMM5},
      {codeview::RegisterId::XMM6, X86::XMM6},
      {codeview::RegisterId::XMM7, X86::XMM7},

      {codeview::RegisterId::XMM8, X86::XMM8},
      {codeview::RegisterId::XMM9, X86::XMM9},
      {codeview::RegisterId::XMM10, X86::XMM10},
      {codeview::RegisterId::XMM11, X86::XMM11},
      {codeview::RegisterId::XMM12, X86::XMM12},
      {codeview::RegisterId::XMM13, X86::XMM13},
      {codeview::RegisterId::XMM14, X86::XMM14},
      {codeview::RegisterId::XMM15, X86::XMM15},

      {codeview::RegisterId::SIL, X86::SIL},
      {codeview::RegisterId::DIL, X86::DIL},
      {codeview::RegisterId::BPL, X86::BPL},
      {codeview::RegisterId::SPL, X86::SPL},
      {codeview::RegisterId::RAX, X86::RAX},
      {codeview::RegisterId::RBX, X86::RBX},
      {codeview::RegisterId::RCX, X86::RCX},
      {codeview::RegisterId::RDX, X86::RDX},
      {codeview::RegisterId::RSI, X86::RSI},
      {codeview::RegisterId::RDI, X86::RDI},
      {codeview::RegisterId::RBP, X86::RBP},
      {codeview::RegisterId::RSP, X86::RSP},
      {codeview::RegisterId::R8, X86::R8},
      {codeview::RegisterId::R9, X86::R9},
      {codeview::RegisterId::R10, X86::R10},
      {codeview::RegisterId::R11, X86::R11},
      {codeview::RegisterId::R12, X86::R12},
      {codeview::RegisterId::R13, X86::R13},
      {codeview::RegisterId::R14, X86::R14},
      {codeview::RegisterId::R15, X86::R15},
      {codeview::RegisterId::R8B, X86::R8B},
      {codeview::RegisterId::R9B, X86::R9B},
      {codeview::RegisterId::R10B, X86::R10B},
      {codeview::RegisterId::R11B, X86::R11B},
      {codeview::RegisterId::R12B, X86::R12B},
      {codeview::RegisterId::R13B, X86::R13B},
      {codeview::RegisterId::R14B, X86::R14B},
      {codeview::RegisterId::R15B, X86::R15B},
      {codeview::RegisterId::R8W, X86::R8W},
      {codeview::RegisterId::R9W, X86::R9W},
      {codeview::RegisterId::R10W, X86::R10W},
      {codeview::RegisterId::R11W, X86::R11W},
      {codeview::RegisterId::R12W, X86::R12W},
      {codeview::RegisterId::R13W, X86::R13W},
      {codeview::RegisterId::R14W, X86::R14W},
      {codeview::RegisterId::R15W, X86::R15W},
      {codeview::RegisterId::R8D, X86::R8D},
      {codeview::RegisterId::R9D, X86::R9D},
      {codeview::RegisterId::R10D, X86::R10D},
      {codeview::RegisterId::R11D, X86::R11D},
      {codeview::RegisterId::R12D, X86::R12D},
      {codeview::RegisterId::R13D, X86::R13D},
      {codeview::RegisterId::R14D, X86::R14D},
      {codeview::RegisterId::R15D, X86::R15D},
      {codeview::RegisterId::AMD64_YMM0, X86::YMM0},
      {codeview::RegisterId::AMD64_YMM1, X86::YMM1},
      {codeview::RegisterId::AMD64_YMM2, X86::YMM2},
      {codeview::RegisterId::AMD64_YMM3, X86::YMM3},
      {codeview::RegisterId::AMD64_YMM4, X86::YMM4},
      {codeview::RegisterId::AMD64_YMM5, X86::YMM5},
      {codeview::RegisterId::AMD64_YMM6, X86::YMM6},
      {codeview::RegisterId::AMD64_YMM7, X86::YMM7},
      {codeview::RegisterId::AMD64_YMM8, X86::YMM8},
      {codeview::RegisterId::AMD64_YMM9, X86::YMM9},
      {codeview::RegisterId::AMD64_YMM10, X86::YMM10},
      {codeview::RegisterId::AMD64_YMM11, X86::YMM11},
      {codeview::RegisterId::AMD64_YMM12, X86::YMM12},
      {codeview::RegisterId::AMD64_YMM13, X86::YMM13},
      {codeview::RegisterId::AMD64_YMM14, X86::YMM14},
      {codeview::RegisterId::AMD64_YMM15, X86::YMM15},
      {codeview::RegisterId::AMD64_YMM16, X86::YMM16},
      {codeview::RegisterId::AMD64_YMM17, X86::YMM17},
      {codeview::RegisterId::AMD64_YMM18, X86::YMM18},
      {codeview::RegisterId::AMD64_YMM19, X86::YMM19},
      {codeview::RegisterId::AMD64_YMM20, X86::YMM20},
      {codeview::RegisterId::AMD64_YMM21, X86::YMM21},
      {codeview::RegisterId::AMD64_YMM22, X86::YMM22},
      {codeview::RegisterId::AMD64_YMM23, X86::YMM23},
      {codeview::RegisterId::AMD64_YMM24, X86::YMM24},
      {codeview::RegisterId::AMD64_YMM25, X86::YMM25},
      {codeview::RegisterId::AMD64_YMM26, X86::YMM26},
      {codeview::RegisterId::AMD64_YMM27, X86::YMM27},
      {codeview::RegisterId::AMD64_YMM28, X86::YMM28},
      {codeview::RegisterId::AMD64_YMM29, X86::YMM29},
      {codeview::RegisterId::AMD64_YMM30, X86::YMM30},
      {codeview::RegisterId::AMD64_YMM31, X86::YMM31},
      {codeview::RegisterId::AMD64_ZMM0, X86::ZMM0},
      {codeview::RegisterId::AMD64_ZMM1, X86::ZMM1},
      {codeview::RegisterId::AMD64_ZMM2, X86::ZMM2},
      {codeview::RegisterId::AMD64_ZMM3, X86::ZMM3},
      {codeview::RegisterId::AMD64_ZMM4, X86::ZMM4},
      {codeview::RegisterId::AMD64_ZMM5, X86::ZMM5},
      {codeview::RegisterId::AMD64_ZMM6, X86::ZMM6},
      {codeview::RegisterId::AMD64_ZMM7, X86::ZMM7},
      {codeview::RegisterId::AMD64_ZMM8, X86::ZMM8},
      {codeview::RegisterId::AMD64_ZMM9, X86::ZMM9},
      {codeview::RegisterId::AMD64_ZMM10, X86::ZMM10},
      {codeview::RegisterId::AMD64_ZMM11, X86::ZMM11},
      {codeview::RegisterId::AMD64_ZMM12, X86::ZMM12},
      {codeview::RegisterId::AMD64_ZMM13, X86::ZMM13},
      {codeview::RegisterId::AMD64_ZMM14, X86::ZMM14},
      {codeview::RegisterId::AMD64_ZMM15, X86::ZMM15},
      {codeview::RegisterId::AMD64_ZMM16, X86::ZMM16},
      {codeview::RegisterId::AMD64_ZMM17, X86::ZMM17},
      {codeview::RegisterId::AMD64_ZMM18, X86::ZMM18},
      {codeview::RegisterId::AMD64_ZMM19, X86::ZMM19},
      {codeview::RegisterId::AMD64_ZMM20, X86::ZMM20},
      {codeview::RegisterId::AMD64_ZMM21, X86::ZMM21},
      {codeview::RegisterId::AMD64_ZMM22, X86::ZMM22},
      {codeview::RegisterId::AMD64_ZMM23, X86::ZMM23},
      {codeview::RegisterId::AMD64_ZMM24, X86::ZMM24},
      {codeview::RegisterId::AMD64_ZMM25, X86::ZMM25},
      {codeview::RegisterId::AMD64_ZMM26, X86::ZMM26},
      {codeview::RegisterId::AMD64_ZMM27, X86::ZMM27},
      {codeview::RegisterId::AMD64_ZMM28, X86::ZMM28},
      {codeview::RegisterId::AMD64_ZMM29, X86::ZMM29},
      {codeview::RegisterId::AMD64_ZMM30, X86::ZMM30},
      {codeview::RegisterId::AMD64_ZMM31, X86::ZMM31},
      {codeview::RegisterId::AMD64_K0, X86::K0},
      {codeview::RegisterId::AMD64_K1, X86::K1},
      {codeview::RegisterId::AMD64_K2, X86::K2},
      {codeview::RegisterId::AMD64_K3, X86::K3},
      {codeview::RegisterId::AMD64_K4, X86::K4},
      {codeview::RegisterId::AMD64_K5, X86::K5},
      {codeview::RegisterId::AMD64_K6, X86::K6},
      {codeview::RegisterId::AMD64_K7, X86::K7},
      {codeview::RegisterId::AMD64_XMM16, X86::XMM16},
      {codeview::RegisterId::AMD64_XMM17, X86::XMM17},
      {codeview::RegisterId::AMD64_XMM18, X86::XMM18},
      {codeview::RegisterId::AMD64_XMM19, X86::XMM19},
      {codeview::RegisterId::AMD64_XMM20, X86::XMM20},
      {codeview::RegisterId::AMD64_XMM21, X86::XMM21},
      {codeview::RegisterId::AMD64_XMM22, X86::XMM22},
      {codeview::RegisterId::AMD64_XMM23, X86::XMM23},
      {codeview::RegisterId::AMD64_XMM24, X86::XMM24},
      {codeview::RegisterId::AMD64_XMM25, X86::XMM25},
      {codeview::RegisterId::AMD64_XMM26, X86::XMM26},
      {codeview::RegisterId::AMD64_XMM27, X86::XMM27},
      {codeview::RegisterId::AMD64_XMM28, X86::XMM28},
      {codeview::RegisterId::AMD64_XMM29, X86::XMM29},
      {codeview::RegisterId::AMD64_XMM30, X86::XMM30},
      {codeview::RegisterId::AMD64_XMM31, X86::XMM31},
  };
  for (const auto &I : RegMap)
    MRI->mapLLVMRegToCVReg(I.Reg, static_cast<int>(I.CVReg));
}
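// Assemble the subtarget feature string: mode features derived from the
// triple come first, then any user-supplied features, so explicit requests
// take precedence over the defaults.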
MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
                                                  StringRef CPU, StringRef FS) {
  std::string ArchFS = X86_MC::ParseX86Triple(TT);
  assert(!ArchFS.empty() && "Failed to parse X86 triple");
  if (!FS.empty())
    ArchFS = (Twine(ArchFS) + "," + FS).str();

  if (CPU.empty())
    CPU = "generic";

  size_t posNoEVEX512 = FS.rfind("-evex512");
  // Make sure we won't be cheated by "-avx512fp16".
  size_t posNoAVX512F =
      FS.ends_with("-avx512f") ? FS.size() - 8 : FS.rfind("-avx512f,");
  size_t posEVEX512 = FS.rfind("+evex512");
  size_t posAVX512F = FS.rfind("+avx512"); // Any AVX512XXX will enable AVX512F.
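  // If AVX512F ends up enabled (some "+avx512*" that is not cancelled by a
  // later "-avx512f") and the user took no position on evex512 at all,
  // default to +evex512 so 512-bit vector support stays available.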
  if (posAVX512F != StringRef::npos &&
      (posNoAVX512F == StringRef::npos || posNoAVX512F < posAVX512F))
    if (posEVEX512 == StringRef::npos && posNoEVEX512 == StringRef::npos)
      ArchFS += ",+evex512";

  return createX86MCSubtargetInfoImpl(TT, CPU, /*TuneCPU*/ CPU, ArchFS);
}

static MCInstrInfo *createX86MCInstrInfo() {
  MCInstrInfo *X = new MCInstrInfo();
  InitX86MCInstrInfo(X);
  return X;
}

static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
  unsigned RA = (TT.getArch() == Triple::x86_64)
                    ? X86::RIP  // Should have dwarf #16.
                    : X86::EIP; // Should have dwarf #8.

  MCRegisterInfo *X = new MCRegisterInfo();
  InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
                        X86_MC::getDwarfRegFlavour(TT, true), RA);
  X86_MC::initLLVMToSEHAndCVRegMapping(X);
  return X;
}

static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
                                     const Triple &TheTriple,
                                     const MCTargetOptions &Options) {
  bool is64Bit = TheTriple.getArch() == Triple::x86_64;

  MCAsmInfo *MAI;
  if (TheTriple.isOSBinFormatMachO()) {
    if (is64Bit)
      MAI = new X86_64MCAsmInfoDarwin(TheTriple);
    else
      MAI = new X86MCAsmInfoDarwin(TheTriple);
  } else if (TheTriple.isOSBinFormatELF()) {
    // Force the use of an ELF container.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  } else if (TheTriple.isWindowsMSVCEnvironment() ||
             TheTriple.isWindowsCoreCLREnvironment()) {
    if (Options.getAssemblyLanguage().equals_insensitive("masm"))
      MAI = new X86MCAsmInfoMicrosoftMASM(TheTriple);
    else
      MAI = new X86MCAsmInfoMicrosoft(TheTriple);
  } else if (TheTriple.isOSCygMing() ||
             TheTriple.isWindowsItaniumEnvironment()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else if (TheTriple.isUEFI()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else {
    // The default is ELF.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  }
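  // On function entry the return address has just been pushed, so the CFA is
  // the stack pointer plus one slot (rsp+8 on x86-64, esp+4 on 32-bit x86),
  // and the return address itself is stored at CFA-8 (resp. CFA-4). The two
  // CFI instructions below express exactly that.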
  // Initialize initial frame state.
  // Calculate the number of bytes used to store the return address.
  int stackGrowth = is64Bit ? -8 : -4;

  // Initial state of the frame pointer is esp+stackGrowth.
  unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP;
  MCCFIInstruction Inst = MCCFIInstruction::cfiDefCfa(
      nullptr, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
  MAI->addInitialFrameState(Inst);

  // Add the return address to the move list.
  unsigned InstPtr = is64Bit ? X86::RIP : X86::EIP;
  MCCFIInstruction Inst2 = MCCFIInstruction::createOffset(
      nullptr, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
  MAI->addInitialFrameState(Inst2);

  return MAI;
}

static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
                                             unsigned SyntaxVariant,
                                             const MCAsmInfo &MAI,
                                             const MCInstrInfo &MII,
                                             const MCRegisterInfo &MRI) {
  if (SyntaxVariant == 0)
    return new X86ATTInstPrinter(MAI, MII, MRI);
  if (SyntaxVariant == 1)
    return new X86IntelInstPrinter(MAI, MII, MRI);
  return nullptr;
}

static MCRelocationInfo *createX86MCRelocationInfo(const Triple &TheTriple,
                                                   MCContext &Ctx) {
  // Default to the stock relocation info.
  return llvm::createMCRelocationInfo(TheTriple, Ctx);
}

namespace llvm {
namespace X86_MC {

class X86MCInstrAnalysis : public MCInstrAnalysis {
  X86MCInstrAnalysis(const X86MCInstrAnalysis &) = delete;
  X86MCInstrAnalysis &operator=(const X86MCInstrAnalysis &) = delete;
  virtual ~X86MCInstrAnalysis() = default;

public:
  X86MCInstrAnalysis(const MCInstrInfo *MCII) : MCInstrAnalysis(MCII) {}

#define GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

  bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst,
                            APInt &Mask) const override;
  std::vector<std::pair<uint64_t, uint64_t>>
  findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                 const Triple &TargetTriple) const override;

  bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
                      uint64_t &Target) const override;
  std::optional<uint64_t>
  evaluateMemoryOperandAddress(const MCInst &Inst, const MCSubtargetInfo *STI,
                               uint64_t Addr, uint64_t Size) const override;
  std::optional<uint64_t>
  getMemoryOperandRelocationOffset(const MCInst &Inst,
                                   uint64_t Size) const override;
};

#define GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"
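// For each definition of Inst (explicit defs first, then implicit ones), set
// the corresponding bit of Mask if writing the def also zeroes the rest of
// its super-register: true for 32-bit GPR writes in 64-bit mode and for
// XMM/YMM defs of VEX-, EVEX- and XOP-encoded instructions.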
bool X86MCInstrAnalysis::clearsSuperRegisters(const MCRegisterInfo &MRI,
                                              const MCInst &Inst,
                                              APInt &Mask) const {
  const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
  unsigned NumDefs = Desc.getNumDefs();
  unsigned NumImplicitDefs = Desc.implicit_defs().size();
  assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
         "Unexpected number of bits in the mask!");

  bool HasVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::VEX;
  bool HasEVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
  bool HasXOP = (Desc.TSFlags & X86II::EncodingMask) == X86II::XOP;

  const MCRegisterClass &GR32RC = MRI.getRegClass(X86::GR32RegClassID);
  const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID);
  const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID);

  auto ClearsSuperReg = [=](unsigned RegID) {
    // On X86-64, a general purpose integer register is viewed as a 64-bit
    // register internal to the processor.
    // An update to the lower 32 bits of a 64 bit integer register is
    // architecturally defined to zero extend the upper 32 bits.
    if (GR32RC.contains(RegID))
      return true;

    // Early exit if this instruction has no vex/evex/xop prefix.
    if (!HasEVEX && !HasVEX && !HasXOP)
      return false;

    // All VEX and EVEX encoded instructions are defined to zero the high bits
    // of the destination register up to VLMAX (i.e. the maximum vector
    // register width pertaining to the instruction).
    // We assume the same behavior for XOP instructions too.
    return VR128XRC.contains(RegID) || VR256XRC.contains(RegID);
  };

  Mask.clearAllBits();
  for (unsigned I = 0, E = NumDefs; I < E; ++I) {
    const MCOperand &Op = Inst.getOperand(I);
    if (ClearsSuperReg(Op.getReg()))
      Mask.setBit(I);
  }

  for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
    const MCPhysReg Reg = Desc.implicit_defs()[I];
    if (ClearsSuperReg(Reg))
      Mask.setBit(NumDefs + I);
  }

  return Mask.getBoolValue();
}
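// Scan a 32-bit .plt section for entries of the two usual shapes. Each entry
// begins with an indirect jmp: "ff a3 <disp32>" is jmp *disp32(%ebx), the PIC
// form in which %ebx holds the .got.plt base, and "ff 25 <disp32>" is
// jmp *disp32, the non-PIC form with an absolute GOT entry address.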
static std::vector<std::pair<uint64_t, uint64_t>>
findX86PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End;) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0xa3) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the base of the .got.plt section plus the immediate.
      // Set the 1 << 32 bit to let ELFObjectFileBase::getPltEntries convert
      // the offset to an address. Imm may be a negative int32_t if the GOT
      // entry is in .got.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.emplace_back(PltSectionVA + Byte, Imm | (uint64_t(1) << 32));
      Byte += 6;
    } else if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(std::make_pair(PltSectionVA + Byte, Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

static std::vector<std::pair<uint64_t, uint64_t>>
findX86_64PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End;) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the next instruction plus the immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(
          std::make_pair(PltSectionVA + Byte, PltSectionVA + Byte + 6 + Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

std::vector<std::pair<uint64_t, uint64_t>>
X86MCInstrAnalysis::findPltEntries(uint64_t PltSectionVA,
                                   ArrayRef<uint8_t> PltContents,
                                   const Triple &TargetTriple) const {
  switch (TargetTriple.getArch()) {
  case Triple::x86:
    return findX86PltEntries(PltSectionVA, PltContents);
  case Triple::x86_64:
    return findX86_64PltEntries(PltSectionVA, PltContents);
  default:
    return {};
  }
}

bool X86MCInstrAnalysis::evaluateBranch(const MCInst &Inst, uint64_t Addr,
                                        uint64_t Size, uint64_t &Target) const {
  if (Inst.getNumOperands() == 0 ||
      Info->get(Inst.getOpcode()).operands()[0].OperandType !=
          MCOI::OPERAND_PCREL)
    return false;
  Target = Addr + Size + Inst.getOperand(0).getImm();
  return true;
}

std::optional<uint64_t> X86MCInstrAnalysis::evaluateMemoryOperandAddress(
    const MCInst &Inst, const MCSubtargetInfo *STI, uint64_t Addr,
    uint64_t Size) const {
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return std::nullopt;
  MemOpStart += X86II::getOperandBias(MCID);

  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  if (SegReg.getReg() || IndexReg.getReg() || ScaleAmt.getImm() != 1 ||
      !Disp.isImm())
    return std::nullopt;

  // RIP-relative addressing.
  if (BaseReg.getReg() == X86::RIP)
    return Addr + Size + Disp.getImm();

  return std::nullopt;
}
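// For a RIP-relative LEA64r, report the offset within the instruction where
// a relocation would apply. The 32-bit displacement is the last field of an
// LEA (it has no immediate operand), so the relocated bytes are the final
// four, at Size - 4.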
std::optional<uint64_t>
X86MCInstrAnalysis::getMemoryOperandRelocationOffset(const MCInst &Inst,
                                                     uint64_t Size) const {
  if (Inst.getOpcode() != X86::LEA64r)
    return std::nullopt;
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return std::nullopt;
  MemOpStart += X86II::getOperandBias(MCID);
  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  // Must be a simple rip-relative address.
  if (BaseReg.getReg() != X86::RIP || SegReg.getReg() || IndexReg.getReg() ||
      ScaleAmt.getImm() != 1 || !Disp.isImm())
    return std::nullopt;
  // rip-relative ModR/M immediate is 32 bits.
  assert(Size > 4 && "invalid instruction size for rip-relative lea");
  return Size - 4;
}

} // end of namespace X86_MC

} // end of namespace llvm

static MCInstrAnalysis *createX86MCInstrAnalysis(const MCInstrInfo *Info) {
  return new X86_MC::X86MCInstrAnalysis(Info);
}

// Force static initialization.
extern "C" LLVM_C_ABI void LLVMInitializeX86TargetMC() {
  for (Target *T : {&getTheX86_32Target(), &getTheX86_64Target()}) {
    // Register the MC asm info.
    RegisterMCAsmInfoFn X(*T, createX86MCAsmInfo);

    // Register the MC instruction info.
    TargetRegistry::RegisterMCInstrInfo(*T, createX86MCInstrInfo);

    // Register the MC register info.
    TargetRegistry::RegisterMCRegInfo(*T, createX86MCRegisterInfo);

    // Register the MC subtarget info.
    TargetRegistry::RegisterMCSubtargetInfo(*T,
                                            X86_MC::createX86MCSubtargetInfo);

    // Register the MC instruction analyzer.
    TargetRegistry::RegisterMCInstrAnalysis(*T, createX86MCInstrAnalysis);

    // Register the code emitter.
    TargetRegistry::RegisterMCCodeEmitter(*T, createX86MCCodeEmitter);

    // Register the obj target streamer.
    TargetRegistry::RegisterObjectTargetStreamer(*T,
                                                 createX86ObjectTargetStreamer);

    // Register the asm target streamer.
    TargetRegistry::RegisterAsmTargetStreamer(*T, createX86AsmTargetStreamer);

    // Register the null streamer.
    TargetRegistry::RegisterNullTargetStreamer(*T, createX86NullTargetStreamer);

    TargetRegistry::RegisterCOFFStreamer(*T, createX86WinCOFFStreamer);
    TargetRegistry::RegisterELFStreamer(*T, createX86ELFStreamer);

    // Register the MCInstPrinter.
    TargetRegistry::RegisterMCInstPrinter(*T, createX86MCInstPrinter);

    // Register the MC relocation info.
    TargetRegistry::RegisterMCRelocationInfo(*T, createX86MCRelocationInfo);
  }

  // Register the asm backend.
  TargetRegistry::RegisterMCAsmBackend(getTheX86_32Target(),
                                       createX86_32AsmBackend);
  TargetRegistry::RegisterMCAsmBackend(getTheX86_64Target(),
                                       createX86_64AsmBackend);
}
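// Return the GPR of the requested Size from the same family as Reg, e.g.
// getX86SubSuperRegister(X86::AL, 64) yields X86::RAX. Returns
// X86::NoRegister if Reg is not a general-purpose register, or, for Size == 8
// with High set, if the family has no high-8-bit register.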
MCRegister llvm::getX86SubSuperRegister(MCRegister Reg, unsigned Size,
                                        bool High) {
#define DEFAULT_NOREG                                                          \
  default:                                                                     \
    return X86::NoRegister;
#define SUB_SUPER(R1, R2, R3, R4, R)                                           \
  case X86::R1:                                                                \
  case X86::R2:                                                                \
  case X86::R3:                                                                \
  case X86::R4:                                                                \
    return X86::R;
#define A_SUB_SUPER(R)                                                         \
  case X86::AH:                                                                \
    SUB_SUPER(AL, AX, EAX, RAX, R)
#define D_SUB_SUPER(R)                                                         \
  case X86::DH:                                                                \
    SUB_SUPER(DL, DX, EDX, RDX, R)
#define C_SUB_SUPER(R)                                                         \
  case X86::CH:                                                                \
    SUB_SUPER(CL, CX, ECX, RCX, R)
#define B_SUB_SUPER(R)                                                         \
  case X86::BH:                                                                \
    SUB_SUPER(BL, BX, EBX, RBX, R)
#define SI_SUB_SUPER(R) SUB_SUPER(SIL, SI, ESI, RSI, R)
#define DI_SUB_SUPER(R) SUB_SUPER(DIL, DI, EDI, RDI, R)
#define BP_SUB_SUPER(R) SUB_SUPER(BPL, BP, EBP, RBP, R)
#define SP_SUB_SUPER(R) SUB_SUPER(SPL, SP, ESP, RSP, R)
#define NO_SUB_SUPER(NO, REG)                                                  \
  SUB_SUPER(R##NO##B, R##NO##W, R##NO##D, R##NO, REG)
#define NO_SUB_SUPER_B(NO) NO_SUB_SUPER(NO, R##NO##B)
#define NO_SUB_SUPER_W(NO) NO_SUB_SUPER(NO, R##NO##W)
#define NO_SUB_SUPER_D(NO) NO_SUB_SUPER(NO, R##NO##D)
#define NO_SUB_SUPER_Q(NO) NO_SUB_SUPER(NO, R##NO)
  switch (Size) {
  default:
    llvm_unreachable("illegal register size");
  case 8:
    if (High) {
      switch (Reg.id()) {
        DEFAULT_NOREG
        A_SUB_SUPER(AH)
        D_SUB_SUPER(DH)
        C_SUB_SUPER(CH)
        B_SUB_SUPER(BH)
      }
    } else {
      switch (Reg.id()) {
        DEFAULT_NOREG
        A_SUB_SUPER(AL)
        D_SUB_SUPER(DL)
        C_SUB_SUPER(CL)
        B_SUB_SUPER(BL)
        SI_SUB_SUPER(SIL)
        DI_SUB_SUPER(DIL)
        BP_SUB_SUPER(BPL)
        SP_SUB_SUPER(SPL)
        NO_SUB_SUPER_B(8)
        NO_SUB_SUPER_B(9)
        NO_SUB_SUPER_B(10)
        NO_SUB_SUPER_B(11)
        NO_SUB_SUPER_B(12)
        NO_SUB_SUPER_B(13)
        NO_SUB_SUPER_B(14)
        NO_SUB_SUPER_B(15)
        NO_SUB_SUPER_B(16)
        NO_SUB_SUPER_B(17)
        NO_SUB_SUPER_B(18)
        NO_SUB_SUPER_B(19)
        NO_SUB_SUPER_B(20)
        NO_SUB_SUPER_B(21)
        NO_SUB_SUPER_B(22)
        NO_SUB_SUPER_B(23)
        NO_SUB_SUPER_B(24)
        NO_SUB_SUPER_B(25)
        NO_SUB_SUPER_B(26)
        NO_SUB_SUPER_B(27)
        NO_SUB_SUPER_B(28)
        NO_SUB_SUPER_B(29)
        NO_SUB_SUPER_B(30)
        NO_SUB_SUPER_B(31)
      }
    }
  case 16:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(AX)
      D_SUB_SUPER(DX)
      C_SUB_SUPER(CX)
      B_SUB_SUPER(BX)
      SI_SUB_SUPER(SI)
      DI_SUB_SUPER(DI)
      BP_SUB_SUPER(BP)
      SP_SUB_SUPER(SP)
      NO_SUB_SUPER_W(8)
      NO_SUB_SUPER_W(9)
      NO_SUB_SUPER_W(10)
      NO_SUB_SUPER_W(11)
      NO_SUB_SUPER_W(12)
      NO_SUB_SUPER_W(13)
      NO_SUB_SUPER_W(14)
      NO_SUB_SUPER_W(15)
      NO_SUB_SUPER_W(16)
      NO_SUB_SUPER_W(17)
      NO_SUB_SUPER_W(18)
      NO_SUB_SUPER_W(19)
      NO_SUB_SUPER_W(20)
      NO_SUB_SUPER_W(21)
      NO_SUB_SUPER_W(22)
      NO_SUB_SUPER_W(23)
      NO_SUB_SUPER_W(24)
      NO_SUB_SUPER_W(25)
      NO_SUB_SUPER_W(26)
      NO_SUB_SUPER_W(27)
      NO_SUB_SUPER_W(28)
      NO_SUB_SUPER_W(29)
      NO_SUB_SUPER_W(30)
      NO_SUB_SUPER_W(31)
    }
  case 32:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(EAX)
      D_SUB_SUPER(EDX)
      C_SUB_SUPER(ECX)
      B_SUB_SUPER(EBX)
      SI_SUB_SUPER(ESI)
      DI_SUB_SUPER(EDI)
      BP_SUB_SUPER(EBP)
      SP_SUB_SUPER(ESP)
      NO_SUB_SUPER_D(8)
      NO_SUB_SUPER_D(9)
      NO_SUB_SUPER_D(10)
      NO_SUB_SUPER_D(11)
      NO_SUB_SUPER_D(12)
      NO_SUB_SUPER_D(13)
      NO_SUB_SUPER_D(14)
      NO_SUB_SUPER_D(15)
      NO_SUB_SUPER_D(16)
      NO_SUB_SUPER_D(17)
      NO_SUB_SUPER_D(18)
      NO_SUB_SUPER_D(19)
      NO_SUB_SUPER_D(20)
      NO_SUB_SUPER_D(21)
      NO_SUB_SUPER_D(22)
      NO_SUB_SUPER_D(23)
      NO_SUB_SUPER_D(24)
      NO_SUB_SUPER_D(25)
      NO_SUB_SUPER_D(26)
      NO_SUB_SUPER_D(27)
      NO_SUB_SUPER_D(28)
      NO_SUB_SUPER_D(29)
      NO_SUB_SUPER_D(30)
      NO_SUB_SUPER_D(31)
    }
  case 64:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(RAX)
      D_SUB_SUPER(RDX)
      C_SUB_SUPER(RCX)
      B_SUB_SUPER(RBX)
      SI_SUB_SUPER(RSI)
      DI_SUB_SUPER(RDI)
      BP_SUB_SUPER(RBP)
      SP_SUB_SUPER(RSP)
      NO_SUB_SUPER_Q(8)
      NO_SUB_SUPER_Q(9)
      NO_SUB_SUPER_Q(10)
      NO_SUB_SUPER_Q(11)
      NO_SUB_SUPER_Q(12)
      NO_SUB_SUPER_Q(13)
      NO_SUB_SUPER_Q(14)
      NO_SUB_SUPER_Q(15)
      NO_SUB_SUPER_Q(16)
      NO_SUB_SUPER_Q(17)
      NO_SUB_SUPER_Q(18)
      NO_SUB_SUPER_Q(19)
      NO_SUB_SUPER_Q(20)
      NO_SUB_SUPER_Q(21)
      NO_SUB_SUPER_Q(22)
      NO_SUB_SUPER_Q(23)
      NO_SUB_SUPER_Q(24)
      NO_SUB_SUPER_Q(25)
      NO_SUB_SUPER_Q(26)
      NO_SUB_SUPER_Q(27)
      NO_SUB_SUPER_Q(28)
      NO_SUB_SUPER_Q(29)
      NO_SUB_SUPER_Q(30)
      NO_SUB_SUPER_Q(31)
    }
  }
}