//===-- LoongArchAsmBackend.cpp - LoongArch Assembler Backend -*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LoongArchAsmBackend class.
//
//===----------------------------------------------------------------------===//

#include "LoongArchAsmBackend.h"
#include "LoongArchFixupKinds.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"

#define DEBUG_TYPE "loongarch-asmbackend"

using namespace llvm;

/// Map a fixup name appearing in a .reloc directive to a fixup kind.
/// Accepts every R_LARCH_* relocation name (pulled in from the ELF reloc
/// .def file) plus the generic BFD_RELOC_NONE/32/64 aliases, and returns the
/// corresponding literal-relocation kind. Returns std::nullopt for unknown
/// names or non-ELF targets so generic handling can take over.
std::optional<MCFixupKind>
LoongArchAsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF()) {
    auto Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/LoongArch.def"
#undef ELF_RELOC
                    .Case("BFD_RELOC_NONE", ELF::R_LARCH_NONE)
                    .Case("BFD_RELOC_32", ELF::R_LARCH_32)
                    .Case("BFD_RELOC_64", ELF::R_LARCH_64)
                    .Default(-1u);
    if (Type != -1u)
      return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
  }
  return std::nullopt;
}

/// Return the {name, bit-offset, bit-width, flags} descriptor for a fixup
/// kind. Target-specific kinds are looked up in the table below; literal
/// .reloc kinds and generic kinds are delegated to the base class.
const MCFixupKindInfo &
LoongArchAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // LoongArchFixupKinds.h.
      //
      // {name, offset, bits, flags}
      {"fixup_loongarch_b16", 10, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_b21", 0, 26, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_b26", 0, 26, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_abs_hi20", 5, 20, 0},
      {"fixup_loongarch_abs_lo12", 10, 12, 0},
      {"fixup_loongarch_abs64_lo20", 5, 20, 0},
      {"fixup_loongarch_abs64_hi12", 10, 12, 0},
      {"fixup_loongarch_tls_le_hi20", 5, 20, 0},
      {"fixup_loongarch_tls_le_lo12", 10, 12, 0},
      {"fixup_loongarch_tls_le64_lo20", 5, 20, 0},
      {"fixup_loongarch_tls_le64_hi12", 10, 12, 0},
      // TODO: Add more fixup kinds.
  };

  static_assert((std::size(Infos)) == LoongArch::NumTargetFixupKinds,
                "Not all fixup kinds added to Infos array");

  // Fixup kinds from .reloc directive are like R_LARCH_NONE. They
  // do not require any extra processing.
  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return Infos[Kind - FirstTargetFixupKind];
}

/// Report a fixup value that does not fit in a signed N-bit immediate,
/// printing the valid [minIntN(N), maxIntN(N)] range in the diagnostic.
static void reportOutOfRangeError(MCContext &Ctx, SMLoc Loc, unsigned N) {
  Ctx.reportError(Loc, "fixup value out of range [" + Twine(llvm::minIntN(N)) +
                           ", " + Twine(llvm::maxIntN(N)) + "]");
}

/// Range-check Value for the given fixup kind and shuffle its bits into the
/// positions the instruction encoding expects (before the common
/// TargetOffset shift applied by applyFixup). Branch offsets must be 4-byte
/// aligned and are stored with the low two bits dropped.
static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext &Ctx) {
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind");
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_Data_leb128:
    // Plain data fixups are stored as-is.
    return Value;
  case LoongArch::fixup_loongarch_b16: {
    // 16-bit immediate holding a signed 18-bit, 4-byte-aligned offset.
    if (!isInt<18>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 18);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return (Value >> 2) & 0xffff;
  }
  case LoongArch::fixup_loongarch_b21: {
    // Signed 23-bit offset split across two instruction fields: low 16 bits
    // of (Value>>2) go at bit 10, high 5 bits at bit 0.
    if (!isInt<23>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 23);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x1f);
  }
  case LoongArch::fixup_loongarch_b26: {
    // Signed 28-bit offset: low 16 bits of (Value>>2) at bit 10, high 10
    // bits at bit 0.
    if (!isInt<28>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 28);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x3ff);
  }
  // The four abs/tls_le pieces below reassemble a 64-bit absolute value in
  // 12/20/20/12-bit chunks (bits [11:0], [31:12], [51:32], [63:52]).
  case LoongArch::fixup_loongarch_abs_hi20:
  case LoongArch::fixup_loongarch_tls_le_hi20:
    return (Value >> 12) & 0xfffff;
  case LoongArch::fixup_loongarch_abs_lo12:
  case LoongArch::fixup_loongarch_tls_le_lo12:
    return Value & 0xfff;
  case LoongArch::fixup_loongarch_abs64_lo20:
  case LoongArch::fixup_loongarch_tls_le64_lo20:
    return (Value >> 32) & 0xfffff;
  case LoongArch::fixup_loongarch_abs64_hi12:
  case LoongArch::fixup_loongarch_tls_le64_hi12:
    return (Value >> 52) & 0xfff;
  }
}

/// OR the 7-bit groups of Value into an already-emitted uleb128 skeleton in
/// Data (whose continuation bits are presumed set up by the emitter).
/// Reports an error if Value has more significant bits than Data has bytes.
static void fixupLeb128(MCContext &Ctx, const MCFixup &Fixup,
                        MutableArrayRef<char> Data, uint64_t Value) {
  unsigned I;
  for (I = 0; I != Data.size() && Value; ++I, Value >>= 7)
    Data[I] |= uint8_t(Value & 0x7f);
  if (Value)
    Ctx.reportError(Fixup.getLoc(), "Invalid uleb128 value!");
}

/// Patch a resolved fixup value into the fragment contents. Literal .reloc
/// kinds are left untouched (they become relocations verbatim); leb128 data
/// gets special byte-group handling; everything else is adjusted for the
/// instruction encoding and OR'd in byte by byte (little endian).
void LoongArchAsmBackend::applyFixup(const MCAssembler &Asm,
                                     const MCFixup &Fixup,
                                     const MCValue &Target,
                                     MutableArrayRef<char> Data, uint64_t Value,
                                     bool IsResolved,
                                     const MCSubtargetInfo *STI) const {
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKind Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  MCFixupKindInfo Info = getFixupKindInfo(Kind);
  MCContext &Ctx = Asm.getContext();

  // Fixup leb128 separately.
  if (Fixup.getTargetKind() == FK_Data_leb128)
    return fixupLeb128(Ctx, Fixup, Data, Value);

  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Value, Ctx);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;

  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  for (unsigned I = 0; I != NumBytes; ++I) {
    Data[Offset + I] |= uint8_t((Value >> (I * 8)) & 0xff);
  }
}

// Linker relaxation may change code size. We have to insert Nops
// for .align directive when linker relaxation enabled. So then Linker
// could satisfy alignment by removing Nops.
// The function returns the total Nops Size we need to insert.
bool LoongArchAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
    const MCAlignFragment &AF, unsigned &Size) {
  // Calculate Nops Size only when linker relaxation enabled.
  if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
    return false;

  // Ignore alignment if MaxBytesToEmit is less than the minimum Nop size.
  const unsigned MinNopLen = 4;
  if (AF.getMaxBytesToEmit() < MinNopLen)
    return false;
  // Worst case: alignment minus the one nop that naturally fits.
  Size = AF.getAlignment().value() - MinNopLen;
  // A 4-byte (or smaller) alignment needs no extra padding.
  return AF.getAlignment() > MinNopLen;
}

// We need to insert R_LARCH_ALIGN relocation type to indicate the
// position of Nops and the total bytes of the Nops have been inserted
// when linker relaxation enabled.
// The function inserts fixup_loongarch_align fixup which eventually will
// transfer to R_LARCH_ALIGN relocation type.
// The improved R_LARCH_ALIGN requires symbol index. The lowest 8 bits of
// addend represent alignment and the other bits of addend represent the
// maximum number of bytes to emit. The maximum number of bytes is zero
// means ignore the emit limit.
bool LoongArchAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
                                                        MCAlignFragment &AF) {
  // Insert the fixup only when linker relaxation enabled.
  if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
    return false;

  // Calculate total Nops we need to insert. If there are none to insert
  // then simply return.
  unsigned InsertedNopBytes;
  if (!shouldInsertExtraNopBytesForCodeAlign(AF, InsertedNopBytes))
    return false;

  MCSection *Sec = AF.getParent();
  MCContext &Ctx = Asm.getContext();
  const MCExpr *Dummy = MCConstantExpr::create(0, Ctx);
  // Create fixup_loongarch_align fixup.
  MCFixup Fixup =
      MCFixup::create(0, Dummy, MCFixupKind(LoongArch::fixup_loongarch_align));
  unsigned MaxBytesToEmit = AF.getMaxBytesToEmit();

  // Build the "improved" R_LARCH_ALIGN form: a per-section marker symbol
  // plus a packed addend (MaxBytesToEmit in the high bits, log2(alignment)
  // in the low 8 bits). The marker symbol is created once per section and
  // cached in SecToAlignSym.
  auto createExtendedValue = [&]() {
    const MCSymbolRefExpr *MCSym = getSecToAlignSym()[Sec];
    if (MCSym == nullptr) {
      // Define a marker symbol at the section with an offset of 0.
      MCSymbol *Sym = Ctx.createNamedTempSymbol("la-relax-align");
      Sym->setFragment(&*Sec->getBeginSymbol()->getFragment());
      Asm.registerSymbol(*Sym);
      MCSym = MCSymbolRefExpr::create(Sym, Ctx);
      getSecToAlignSym()[Sec] = MCSym;
    }
    return MCValue::get(MCSym, nullptr,
                        MaxBytesToEmit << 8 | Log2(AF.getAlignment()));
  };

  uint64_t FixedValue = 0;
  // If the emit limit cannot truncate the padding, the simple form (addend =
  // number of nop bytes) suffices; otherwise use the extended symbol form.
  MCValue Value = MaxBytesToEmit >= InsertedNopBytes
                      ? MCValue::get(InsertedNopBytes)
                      : createExtendedValue();
  Asm.getWriter().recordRelocation(Asm, &AF, Fixup, Value, FixedValue);

  return true;
}

/// Decide whether a fixup must be emitted as a relocation even if it could
/// be resolved at assembly time. Literal .reloc kinds always relocate; data
/// fixups relocate unless fully absolute; all other (instruction) fixups
/// relocate whenever linker relaxation is enabled, since relaxation can
/// invalidate assembly-time values.
bool LoongArchAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                                const MCFixup &Fixup,
                                                const MCValue &Target,
                                                const uint64_t,
                                                const MCSubtargetInfo *STI) {
  if (Fixup.getKind() >= FirstLiteralRelocationKind)
    return true;
  switch (Fixup.getTargetKind()) {
  default:
    return STI->hasFeature(LoongArch::FeatureRelax);
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_Data_leb128:
    return !Target.isAbsolute();
  }
}

/// Return the {ADD, SUB} relocation pair used to express a label difference
/// of the given bit size (6/8/16/32/64, or 128 for uleb128 pairs).
static inline std::pair<MCFixupKind, MCFixupKind>
getRelocPairForSize(unsigned Size) {
  switch (Size) {
  default:
    llvm_unreachable("unsupported fixup size");
  case 6:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD6),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB6));
  case 8:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD8),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB8));
  case 16:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD16),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB16));
  case 32:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD32),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB32));
  case 64:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD64),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB64));
  case 128:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD_ULEB128),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB_ULEB128));
  }
}

/// Relax an unsigned LEB128 fragment under linker relaxation: evaluate the
/// expression as a known-absolute value and attach an FK_Data_leb128 fixup
/// so the final value can still be patched. Returns {evaluated, relaxed};
/// signed LEB128 or non-evaluable expressions are left alone.
std::pair<bool, bool> LoongArchAsmBackend::relaxLEB128(const MCAssembler &Asm,
                                                       MCLEBFragment &LF,
                                                       int64_t &Value) const {
  const MCExpr &Expr = LF.getValue();
  if (LF.isSigned() || !Expr.evaluateKnownAbsolute(Value, Asm))
    return std::make_pair(false, false);
  LF.getFixups().push_back(
      MCFixup::create(0, &Expr, FK_Data_leb128, Expr.getLoc()));
  return std::make_pair(true, true);
}

/// Rewrite a DWARF line-table address advance so the address delta is
/// carried by an ADD/SUB relocation pair instead of a fixed constant, which
/// is required when linker relaxation may change the delta. Returns false
/// when the delta is already absolute (no relaxation involved).
bool LoongArchAsmBackend::relaxDwarfLineAddr(const MCAssembler &Asm,
                                             MCDwarfLineAddrFragment &DF,
                                             bool &WasRelaxed) const {
  MCContext &C = Asm.getContext();

  int64_t LineDelta = DF.getLineDelta();
  const MCExpr &AddrDelta = DF.getAddrDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  SmallVectorImpl<MCFixup> &Fixups = DF.getFixups();
  size_t OldSize = Data.size();

  int64_t Value;
  // Absolute delta: nothing depends on relaxation, keep the default lowering.
  if (AddrDelta.evaluateAsAbsolute(Value, Asm))
    return false;
  bool IsAbsolute = AddrDelta.evaluateKnownAbsolute(Value, Asm);
  assert(IsAbsolute && "CFA with invalid expression");
  (void)IsAbsolute;

  // Rebuild the fragment contents from scratch.
  Data.clear();
  Fixups.clear();
  raw_svector_ostream OS(Data);

  // INT64_MAX is a signal that this is actually a DW_LNE_end_sequence.
  if (LineDelta != INT64_MAX) {
    OS << uint8_t(dwarf::DW_LNS_advance_line);
    encodeSLEB128(LineDelta, OS);
  }

  unsigned Offset;
  std::pair<MCFixupKind, MCFixupKind> FK;

  // According to the DWARF specification, the `DW_LNS_fixed_advance_pc` opcode
  // takes a single unsigned half (unencoded) operand. The maximum encodable
  // value is therefore 65535. Set a conservative upper bound for relaxation.
  if (Value > 60000) {
    unsigned PtrSize = C.getAsmInfo()->getCodePointerSize();

    OS << uint8_t(dwarf::DW_LNS_extended_op);
    encodeULEB128(PtrSize + 1, OS);

    OS << uint8_t(dwarf::DW_LNE_set_address);
    Offset = OS.tell();
    assert((PtrSize == 4 || PtrSize == 8) && "Unexpected pointer size");
    FK = getRelocPairForSize(PtrSize == 4 ? 32 : 64);
    OS.write_zeros(PtrSize);
  } else {
    OS << uint8_t(dwarf::DW_LNS_fixed_advance_pc);
    Offset = OS.tell();
    FK = getRelocPairForSize(16);
    support::endian::write<uint16_t>(OS, 0, llvm::endianness::little);
  }

  // Emit the delta as an ADD(LHS) + SUB(RHS) relocation pair over the
  // zero-filled operand written above.
  const MCBinaryExpr &MBE = cast<MCBinaryExpr>(AddrDelta);
  Fixups.push_back(MCFixup::create(Offset, MBE.getLHS(), std::get<0>(FK)));
  Fixups.push_back(MCFixup::create(Offset, MBE.getRHS(), std::get<1>(FK)));

  if (LineDelta == INT64_MAX) {
    OS << uint8_t(dwarf::DW_LNS_extended_op);
    OS << uint8_t(1);
    OS << uint8_t(dwarf::DW_LNE_end_sequence);
  } else {
    OS << uint8_t(dwarf::DW_LNS_copy);
  }

  WasRelaxed = OldSize != Data.size();
  return true;
}

/// Rewrite a DWARF CFA advance_loc so the address delta is carried by an
/// ADD/SUB relocation pair, picking the narrowest DW_CFA_advance_loc*
/// encoding that fits the current value. Returns false when the delta is
/// already absolute.
bool LoongArchAsmBackend::relaxDwarfCFA(const MCAssembler &Asm,
                                        MCDwarfCallFrameFragment &DF,
                                        bool &WasRelaxed) const {
  const MCExpr &AddrDelta = DF.getAddrDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  SmallVectorImpl<MCFixup> &Fixups = DF.getFixups();
  size_t OldSize = Data.size();

  int64_t Value;
  if (AddrDelta.evaluateAsAbsolute(Value, Asm))
    return false;
  bool IsAbsolute = AddrDelta.evaluateKnownAbsolute(Value, Asm);
  assert(IsAbsolute && "CFA with invalid expression");
  (void)IsAbsolute;

  Data.clear();
  Fixups.clear();
  raw_svector_ostream OS(Data);

  assert(Asm.getContext().getAsmInfo()->getMinInstAlignment() == 1 &&
         "expected 1-byte alignment");
  if (Value == 0) {
    WasRelaxed = OldSize != Data.size();
    return true;
  }

  // Attach the ADD(LHS)/SUB(RHS) pair at the given offset into the opcode.
  auto AddFixups = [&Fixups,
                    &AddrDelta](unsigned Offset,
                                std::pair<MCFixupKind, MCFixupKind> FK) {
    const MCBinaryExpr &MBE = cast<MCBinaryExpr>(AddrDelta);
    Fixups.push_back(MCFixup::create(Offset, MBE.getLHS(), std::get<0>(FK)));
    Fixups.push_back(MCFixup::create(Offset, MBE.getRHS(), std::get<1>(FK)));
  };

  if (isUIntN(6, Value)) {
    // Delta fits in the 6-bit operand embedded in the opcode byte itself.
    OS << uint8_t(dwarf::DW_CFA_advance_loc);
    AddFixups(0, getRelocPairForSize(6));
  } else if (isUInt<8>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc1);
    support::endian::write<uint8_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, getRelocPairForSize(8));
  } else if (isUInt<16>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc2);
    support::endian::write<uint16_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, getRelocPairForSize(16));
  } else if (isUInt<32>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc4);
    support::endian::write<uint32_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, getRelocPairForSize(32));
  } else {
    llvm_unreachable("unsupported CFA encoding");
  }

  WasRelaxed = OldSize != Data.size();
  return true;
}

/// Emit Count bytes of padding: leading zero bytes up to 4-byte alignment,
/// then 4-byte nop instructions.
bool LoongArchAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                       const MCSubtargetInfo *STI) const {
  // We mostly follow binutils' convention here: align to 4-byte boundary with a
  // 0-fill padding.
  OS.write_zeros(Count % 4);

  // The remainder is now padded with 4-byte nops.
  // nop: andi r0, r0, 0
  for (; Count >= 4; Count -= 4)
    OS.write("\0\0\x40\x03", 4);

  return true;
}

/// Lower an A-B symbol-difference fixup into a paired R_LARCH_ADD*/SUB*
/// relocation when the difference cannot (or must not) be folded at assembly
/// time. Returns true when the pair was recorded (or the fold already
/// happened), false to let generic fixup handling take over.
bool LoongArchAsmBackend::handleAddSubRelocations(const MCAssembler &Asm,
                                                  const MCFragment &F,
                                                  const MCFixup &Fixup,
                                                  const MCValue &Target,
                                                  uint64_t &FixedValue) const {
  std::pair<MCFixupKind, MCFixupKind> FK;
  uint64_t FixedValueA, FixedValueB;
  const MCSymbol &SA = Target.getSymA()->getSymbol();
  const MCSymbol &SB = Target.getSymB()->getSymbol();

  bool force = !SA.isInSection() || !SB.isInSection();
  if (!force) {
    const MCSection &SecA = SA.getSection();
    const MCSection &SecB = SB.getSection();

    // We need record relocation if SecA != SecB. Usually SecB is same as the
    // section of Fixup, which will be record the relocation as PCRel. If SecB
    // is not same as the section of Fixup, it will report error. Just return
    // false and then this work can be finished by handleFixup.
    if (&SecA != &SecB)
      return false;

    // In SecA == SecB case. If the linker relaxation is enabled, we need record
    // the ADD, SUB relocations. Otherwise the FixedValue has already been calc-
    // ulated out in evaluateFixup, return true and avoid record relocations.
    if (!STI.hasFeature(LoongArch::FeatureRelax))
      return true;
  }

  switch (Fixup.getKind()) {
  case llvm::FK_Data_1:
    FK = getRelocPairForSize(8);
    break;
  case llvm::FK_Data_2:
    FK = getRelocPairForSize(16);
    break;
  case llvm::FK_Data_4:
    FK = getRelocPairForSize(32);
    break;
  case llvm::FK_Data_8:
    FK = getRelocPairForSize(64);
    break;
  case llvm::FK_Data_leb128:
    FK = getRelocPairForSize(128);
    break;
  default:
    llvm_unreachable("unsupported fixup size");
  }
  // Record two relocations at the same offset: ADD against A (carrying the
  // constant addend) and SUB against B.
  MCValue A = MCValue::get(Target.getSymA(), nullptr, Target.getConstant());
  MCValue B = MCValue::get(Target.getSymB());
  auto FA = MCFixup::create(Fixup.getOffset(), nullptr, std::get<0>(FK));
  auto FB = MCFixup::create(Fixup.getOffset(), nullptr, std::get<1>(FK));
  auto &Assembler = const_cast<MCAssembler &>(Asm);
  Asm.getWriter().recordRelocation(Assembler, &F, FA, A, FixedValueA);
  Asm.getWriter().recordRelocation(Assembler, &F, FB, B, FixedValueB);
  FixedValue = FixedValueA - FixedValueB;
  return true;
}

/// Create the ELF object writer for this backend, forwarding OS ABI, word
/// size, and the relax feature flag.
std::unique_ptr<MCObjectTargetWriter>
LoongArchAsmBackend::createObjectTargetWriter() const {
  return createLoongArchELFObjectWriter(
      OSABI, Is64Bit, STI.hasFeature(LoongArch::FeatureRelax));
}

/// Target-registry factory: build a LoongArchAsmBackend configured from the
/// subtarget triple (OS ABI, 32- vs 64-bit).
MCAsmBackend *llvm::createLoongArchAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TT = STI.getTargetTriple();
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
  return new LoongArchAsmBackend(STI, OSABI, TT.isArch64Bit(), Options);
}