//===-- LoongArchAsmBackend.cpp - LoongArch Assembler Backend -*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LoongArchAsmBackend class.
//
//===----------------------------------------------------------------------===//

#include "LoongArchAsmBackend.h"
#include "LoongArchFixupKinds.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/EndianStream.h"

#define DEBUG_TYPE "loongarch-asmbackend"

using namespace llvm;

std::optional<MCFixupKind>
LoongArchAsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF()) {
    auto Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/LoongArch.def"
#undef ELF_RELOC
                    .Case("BFD_RELOC_NONE", ELF::R_LARCH_NONE)
                    .Case("BFD_RELOC_32", ELF::R_LARCH_32)
                    .Case("BFD_RELOC_64", ELF::R_LARCH_64)
                    .Default(-1u);
    if (Type != -1u)
      return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
  }
  return std::nullopt;
}

const MCFixupKindInfo &
LoongArchAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // LoongArchFixupKinds.h.
      //
      // {name, offset, bits, flags}
      {"fixup_loongarch_b16", 10, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_b21", 0, 26, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_b26", 0, 26, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_abs_hi20", 5, 20, 0},
      {"fixup_loongarch_abs_lo12", 10, 12, 0},
      {"fixup_loongarch_abs64_lo20", 5, 20, 0},
      {"fixup_loongarch_abs64_hi12", 10, 12, 0},
      {"fixup_loongarch_tls_le_hi20", 5, 20, 0},
      {"fixup_loongarch_tls_le_lo12", 10, 12, 0},
      {"fixup_loongarch_tls_le64_lo20", 5, 20, 0},
      {"fixup_loongarch_tls_le64_hi12", 10, 12, 0},
      // TODO: Add more fixup kinds.
  };

  static_assert((std::size(Infos)) == LoongArch::NumTargetFixupKinds,
                "Not all fixup kinds added to Infos array");

  // Fixup kinds from .reloc directive are like R_LARCH_NONE. They
  // do not require any extra processing.
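  // Such kinds are produced by getFixupKind above, e.g. for a directive like
  // ".reloc ., R_LARCH_ADD32, sym", and are offset by
  // FirstLiteralRelocationKind so that the named relocation type is later
  // emitted verbatim.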
  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return Infos[Kind - FirstTargetFixupKind];
}

static void reportOutOfRangeError(MCContext &Ctx, SMLoc Loc, unsigned N) {
  Ctx.reportError(Loc, "fixup value out of range [" + Twine(llvm::minIntN(N)) +
                           ", " + Twine(llvm::maxIntN(N)) + "]");
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext &Ctx) {
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind");
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
    return Value;
  case LoongArch::fixup_loongarch_b16: {
    if (!isInt<18>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 18);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return (Value >> 2) & 0xffff;
  }
  case LoongArch::fixup_loongarch_b21: {
    if (!isInt<23>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 23);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x1f);
  }
  case LoongArch::fixup_loongarch_b26: {
    if (!isInt<28>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 28);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x3ff);
  }
  case LoongArch::fixup_loongarch_abs_hi20:
  case LoongArch::fixup_loongarch_tls_le_hi20:
    return (Value >> 12) & 0xfffff;
  case LoongArch::fixup_loongarch_abs_lo12:
  case LoongArch::fixup_loongarch_tls_le_lo12:
    return Value & 0xfff;
  case LoongArch::fixup_loongarch_abs64_lo20:
  case LoongArch::fixup_loongarch_tls_le64_lo20:
    return (Value >> 32) & 0xfffff;
  case LoongArch::fixup_loongarch_abs64_hi12:
  case LoongArch::fixup_loongarch_tls_le64_hi12:
    return (Value >> 52) & 0xfff;
  }
}

void LoongArchAsmBackend::applyFixup(const MCAssembler &Asm,
                                     const MCFixup &Fixup,
                                     const MCValue &Target,
                                     MutableArrayRef<char> Data, uint64_t Value,
                                     bool IsResolved,
                                     const MCSubtargetInfo *STI) const {
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKind Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  MCFixupKindInfo Info = getFixupKindInfo(Kind);
  MCContext &Ctx = Asm.getContext();

  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Value, Ctx);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;

  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
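  // E.g. fixup_loongarch_b26 has TargetOffset 0 and TargetSize 26, so
  // NumBytes is 4 and the adjusted value is OR'd into all four instruction
  // bytes, least significant byte first.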
  for (unsigned I = 0; I != NumBytes; ++I) {
    Data[Offset + I] |= uint8_t((Value >> (I * 8)) & 0xff);
  }
}

bool LoongArchAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                                const MCFixup &Fixup,
                                                const MCValue &Target,
                                                const MCSubtargetInfo *STI) {
  if (Fixup.getKind() >= FirstLiteralRelocationKind)
    return true;
  switch (Fixup.getTargetKind()) {
  default:
    return STI->hasFeature(LoongArch::FeatureRelax);
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
    return !Target.isAbsolute();
  }
}

static inline std::pair<MCFixupKind, MCFixupKind>
getRelocPairForSize(unsigned Size) {
  switch (Size) {
  default:
    llvm_unreachable("unsupported fixup size");
  case 6:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD6),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB6));
  case 8:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD8),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB8));
  case 16:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD16),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB16));
  case 32:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD32),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB32));
  case 64:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD64),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB64));
  }
}

bool LoongArchAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                       const MCSubtargetInfo *STI) const {
  // We mostly follow binutils' convention here: align to a 4-byte boundary
  // with 0-fill padding.
  OS.write_zeros(Count % 4);

  // The remainder is now padded with 4-byte nops.
  // nop: andi r0, r0, 0
  for (; Count >= 4; Count -= 4)
    OS.write("\0\0\x40\x03", 4);

  return true;
}

bool LoongArchAsmBackend::handleAddSubRelocations(const MCAsmLayout &Layout,
                                                  const MCFragment &F,
                                                  const MCFixup &Fixup,
                                                  const MCValue &Target,
                                                  uint64_t &FixedValue) const {
  std::pair<MCFixupKind, MCFixupKind> FK;
  uint64_t FixedValueA, FixedValueB;
  const MCSection &SecA = Target.getSymA()->getSymbol().getSection();
  const MCSection &SecB = Target.getSymB()->getSymbol().getSection();

  // We need to record a relocation if SecA != SecB. Usually SecB is the same
  // as the section of the Fixup, in which case the relocation is recorded as
  // PC-relative. If SecB is a different section, an error is reported. Either
  // way, just return false here and let handleFixup finish the work.
  if (&SecA != &SecB)
    return false;

  // From here on SecA == SecB. If linker relaxation is enabled, we need to
  // record the ADD and SUB relocations. Otherwise the FixedValue has already
  // been computed in evaluateFixup, so return true and avoid recording
  // relocations.
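  // For example, ".word .L2 - .L1" with both labels in this section emits an
  // R_LARCH_ADD32/R_LARCH_SUB32 pair so the linker can recompute the
  // difference after relaxation.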
  if (!STI.hasFeature(LoongArch::FeatureRelax))
    return true;

  switch (Fixup.getKind()) {
  case llvm::FK_Data_1:
    FK = getRelocPairForSize(8);
    break;
  case llvm::FK_Data_2:
    FK = getRelocPairForSize(16);
    break;
  case llvm::FK_Data_4:
    FK = getRelocPairForSize(32);
    break;
  case llvm::FK_Data_8:
    FK = getRelocPairForSize(64);
    break;
  default:
    llvm_unreachable("unsupported fixup size");
  }
  MCValue A = MCValue::get(Target.getSymA(), nullptr, Target.getConstant());
  MCValue B = MCValue::get(Target.getSymB());
  auto FA = MCFixup::create(Fixup.getOffset(), nullptr, std::get<0>(FK));
  auto FB = MCFixup::create(Fixup.getOffset(), nullptr, std::get<1>(FK));
  auto &Asm = Layout.getAssembler();
  Asm.getWriter().recordRelocation(Asm, Layout, &F, FA, A, FixedValueA);
  Asm.getWriter().recordRelocation(Asm, Layout, &F, FB, B, FixedValueB);
  FixedValue = FixedValueA - FixedValueB;
  return true;
}

std::unique_ptr<MCObjectTargetWriter>
LoongArchAsmBackend::createObjectTargetWriter() const {
  return createLoongArchELFObjectWriter(
      OSABI, Is64Bit, STI.hasFeature(LoongArch::FeatureRelax));
}

MCAsmBackend *llvm::createLoongArchAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TT = STI.getTargetTriple();
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
  return new LoongArchAsmBackend(STI, OSABI, TT.isArch64Bit(), Options);
}