//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCAssembler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>
#include <utility>

using namespace llvm;

namespace llvm {
class MCSubtargetInfo;
}

#define DEBUG_TYPE "assembler"

namespace {
namespace stats {

STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedCompactEncodedInstFragments,
          "Number of emitted assembler fragments - compact encoded inst");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedNopsFragments, "Number of emitted assembler fragments - nops");
STATISTIC(EmittedOrgFragments, "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

} // end namespace stats
} // end anonymous namespace

// FIXME FIXME FIXME: There are a number of places in this file where we
// convert what is a 64-bit assembler value used for computation into a value
// in the object file, which may truncate it. We should detect that truncation
// where invalid and report errors back.
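//
// A rough sketch of how this class is typically driven (illustrative only;
// `Ctx`, `MAB`, `MCE` and `MOW` are placeholder names, and the streamer setup
// varies by target and LLVM version):
//
//   MCAssembler Asm(Ctx, std::move(MAB), std::move(MCE), std::move(MOW));
//   // ... an MCObjectStreamer populates sections with fragments ...
//   Asm.Finish(); // runs layout() and then writes the object file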

/* *** */

MCAssembler::MCAssembler(MCContext &Context,
                         std::unique_ptr<MCAsmBackend> Backend,
                         std::unique_ptr<MCCodeEmitter> Emitter,
                         std::unique_ptr<MCObjectWriter> Writer)
    : Context(Context), Backend(std::move(Backend)),
      Emitter(std::move(Emitter)), Writer(std::move(Writer)) {}

MCAssembler::~MCAssembler() = default;

void MCAssembler::reset() {
  RelaxAll = false;
  SubsectionsViaSymbols = false;
  Sections.clear();
  Symbols.clear();
  LinkerOptions.clear();
  ThumbFuncs.clear();
  BundleAlignSize = 0;

  // reset objects owned by us
  if (getBackendPtr())
    getBackendPtr()->reset();
  if (getEmitterPtr())
    getEmitterPtr()->reset();
  if (getWriterPtr())
    getWriterPtr()->reset();
}

bool MCAssembler::registerSection(MCSection &Section) {
  if (Section.isRegistered())
    return false;
  assert(Section.curFragList()->Head && "allocInitialFragment not called");
  Sections.push_back(&Section);
  Section.setIsRegistered(true);
  return true;
}

bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
  if (ThumbFuncs.count(Symbol))
    return true;

  if (!Symbol->isVariable())
    return false;

  const MCExpr *Expr = Symbol->getVariableValue();

  MCValue V;
  if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr))
    return false;

  if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbolRefExpr *Ref = V.getSymA();
  if (!Ref)
    return false;

  if (Ref->getKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbol &Sym = Ref->getSymbol();
  if (!isThumbFunc(&Sym))
    return false;

  ThumbFuncs.insert(Symbol); // Cache it.
  return true;
}

bool MCAssembler::evaluateFixup(const MCFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, const MCSubtargetInfo *STI,
                                uint64_t &Value, bool &WasForced) const {
  ++stats::evaluateFixup;

  // FIXME: This code has some duplication with recordRelocation. We should
  // probably merge the two into a single callback that tries to evaluate a
  // fixup and records a relocation if one is needed.

  // On error claim to have completely evaluated the fixup, to prevent any
  // further processing from being done.
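  // On success, Value holds the computed fixup value and the boolean result
  // reports whether the fixup is fully resolved; WasForced is set when the
  // backend forces a relocation for an otherwise-resolved fixup.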
  const MCExpr *Expr = Fixup.getValue();
  MCContext &Ctx = getContext();
  Value = 0;
  WasForced = false;
  if (!Expr->evaluateAsRelocatable(Target, this, &Fixup)) {
    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
    return true;
  }
  if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
    if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
      Ctx.reportError(Fixup.getLoc(),
                      "unsupported subtraction of qualified symbol");
      return true;
    }
  }

  assert(getBackendPtr() && "Expected assembler backend");
  bool IsTarget = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                  MCFixupKindInfo::FKF_IsTarget;

  if (IsTarget)
    return getBackend().evaluateTargetFixup(*this, Fixup, DF, Target, STI,
                                            Value, WasForced);

  unsigned FixupFlags =
      getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags;
  bool IsPCRel = FixupFlags & MCFixupKindInfo::FKF_IsPCRel;

  bool IsResolved = false;
  if (IsPCRel) {
    if (Target.getSymB()) {
      IsResolved = false;
    } else if (!Target.getSymA()) {
      IsResolved = false;
    } else {
      const MCSymbolRefExpr *A = Target.getSymA();
      const MCSymbol &SA = A->getSymbol();
      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
        IsResolved = false;
      } else if (auto *Writer = getWriterPtr()) {
        IsResolved = (FixupFlags & MCFixupKindInfo::FKF_Constant) ||
                     Writer->isSymbolRefDifferenceFullyResolvedImpl(
                         *this, SA, *DF, false, true);
      }
    }
  } else {
    IsResolved = Target.isAbsolute();
  }

  Value = Target.getConstant();

  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    const MCSymbol &Sym = A->getSymbol();
    if (Sym.isDefined())
      Value += getSymbolOffset(Sym);
  }
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    const MCSymbol &Sym = B->getSymbol();
    if (Sym.isDefined())
      Value -= getSymbolOffset(Sym);
  }

  bool ShouldAlignPC =
      FixupFlags & MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
  assert((ShouldAlignPC ? IsPCRel : true) &&
         "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");

  if (IsPCRel) {
    uint64_t Offset = getFragmentOffset(*DF) + Fixup.getOffset();

    // A number of ARM fixups in Thumb mode require that the effective PC
    // address be determined as the 32-bit aligned version of the actual
    // offset.
    if (ShouldAlignPC)
      Offset &= ~0x3;
    Value -= Offset;
  }

  // Let the backend force a relocation if needed.
  if (IsResolved &&
      getBackend().shouldForceRelocation(*this, Fixup, Target, STI)) {
    IsResolved = false;
    WasForced = true;
  }

  // A linker relaxation target may emit ADD/SUB relocations for A-B+C. Let
  // recordRelocation handle non-VK_None cases like A@plt-B+C.
  if (!IsResolved && Target.getSymA() && Target.getSymB() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_None &&
      getBackend().handleAddSubRelocations(*this, *DF, Fixup, Target, Value))
    return true;

  return IsResolved;
}

uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
  assert(getBackendPtr() && "Requires assembler backend");
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCCompactEncodedInstFragment>(F).getContents().size();
  case MCFragment::FT_Fill: {
    auto &FF = cast<MCFillFragment>(F);
    int64_t NumValues = 0;
    if (!FF.getNumValues().evaluateKnownAbsolute(NumValues, *this)) {
      getContext().reportError(FF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }
    int64_t Size = NumValues * FF.getValueSize();
    if (Size < 0) {
      getContext().reportError(FF.getLoc(), "invalid number of bytes");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Nops:
    return cast<MCNopsFragment>(F).getNumBytes();

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_BoundaryAlign:
    return cast<MCBoundaryAlignFragment>(F).getSize();

  case MCFragment::FT_SymbolId:
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = getFragmentOffset(AF);
    unsigned Size = offsetToAlignment(Offset, AF.getAlignment());

    // Insert extra nops for code alignment if the target defines the
    // shouldInsertExtraNopBytesForCodeAlign hook.
    if (AF.getParent()->useCodeAlign() && AF.hasEmitNops() &&
        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
      return Size;

    // If we are padding with nops, force the padding to be larger than the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment().value();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, *this)) {
      getContext().reportError(OF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }

    uint64_t FragmentOffset = getFragmentOffset(OF);
    int64_t TargetLocation = Value.getConstant();
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
      uint64_t Val;
      if (!getSymbolOffset(A->getSymbol(), Val)) {
        getContext().reportError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      getContext().reportError(
          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                           "' (at offset '" + Twine(FragmentOffset) + "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_PseudoProbe:
    return cast<MCPseudoProbeAddrFragment>(F).getContents().size();
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  llvm_unreachable("invalid fragment kind");
}

// Compute the amount of padding required before the fragment \p F to
// obey bundling restrictions, where \p FOffset is the fragment's offset in
// its section and \p FSize is the fragment's size.
static uint64_t computeBundlePadding(unsigned BundleSize,
                                     const MCEncodedFragment *F,
                                     uint64_t FOffset, uint64_t FSize) {
  uint64_t OffsetInBundle = FOffset & (BundleSize - 1);
  uint64_t EndOfFragment = OffsetInBundle + FSize;

  // There are two kinds of bundling restrictions:
  //
  // 1) For alignToBundleEnd(), add padding to ensure that the fragment will
  //    *end* on a bundle boundary.
  // 2) Otherwise, check if the fragment would cross a bundle boundary. If it
  //    would, add padding until the end of the bundle so that the fragment
  //    will start in a new one.
  if (F->alignToBundleEnd()) {
    // Three possibilities here:
    //
    // A) The fragment just happens to end at a bundle boundary, so we're good.
    // B) The fragment ends before the current bundle boundary: pad it just
    //    enough to reach the boundary.
    // C) The fragment ends after the current bundle boundary: pad it until it
    //    reaches the end of the next bundle boundary.
    //
    // Note: this code could be made shorter with some modulo trickery, but
    // it's intentionally kept in its more explicit form for simplicity.
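    //
    // Worked example (illustrative numbers): with BundleSize = 16,
    // OffsetInBundle = 4 and FSize = 8, EndOfFragment is 12, so case B pads
    // 16 - 12 = 4 bytes; with FSize = 20, EndOfFragment is 24, so case C pads
    // 2 * 16 - 24 = 8 bytes and the fragment ends exactly on the next bundle
    // boundary.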
    if (EndOfFragment == BundleSize)
      return 0;
    else if (EndOfFragment < BundleSize)
      return BundleSize - EndOfFragment;
    else { // EndOfFragment > BundleSize
      return 2 * BundleSize - EndOfFragment;
    }
  } else if (OffsetInBundle > 0 && EndOfFragment > BundleSize)
    return BundleSize - OffsetInBundle;
  else
    return 0;
}

void MCAssembler::layoutBundle(MCFragment *Prev, MCFragment *F) const {
  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //
  //        BundlePadding
  //            |||
  // -------------------------------------
  //   Prev |##########|       F        |
  // -------------------------------------
  //                    ^
  //                    |
  //                  F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments. We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
  //
  assert(isa<MCEncodedFragment>(F) &&
         "Only MCEncodedFragment implementations have instructions");
  MCEncodedFragment *EF = cast<MCEncodedFragment>(F);
  uint64_t FSize = computeFragmentSize(*EF);

  if (FSize > getBundleAlignSize())
    report_fatal_error("Fragment can't be larger than a bundle size");

  uint64_t RequiredBundlePadding =
      computeBundlePadding(getBundleAlignSize(), EF, EF->Offset, FSize);
  if (RequiredBundlePadding > UINT8_MAX)
    report_fatal_error("Padding cannot exceed 255 bytes");
  EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
  EF->Offset += RequiredBundlePadding;
  if (auto *DF = dyn_cast_or_null<MCDataFragment>(Prev))
    if (DF->getContents().empty())
      DF->Offset = EF->Offset;
}

// Simple getSymbolOffset helper for the non-variable case.
static bool getLabelOffset(const MCAssembler &Asm, const MCSymbol &S,
                           bool ReportError, uint64_t &Val) {
  if (!S.getFragment()) {
    if (ReportError)
      report_fatal_error("unable to evaluate offset to undefined symbol '" +
                         S.getName() + "'");
    return false;
  }
  Val = Asm.getFragmentOffset(*S.getFragment()) + S.getOffset();
  return true;
}

static bool getSymbolOffsetImpl(const MCAssembler &Asm, const MCSymbol &S,
                                bool ReportError, uint64_t &Val) {
  if (!S.isVariable())
    return getLabelOffset(Asm, S, ReportError, Val);

  // If S is a variable, evaluate it.
  MCValue Target;
  if (!S.getVariableValue()->evaluateAsValue(Target, Asm))
    report_fatal_error("unable to evaluate offset for variable '" +
                       S.getName() + "'");

  uint64_t Offset = Target.getConstant();

  const MCSymbolRefExpr *A = Target.getSymA();
  if (A) {
    uint64_t ValA;
    // FIXME: On most platforms, `Target`'s component symbols are labels from
    // having been simplified during evaluation, but on Mach-O they can be
    // variables due to PR19203. This, and the line below for `B` can be
    // restored to call `getLabelOffset` when PR19203 is fixed.
    if (!getSymbolOffsetImpl(Asm, A->getSymbol(), ReportError, ValA))
      return false;
    Offset += ValA;
  }

  const MCSymbolRefExpr *B = Target.getSymB();
  if (B) {
    uint64_t ValB;
    if (!getSymbolOffsetImpl(Asm, B->getSymbol(), ReportError, ValB))
      return false;
    Offset -= ValB;
  }

  Val = Offset;
  return true;
}

bool MCAssembler::getSymbolOffset(const MCSymbol &S, uint64_t &Val) const {
  return getSymbolOffsetImpl(*this, S, false, Val);
}

uint64_t MCAssembler::getSymbolOffset(const MCSymbol &S) const {
  uint64_t Val;
  getSymbolOffsetImpl(*this, S, true, Val);
  return Val;
}

const MCSymbol *MCAssembler::getBaseSymbol(const MCSymbol &Symbol) const {
  assert(HasLayout);
  if (!Symbol.isVariable())
    return &Symbol;

  const MCExpr *Expr = Symbol.getVariableValue();
  MCValue Value;
  if (!Expr->evaluateAsValue(Value, *this)) {
    getContext().reportError(Expr->getLoc(),
                             "expression could not be evaluated");
    return nullptr;
  }

  const MCSymbolRefExpr *RefB = Value.getSymB();
  if (RefB) {
    getContext().reportError(
        Expr->getLoc(),
        Twine("symbol '") + RefB->getSymbol().getName() +
            "' could not be evaluated in a subtraction expression");
    return nullptr;
  }

  const MCSymbolRefExpr *A = Value.getSymA();
  if (!A)
    return nullptr;

  const MCSymbol &ASym = A->getSymbol();
  if (ASym.isCommon()) {
    getContext().reportError(Expr->getLoc(),
                             "Common symbol '" + ASym.getName() +
                                 "' cannot be used in assignment expr");
    return nullptr;
  }

  return &ASym;
}

uint64_t MCAssembler::getSectionAddressSize(const MCSection &Sec) const {
  assert(HasLayout);
  // The size is the last fragment's end offset.
  const MCFragment &F = *Sec.curFragList()->Tail;
  return getFragmentOffset(F) + computeFragmentSize(F);
}

uint64_t MCAssembler::getSectionFileSize(const MCSection &Sec) const {
  // Virtual sections have no file size.
  if (Sec.isVirtualSection())
    return 0;
  return getSectionAddressSize(Sec);
}

bool MCAssembler::registerSymbol(const MCSymbol &Symbol) {
  bool Changed = !Symbol.isRegistered();
  if (Changed) {
    Symbol.setIsRegistered(true);
    Symbols.push_back(&Symbol);
  }
  return Changed;
}

void MCAssembler::writeFragmentPadding(raw_ostream &OS,
                                       const MCEncodedFragment &EF,
                                       uint64_t FSize) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = EF.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(EF.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    const MCSubtargetInfo *STI = EF.getSubtargetInfo();
    if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //             v--------------v   <- BundleAlignSize
      //        v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(OS, DistanceToBoundary, STI))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(OS, BundlePadding, STI))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}

/// Write the fragment \p F to the output file.
static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
                          const MCFragment &F) {
  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(F);

  llvm::endianness Endian = Asm.getBackend().Endian;

  if (const MCEncodedFragment *EF = dyn_cast<MCEncodedFragment>(&F))
    Asm.writeFragmentPadding(OS, *EF, FragmentSize);

  // This variable (and its dummy usage) is to participate in the assert at
  // the end of the function.
  uint64_t Start = OS.tell();
  (void)Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to
    // fill the Count bytes. Then if that did not fill any bytes or there are
    // any bytes left to fill, use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask the target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(OS, Count, AF.getSubtargetInfo()))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OS << char(AF.getValue()); break;
      case 2:
        support::endian::write<uint16_t>(OS, AF.getValue(), Endian);
        break;
      case 4:
        support::endian::write<uint32_t>(OS, AF.getValue(), Endian);
        break;
      case 8:
        support::endian::write<uint64_t>(OS, AF.getValue(), Endian);
        break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OS << cast<MCDataFragment>(F).getContents();
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OS << cast<MCRelaxableFragment>(F).getContents();
    break;

  case MCFragment::FT_CompactEncodedInst:
    ++stats::EmittedCompactEncodedInstFragments;
    OS << cast<MCCompactEncodedInstFragment>(F).getContents();
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    uint64_t V = FF.getValue();
    unsigned VSize = FF.getValueSize();
    const unsigned MaxChunkSize = 16;
    char Data[MaxChunkSize];
    assert(0 < VSize && VSize <= MaxChunkSize && "Illegal fragment fill size");
    // Duplicate V into Data as byte vector to reduce number of
    // writes done. As such, do endian conversion here.
    for (unsigned I = 0; I != VSize; ++I) {
      unsigned index =
          Endian == llvm::endianness::little ? I : (VSize - I - 1);
      Data[I] = uint8_t(V >> (index * 8));
    }
    for (unsigned I = VSize; I < MaxChunkSize; ++I)
      Data[I] = Data[I - VSize];

    // Set ChunkSize to the largest multiple of VSize that fits in Data.
    const unsigned NumPerChunk = MaxChunkSize / VSize;
    const unsigned ChunkSize = VSize * NumPerChunk;

    // Do copies by chunk.
    StringRef Ref(Data, ChunkSize);
    for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I)
      OS << Ref;

    // Write the remainder if needed.
    unsigned TrailingCount = FragmentSize % ChunkSize;
    if (TrailingCount)
      OS.write(Data, TrailingCount);
    break;
  }

  case MCFragment::FT_Nops: {
    ++stats::EmittedNopsFragments;
    const MCNopsFragment &NF = cast<MCNopsFragment>(F);

    int64_t NumBytes = NF.getNumBytes();
    int64_t ControlledNopLength = NF.getControlledNopLength();
    int64_t MaximumNopLength =
        Asm.getBackend().getMaximumNopSize(*NF.getSubtargetInfo());

    assert(NumBytes > 0 && "Expected positive NOPs fragment size");
    assert(ControlledNopLength >= 0 && "Expected non-negative NOP size");

    if (ControlledNopLength > MaximumNopLength) {
      Asm.getContext().reportError(NF.getLoc(),
                                   "illegal NOP size " +
                                       std::to_string(ControlledNopLength) +
                                       ". (expected within [0, " +
                                       std::to_string(MaximumNopLength) + "])");
      // Clamp the NOP length as reportError does not stop the execution
      // immediately.
      ControlledNopLength = MaximumNopLength;
    }

    // Use the maximum value if the size of each NOP is not specified.
    if (!ControlledNopLength)
      ControlledNopLength = MaximumNopLength;

    while (NumBytes) {
      uint64_t NumBytesToEmit =
          (uint64_t)std::min(NumBytes, ControlledNopLength);
      assert(NumBytesToEmit && "try to emit empty NOP instruction");
      if (!Asm.getBackend().writeNopData(OS, NumBytesToEmit,
                                         NF.getSubtargetInfo())) {
        report_fatal_error("unable to write nop sequence of the remaining " +
                           Twine(NumBytesToEmit) + " bytes");
        break;
      }
      NumBytes -= NumBytesToEmit;
    }
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OS << LF.getContents();
    break;
  }

  case MCFragment::FT_BoundaryAlign: {
    const MCBoundaryAlignFragment &BF = cast<MCBoundaryAlignFragment>(F);
    if (!Asm.getBackend().writeNopData(OS, FragmentSize,
                                       BF.getSubtargetInfo()))
      report_fatal_error("unable to write nop sequence of " +
                         Twine(FragmentSize) + " bytes");
    break;
  }

  case MCFragment::FT_SymbolId: {
    const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F);
    support::endian::write<uint32_t>(OS, SF.getSymbol()->getIndex(), Endian);
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OS << char(OF.getValue());

    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OS << CF.getContents();
    break;
  }
  case MCFragment::FT_CVInlineLines: {
    const auto &OF = cast<MCCVInlineLineTableFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_CVDefRange: {
    const auto &DRF = cast<MCCVDefRangeFragment>(F);
    OS << DRF.getContents();
    break;
  }
  case MCFragment::FT_PseudoProbe: {
    const MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(F);
    OS << PF.getContents();
    break;
  }
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  assert(OS.tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}

void MCAssembler::writeSectionData(raw_ostream &OS,
                                   const MCSection *Sec) const {
  assert(getBackendPtr() && "Expected assembler backend");

  // Ignore virtual sections.
  if (Sec->isVirtualSection()) {
    assert(getSectionFileSize(*Sec) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (const MCFragment &F : *Sec) {
      switch (F.getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use
        // standard directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(F);
        if (DF.fixup_begin() != DF.fixup_end())
          getContext().reportError(SMLoc(), Sec->getVirtualSectionKind() +
                                                " section '" + Sec->getName() +
                                                "' cannot have fixups");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          if (DF.getContents()[i]) {
            getContext().reportError(SMLoc(),
                                     Sec->getVirtualSectionKind() +
                                         " section '" + Sec->getName() +
                                         "' cannot have non-zero initializers");
            break;
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a virtual
        // section.
        assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||
                cast<MCAlignFragment>(F).getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(F).getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      case MCFragment::FT_Org:
        break;
      }
    }

    return;
  }

  uint64_t Start = OS.tell();
  (void)Start;

  for (const MCFragment &F : *Sec)
    writeFragment(OS, *this, F);

  assert(getContext().hadError() ||
         OS.tell() - Start == getSectionAddressSize(*Sec));
}

std::tuple<MCValue, uint64_t, bool>
MCAssembler::handleFixup(MCFragment &F, const MCFixup &Fixup,
                         const MCSubtargetInfo *STI) {
  // Evaluate the fixup.
  MCValue Target;
  uint64_t FixedValue;
  bool WasForced;
  bool IsResolved =
      evaluateFixup(Fixup, &F, Target, STI, FixedValue, WasForced);
  if (!IsResolved) {
    // The fixup was unresolved; we need a relocation. Inform the object
    // writer of the relocation, and give it an opportunity to adjust the
    // fixup value if need be.
    getWriter().recordRelocation(*this, &F, Fixup, Target, FixedValue);
  }
  return std::make_tuple(Target, FixedValue, IsResolved);
}

void MCAssembler::layout() {
  assert(getBackendPtr() && "Expected assembler backend");
  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - pre-layout\n--\n";
      dump(); });

  // Assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCSection &Sec : *this) {
    Sec.setOrdinal(SectionIndex++);

    // Chain together fragments from all subsections.
    if (Sec.Subsections.size() > 1) {
      MCDummyFragment Dummy;
      MCFragment *Tail = &Dummy;
      for (auto &[_, List] : Sec.Subsections) {
        assert(List.Head);
        Tail->Next = List.Head;
        Tail = List.Tail;
      }
      Sec.Subsections.clear();
      Sec.Subsections.push_back({0u, {Dummy.getNext(), Tail}});
      Sec.CurFragList = &Sec.Subsections[0].second;

      unsigned FragmentIndex = 0;
      for (MCFragment &Frag : Sec)
        Frag.setLayoutOrder(FragmentIndex++);
    }
  }

  // Layout until everything fits.
  this->HasLayout = true;
  for (MCSection &Sec : *this)
    layoutSection(Sec);
  while (layoutOnce()) {
  }

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - post-relaxation\n--\n";
      dump(); });

  // Some targets might want to adjust fragment offsets. If so, perform another
  // layout loop.
  if (getBackend().finishLayout(*this))
    for (MCSection &Sec : *this)
      layoutSection(Sec);

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - final-layout\n--\n";
      dump(); });

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  getWriter().executePostLayoutBinding(*this);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCSection &Sec : *this) {
    for (MCFragment &Frag : Sec) {
      ArrayRef<MCFixup> Fixups;
      MutableArrayRef<char> Contents;
      const MCSubtargetInfo *STI = nullptr;

      // Process MCAlignFragment and MCEncodedFragmentWithFixups here.
      switch (Frag.getKind()) {
      default:
        continue;
      case MCFragment::FT_Align: {
        MCAlignFragment &AF = cast<MCAlignFragment>(Frag);
        // Insert a fixup for code alignment if the target defines the
        // shouldInsertFixupForCodeAlign hook.
        if (Sec.useCodeAlign() && AF.hasEmitNops())
          getBackend().shouldInsertFixupForCodeAlign(*this, AF);
        continue;
      }
      case MCFragment::FT_Data: {
        MCDataFragment &DF = cast<MCDataFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        STI = DF.getSubtargetInfo();
        assert(!DF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_Relaxable: {
        MCRelaxableFragment &RF = cast<MCRelaxableFragment>(Frag);
        Fixups = RF.getFixups();
        Contents = RF.getContents();
        STI = RF.getSubtargetInfo();
        assert(!RF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_CVDefRange: {
        MCCVDefRangeFragment &CF = cast<MCCVDefRangeFragment>(Frag);
        Fixups = CF.getFixups();
        Contents = CF.getContents();
        break;
      }
      case MCFragment::FT_Dwarf: {
        MCDwarfLineAddrFragment &DF = cast<MCDwarfLineAddrFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_DwarfFrame: {
        MCDwarfCallFrameFragment &DF = cast<MCDwarfCallFrameFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_LEB: {
        auto &LF = cast<MCLEBFragment>(Frag);
        Fixups = LF.getFixups();
        Contents = LF.getContents();
        break;
      }
      case MCFragment::FT_PseudoProbe: {
        MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(Frag);
        Fixups = PF.getFixups();
        Contents = PF.getContents();
        break;
      }
      }
      for (const MCFixup &Fixup : Fixups) {
        uint64_t FixedValue;
        bool IsResolved;
        MCValue Target;
        std::tie(Target, FixedValue, IsResolved) =
            handleFixup(Frag, Fixup, STI);
        getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
                                IsResolved, STI);
      }
    }
  }
}

void MCAssembler::Finish() {
  layout();

  // Write the object file.
  stats::ObjectBytes += getWriter().writeObject(*this);

  HasLayout = false;
}

bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
                                       const MCRelaxableFragment *DF) const {
  assert(getBackendPtr() && "Expected assembler backend");
  MCValue Target;
  uint64_t Value;
  bool WasForced;
  bool Resolved = evaluateFixup(Fixup, DF, Target, DF->getSubtargetInfo(),
                                Value, WasForced);
  if (Target.getSymA() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 &&
      Fixup.getKind() == FK_Data_1)
    return false;
  return getBackend().fixupNeedsRelaxationAdvanced(*this, Fixup, Resolved,
                                                   Value, DF, WasForced);
}

bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // If this inst doesn't ever need relaxation, ignore it.
  // This occurs when we are intentionally pushing out inst fragments, or
  // because we relaxed a previous instruction to one that doesn't need
  // relaxation.
  if (!getBackend().mayNeedRelaxation(F->getInst(), *F->getSubtargetInfo()))
    return false;

  for (const MCFixup &Fixup : F->getFixups())
    if (fixupNeedsRelaxation(Fixup, F))
      return true;

  return false;
}

bool MCAssembler::relaxInstruction(MCRelaxableFragment &F) {
  assert(getEmitterPtr() &&
         "Expected CodeEmitter defined for relaxInstruction");
  if (!fragmentNeedsRelaxation(&F))
    return false;

  ++stats::RelaxedInstructions;

  // FIXME-PERF: We could immediately lower out instructions if we can tell
  // they are fully resolved, to avoid retesting on later passes.

  // Relax the fragment.

  MCInst Relaxed = F.getInst();
  getBackend().relaxInstruction(Relaxed, *F.getSubtargetInfo());

  // Encode the new instruction.
  F.setInst(Relaxed);
  F.getFixups().clear();
  F.getContents().clear();
  getEmitter().encodeInstruction(Relaxed, F.getContents(), F.getFixups(),
                                 *F.getSubtargetInfo());
  return true;
}

bool MCAssembler::relaxLEB(MCLEBFragment &LF) {
  const unsigned OldSize = static_cast<unsigned>(LF.getContents().size());
  unsigned PadTo = OldSize;
  int64_t Value;
  SmallVectorImpl<char> &Data = LF.getContents();
  LF.getFixups().clear();
  // Use evaluateKnownAbsolute for Mach-O as a hack: .subsections_via_symbols
  // requires that .uleb128 A-B is foldable where A and B reside in different
  // fragments. This is used by __gcc_except_table.
  bool Abs = getSubsectionsViaSymbols()
                 ? LF.getValue().evaluateKnownAbsolute(Value, *this)
                 : LF.getValue().evaluateAsAbsolute(Value, *this);
  if (!Abs) {
    bool Relaxed, UseZeroPad;
    std::tie(Relaxed, UseZeroPad) = getBackend().relaxLEB128(*this, LF, Value);
    if (!Relaxed) {
      getContext().reportError(LF.getValue().getLoc(),
                               Twine(LF.isSigned() ? ".s" : ".u") +
                                   "leb128 expression is not absolute");
      LF.setValue(MCConstantExpr::create(0, Context));
    }
    uint8_t Tmp[10]; // maximum size: ceil(64/7)
    PadTo = std::max(PadTo, encodeULEB128(uint64_t(Value), Tmp));
    if (UseZeroPad)
      Value = 0;
  }
  Data.clear();
  raw_svector_ostream OSE(Data);
  // The compiler can generate EH table assembly that is impossible to assemble
  // without either adding padding to an LEB fragment or adding extra padding
  // to a later alignment fragment. To accommodate such tables, relaxation can
  // only increase an LEB fragment size here, not decrease it. See PR35809.
  if (LF.isSigned())
    encodeSLEB128(Value, OSE, PadTo);
  else
    encodeULEB128(Value, OSE, PadTo);
  return OldSize != LF.getContents().size();
}

/// Check if the branch crosses the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch crosses the boundary.
static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
                             Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (StartAddr >> Log2(BoundaryAlignment)) !=
         ((EndAddr - 1) >> Log2(BoundaryAlignment));
}

/// Check if the branch is against the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch is against the boundary.
static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
                              Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
}

/// Check if the branch needs padding.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch needs padding.
static bool needPadding(uint64_t StartAddr, uint64_t Size,
                        Align BoundaryAlignment) {
  return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
         isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
}

bool MCAssembler::relaxBoundaryAlign(MCBoundaryAlignFragment &BF) {
  // A BoundaryAlignFragment that doesn't need to align any fragment should
  // not be relaxed.
  if (!BF.getLastFragment())
    return false;

  uint64_t AlignedOffset = getFragmentOffset(BF);
  uint64_t AlignedSize = 0;
  for (const MCFragment *F = BF.getNext();; F = F->getNext()) {
    AlignedSize += computeFragmentSize(*F);
    if (F == BF.getLastFragment())
      break;
  }

  Align BoundaryAlignment = BF.getAlignment();
  uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment)
                         ? offsetToAlignment(AlignedOffset, BoundaryAlignment)
                         : 0U;
  if (NewSize == BF.getSize())
    return false;
  BF.setSize(NewSize);
  return true;
}

bool MCAssembler::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfLineAddr(*this, DF, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
  assert(Abs && "We created a line delta with an invalid expression");
  (void)Abs;
  int64_t LineDelta = DF.getLineDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  Data.clear();
  DF.getFixups().clear();

  MCDwarfLineAddr::encode(Context, getDWARFLinetableParams(), LineDelta,
                          AddrDelta, Data);
  return OldSize != Data.size();
}

bool MCAssembler::relaxDwarfCallFrameFragment(MCDwarfCallFrameFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfCFA(*this, DF, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = getContext();
  int64_t Value;
  bool Abs = DF.getAddrDelta().evaluateAsAbsolute(Value, *this);
  if (!Abs) {
    getContext().reportError(DF.getAddrDelta().getLoc(),
                             "invalid CFI advance_loc expression");
    DF.setAddrDelta(MCConstantExpr::create(0, Context));
    return false;
  }

  SmallVectorImpl<char> &Data = DF.getContents();
  uint64_t OldSize = Data.size();
  Data.clear();
  DF.getFixups().clear();

  MCDwarfFrameEmitter::encodeAdvanceLoc(Context, Value, Data);
  return OldSize != Data.size();
}

bool MCAssembler::relaxCVInlineLineTable(MCCVInlineLineTableFragment &F) {
  unsigned OldSize = F.getContents().size();
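  // Re-encode the inline line table against the current layout; the fragment
  // contents may grow or shrink as code offsets change between relaxation
  // rounds.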
  getContext().getCVContext().encodeInlineLineTable(*this, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxCVDefRange(MCCVDefRangeFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeDefRange(*this, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxPseudoProbeAddr(MCPseudoProbeAddrFragment &PF) {
  uint64_t OldSize = PF.getContents().size();
  int64_t AddrDelta;
  bool Abs = PF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
  assert(Abs && "We created a pseudo probe with an invalid expression");
  (void)Abs;
  SmallVectorImpl<char> &Data = PF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  PF.getFixups().clear();

  // AddrDelta is a signed integer.
  encodeSLEB128(AddrDelta, OSE, OldSize);
  return OldSize != Data.size();
}

bool MCAssembler::relaxFragment(MCFragment &F) {
  switch (F.getKind()) {
  default:
    return false;
  case MCFragment::FT_Relaxable:
    assert(!getRelaxAll() &&
           "Did not expect a MCRelaxableFragment in RelaxAll mode");
    return relaxInstruction(cast<MCRelaxableFragment>(F));
  case MCFragment::FT_Dwarf:
    return relaxDwarfLineAddr(cast<MCDwarfLineAddrFragment>(F));
  case MCFragment::FT_DwarfFrame:
    return relaxDwarfCallFrameFragment(cast<MCDwarfCallFrameFragment>(F));
  case MCFragment::FT_LEB:
    return relaxLEB(cast<MCLEBFragment>(F));
  case MCFragment::FT_BoundaryAlign:
    return relaxBoundaryAlign(cast<MCBoundaryAlignFragment>(F));
  case MCFragment::FT_CVInlineLines:
    return relaxCVInlineLineTable(cast<MCCVInlineLineTableFragment>(F));
  case MCFragment::FT_CVDefRange:
    return relaxCVDefRange(cast<MCCVDefRangeFragment>(F));
  case MCFragment::FT_PseudoProbe:
    return relaxPseudoProbeAddr(cast<MCPseudoProbeAddrFragment>(F));
  }
}

void MCAssembler::layoutSection(MCSection &Sec) {
  MCFragment *Prev = nullptr;
  uint64_t Offset = 0;
  for (MCFragment &F : Sec) {
    F.Offset = Offset;
    if (LLVM_UNLIKELY(isBundlingEnabled())) {
      if (F.hasInstructions()) {
        layoutBundle(Prev, &F);
        Offset = F.Offset;
      }
      Prev = &F;
    }
    Offset += computeFragmentSize(F);
  }
}

bool MCAssembler::layoutOnce() {
  ++stats::RelaxationSteps;

  // Size of fragments in one section can depend on the size of fragments in
  // another. If any fragment has changed size, we have to re-layout (and
  // as a result possibly further relax) all.
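  // Relax each section to a local fixed point, then tell the caller whether
  // anything changed so the cross-section loop in layout() can run again.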
  bool ChangedAny = false;
  for (MCSection &Sec : *this) {
    for (;;) {
      bool Changed = false;
      for (MCFragment &F : Sec)
        if (relaxFragment(F))
          Changed = true;
      ChangedAny |= Changed;
      if (!Changed)
        break;
      layoutSection(Sec);
    }
  }
  return ChangedAny;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MCAssembler::dump() const {
  raw_ostream &OS = errs();

  OS << "<MCAssembler\n";
  OS << "  Sections:[\n    ";
  bool First = true;
  for (const MCSection &Sec : *this) {
    if (First)
      First = false;
    else
      OS << ",\n    ";
    Sec.dump();
  }
  OS << "],\n";
  OS << "  Symbols:[";

  First = true;
  for (const MCSymbol &Sym : symbols()) {
    if (First)
      First = false;
    else
      OS << ",\n           ";
    OS << "(";
    Sym.dump();
    OS << ", Index:" << Sym.getIndex() << ")";
  }
  OS << "]>\n";
}
#endif