//===- OutputSections.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "OutputSections.h"
#include "Config.h"
#include "InputFiles.h"
#include "LinkerScript.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/Arrays.h"
#include "lld/Common/Memory.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Config/llvm-config.h" // LLVM_ENABLE_ZLIB, LLVM_ENABLE_ZSTD
#include "llvm/Support/Compression.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TimeProfiler.h"
#undef in
#if LLVM_ENABLE_ZLIB
// Avoid introducing max as a macro from Windows headers.
#define NOMINMAX
#include <zlib.h>
#endif
#if LLVM_ENABLE_ZSTD
#include <zstd.h>
#endif

using namespace llvm;
using namespace llvm::dwarf;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

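// Map section flags to program header flags: SHF_WRITE yields PF_W and
// SHF_EXECINSTR yields PF_X. PF_R is set for everything except
// SHF_ARM_PURECODE sections, which may be placed in execute-only memory.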
uint32_t OutputSection::getPhdrFlags() const {
  uint32_t ret = 0;
  if (ctx.arg.emachine != EM_ARM || !(flags & SHF_ARM_PURECODE))
    ret |= PF_R;
  if (flags & SHF_WRITE)
    ret |= PF_W;
  if (flags & SHF_EXECINSTR)
    ret |= PF_X;
  return ret;
}

template <class ELFT>
void OutputSection::writeHeaderTo(typename ELFT::Shdr *shdr) {
  shdr->sh_entsize = entsize;
  shdr->sh_addralign = addralign;
  shdr->sh_type = type;
  shdr->sh_offset = offset;
  shdr->sh_flags = flags;
  shdr->sh_info = info;
  shdr->sh_link = link;
  shdr->sh_addr = addr;
  shdr->sh_size = size;
  shdr->sh_name = shName;
}

OutputSection::OutputSection(Ctx &ctx, StringRef name, uint32_t type,
                             uint64_t flags)
    : SectionBase(Output, ctx.internalFile, name, type, flags, /*link=*/0,
                  /*info=*/0, /*addralign=*/1, /*entsize=*/0),
      ctx(ctx) {}

uint64_t OutputSection::getLMA() const {
  return ptLoad ? addr + ptLoad->lmaOffset : addr;
}

// We allow sections of the types listed below to be merged into a
// single progbits section. This is typically done by linker
// scripts. Merging nobits and progbits will force disk space
// to be allocated for nobits sections. The other types don't require
// any special treatment on top of progbits, so there doesn't
// seem to be any harm in merging them.
//
// NOTE: clang since rL252300 emits SHT_X86_64_UNWIND .eh_frame sections. Allow
// them to be merged into SHT_PROGBITS .eh_frame (GNU as .cfi_*).
static bool canMergeToProgbits(Ctx &ctx, unsigned type) {
  return type == SHT_NOBITS || type == SHT_PROGBITS || type == SHT_INIT_ARRAY ||
         type == SHT_PREINIT_ARRAY || type == SHT_FINI_ARRAY ||
         type == SHT_NOTE ||
         (type == SHT_X86_64_UNWIND && ctx.arg.emachine == EM_X86_64);
}

// Record that isec will be placed in the OutputSection. isec does not become
// permanent until finalizeInputSections() is called. The function should not be
// used after finalizeInputSections() is called. If you need to add an
// InputSection post finalizeInputSections(), then you must do the following:
//
// 1. Find or create an InputSectionDescription to hold InputSection.
// 2. Add the InputSection to the InputSectionDescription::sections.
// 3. Call commitSection(isec).
void OutputSection::recordSection(InputSectionBase *isec) {
  partition = isec->partition;
  isec->parent = this;
  if (commands.empty() || !isa<InputSectionDescription>(commands.back()))
    commands.push_back(make<InputSectionDescription>(""));
  auto *isd = cast<InputSectionDescription>(commands.back());
  isd->sectionBases.push_back(isec);
}

// Update fields (type, flags, alignment, etc) according to the InputSection
// isec. Also check whether the InputSection flags and type are consistent with
// other InputSections.
void OutputSection::commitSection(InputSection *isec) {
  if (LLVM_UNLIKELY(type != isec->type)) {
    if (!hasInputSections && !typeIsSet) {
      type = isec->type;
    } else if (isStaticRelSecType(type) && isStaticRelSecType(isec->type) &&
               (type == SHT_CREL) != (isec->type == SHT_CREL)) {
      // Combine mixed SHT_REL[A] and SHT_CREL to SHT_CREL. Rename the section
      // according to the old type before overwriting it.
      if (type == SHT_REL) {
        if (name.consume_front(".rel"))
          name = ctx.saver.save(".crel" + name);
      } else if (name.consume_front(".rela")) {
        name = ctx.saver.save(".crel" + name);
      }
      type = SHT_CREL;
    } else {
      if (typeIsSet || !canMergeToProgbits(ctx, type) ||
          !canMergeToProgbits(ctx, isec->type)) {
        // (NOLOAD) changes the section type to SHT_NOBITS; the intention is
        // that the contents at that address are provided by some other means.
        // Some projects (e.g.
        // https://github.com/ClangBuiltLinux/linux/issues/1597) rely on this
        // behavior. Other types get an error.
        if (type != SHT_NOBITS) {
          Err(ctx) << "section type mismatch for " << isec->name << "\n>>> "
                   << isec << ": "
                   << getELFSectionTypeName(ctx.arg.emachine, isec->type)
                   << "\n>>> output section " << name << ": "
                   << getELFSectionTypeName(ctx.arg.emachine, type);
        }
      }
      if (!typeIsSet)
        type = SHT_PROGBITS;
    }
  }
  if (!hasInputSections) {
    // If IS is the first section to be added to this section,
    // initialize type, entsize and flags from isec.
    hasInputSections = true;
    entsize = isec->entsize;
    flags = isec->flags;
  } else {
    // Otherwise, check if new type or flags are compatible with existing ones.
    if ((flags ^ isec->flags) & SHF_TLS)
      ErrAlways(ctx) << "incompatible section flags for " << name << "\n>>> "
                     << isec << ": 0x" << utohexstr(isec->flags, true)
                     << "\n>>> output section " << name << ": 0x"
                     << utohexstr(flags, true);
  }

  isec->parent = this;
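  // SHF_ARM_PURECODE is kept only if every input section sets it (AND-ed),
  // since a single section containing data makes an execute-only mapping
  // invalid; all other flags are OR-ed together.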
  uint64_t andMask =
      ctx.arg.emachine == EM_ARM ? (uint64_t)SHF_ARM_PURECODE : 0;
  uint64_t orMask = ~andMask;
  uint64_t andFlags = (flags & isec->flags) & andMask;
  uint64_t orFlags = (flags | isec->flags) & orMask;
  flags = andFlags | orFlags;
  if (nonAlloc)
    flags &= ~(uint64_t)SHF_ALLOC;

  addralign = std::max(addralign, isec->addralign);

  // If this section contains a table of fixed-size entries, sh_entsize
  // holds the element size. If it contains elements of different size we
  // set sh_entsize to 0.
  if (entsize != isec->entsize)
    entsize = 0;
}

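// Create a synthetic section that merges constants. MergeTailSection can
// additionally exploit overlapping string suffixes (tail merging), which is
// more expensive, so it is only used at optimization level 2 or higher.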
static MergeSyntheticSection *createMergeSynthetic(Ctx &ctx, StringRef name,
                                                   uint32_t type,
                                                   uint64_t flags,
                                                   uint32_t addralign) {
  if ((flags & SHF_STRINGS) && ctx.arg.optimize >= 2)
    return make<MergeTailSection>(ctx, name, type, flags, addralign);
  return make<MergeNoTailSection>(ctx, name, type, flags, addralign);
}

// This function scans over the InputSectionBase list sectionBases to create
// InputSectionDescription::sections.
//
// It removes MergeInputSections from the input section array and adds
// new synthetic sections at the location of the first input section
// that it replaces. It then finalizes each synthetic section in order
// to compute an output offset for each piece of each input section.
void OutputSection::finalizeInputSections() {
  auto *script = ctx.script;
  std::vector<MergeSyntheticSection *> mergeSections;
  for (SectionCommand *cmd : commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    isd->sections.reserve(isd->sectionBases.size());
    for (InputSectionBase *s : isd->sectionBases) {
      MergeInputSection *ms = dyn_cast<MergeInputSection>(s);
      if (!ms) {
        isd->sections.push_back(cast<InputSection>(s));
        continue;
      }

      // We do not want to handle sections that are not alive, so just remove
      // them instead of trying to merge.
      if (!ms->isLive())
        continue;

      auto i = llvm::find_if(mergeSections, [=](MergeSyntheticSection *sec) {
        // While we could create a single synthetic section for two different
        // values of Entsize, it is better to take Entsize into consideration.
        //
        // With a single synthetic section no two pieces with different Entsize
        // could be equal, so we may as well have two sections.
        //
        // Using Entsize here also allows us to propagate it to the synthetic
        // section.
        //
        // SHF_STRINGS sections with different alignments should not be merged.
        return sec->flags == ms->flags && sec->entsize == ms->entsize &&
               (sec->addralign == ms->addralign || !(sec->flags & SHF_STRINGS));
      });
      if (i == mergeSections.end()) {
        MergeSyntheticSection *syn = createMergeSynthetic(
            ctx, s->name, ms->type, ms->flags, ms->addralign);
        mergeSections.push_back(syn);
        i = std::prev(mergeSections.end());
        syn->entsize = ms->entsize;
        isd->sections.push_back(syn);
        // The merge synthetic section inherits the potential spill locations
        // of its first contained section.
        auto it = script->potentialSpillLists.find(ms);
        if (it != script->potentialSpillLists.end())
          script->potentialSpillLists.try_emplace(syn, it->second);
      }
      (*i)->addSection(ms);
    }

    // sectionBases should not be used from this point onwards. Clear it to
    // catch misuses.
    isd->sectionBases.clear();

    // Some input sections may be removed from the list after ICF.
    for (InputSection *s : isd->sections)
      commitSection(s);
  }
  for (auto *ms : mergeSections) {
    // Merging may have increased the alignment of a spillable section. Update
    // the alignment of potential spill sections and their containing output
    // sections.
    if (auto it = script->potentialSpillLists.find(ms);
        it != script->potentialSpillLists.end()) {
      for (PotentialSpillSection *s = it->second.head; s; s = s->next) {
        s->addralign = std::max(s->addralign, ms->addralign);
        s->parent->addralign = std::max(s->parent->addralign, s->addralign);
      }
    }

    ms->finalizeContents();
  }
}

static void sortByOrder(MutableArrayRef<InputSection *> in,
                        llvm::function_ref<int(InputSectionBase *s)> order) {
  std::vector<std::pair<int, InputSection *>> v;
  for (InputSection *s : in)
    v.emplace_back(order(s), s);
  llvm::stable_sort(v, less_first());

  for (size_t i = 0; i < v.size(); ++i)
    in[i] = v[i].second;
}

uint64_t elf::getHeaderSize(Ctx &ctx) {
  if (ctx.arg.oFormatBinary)
    return 0;
  return ctx.out.elfHeader->size + ctx.out.programHeaders->size;
}

void OutputSection::sort(llvm::function_ref<int(InputSectionBase *s)> order) {
  assert(isLive());
  for (SectionCommand *b : commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(b))
      sortByOrder(isd->sections, order);
}

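// Fill [buf, buf + size) with NOP instructions. ctx.target->nopInstrs holds a
// NOP sequence for each length from 1 byte up to the longest; the longest
// sequence is repeated as many times as fits, and the remainder is covered by
// the sequence of exactly the remaining length.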
static void nopInstrFill(Ctx &ctx, uint8_t *buf, size_t size) {
  if (size == 0)
    return;
  unsigned i = 0;
  if (ctx.target->nopInstrs == std::nullopt)
    return;
  std::vector<std::vector<uint8_t>> nopFiller = *ctx.target->nopInstrs;
  unsigned num = size / nopFiller.back().size();
  for (unsigned c = 0; c < num; ++c) {
    memcpy(buf + i, nopFiller.back().data(), nopFiller.back().size());
    i += nopFiller.back().size();
  }
  unsigned remaining = size - i;
  if (!remaining)
    return;
  assert(nopFiller[remaining - 1].size() == remaining);
  memcpy(buf + i, nopFiller[remaining - 1].data(), remaining);
}

// Fill [Buf, Buf + Size) with Filler.
// This is used for linker script "=fillexp" command.
static void fill(uint8_t *buf, size_t size,
                 const std::array<uint8_t, 4> &filler) {
  size_t i = 0;
  for (; i + 4 < size; i += 4)
    memcpy(buf + i, filler.data(), 4);
  memcpy(buf + i, filler.data(), size - i);
}

#if LLVM_ENABLE_ZLIB
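// Compress one shard as a raw DEFLATE stream (no zlib header or trailer) so
// that shards can simply be concatenated: writeTo() later prepends a single
// 2-byte zlib header and appends the combined Adler-32 checksum of the whole
// section.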
static SmallVector<uint8_t, 0> deflateShard(Ctx &ctx, ArrayRef<uint8_t> in,
                                            int level, int flush) {
  // 15 and 8 are default. windowBits=-15 is negative to generate raw deflate
  // data with no zlib header or trailer.
  z_stream s = {};
  auto res = deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
  if (res != 0) {
    Err(ctx) << "--compress-sections: deflateInit2 returned " << res;
    return {};
  }
  s.next_in = const_cast<uint8_t *>(in.data());
  s.avail_in = in.size();

  // Allocate a buffer of half of the input size, and grow it by 1.5x if
  // insufficient.
  SmallVector<uint8_t, 0> out;
  size_t pos = 0;
  out.resize_for_overwrite(std::max<size_t>(in.size() / 2, 64));
  do {
    if (pos == out.size())
      out.resize_for_overwrite(out.size() * 3 / 2);
    s.next_out = out.data() + pos;
    s.avail_out = out.size() - pos;
    (void)deflate(&s, flush);
    pos = s.next_out - out.data();
  } while (s.avail_out == 0);
  assert(s.avail_in == 0);

  out.truncate(pos);
  deflateEnd(&s);
  return out;
}
#endif

// Compress certain non-SHF_ALLOC sections:
//
// * (if --compress-debug-sections is specified) non-empty .debug_* sections
// * (if --compress-sections is specified) matched sections
template <class ELFT> void OutputSection::maybeCompress(Ctx &ctx) {
  using Elf_Chdr = typename ELFT::Chdr;
  (void)sizeof(Elf_Chdr);

  DebugCompressionType ctype = DebugCompressionType::None;
  size_t compressedSize = sizeof(Elf_Chdr);
  unsigned level = 0; // default compression level
  if (!(flags & SHF_ALLOC) && ctx.arg.compressDebugSections &&
      name.starts_with(".debug_"))
    ctype = *ctx.arg.compressDebugSections;
  for (auto &[glob, t, l] : ctx.arg.compressSections)
    if (glob.match(name))
      std::tie(ctype, level) = {t, l};
  if (ctype == DebugCompressionType::None)
    return;
  if (flags & SHF_ALLOC) {
    Err(ctx) << "--compress-sections: section '" << name
             << "' with the SHF_ALLOC flag cannot be compressed";
    return;
  }

  llvm::TimeTraceScope timeScope("Compress sections");
  auto buf = std::make_unique<uint8_t[]>(size);
  // Write uncompressed data to a temporary zero-initialized buffer.
  {
    parallel::TaskGroup tg;
    writeTo<ELFT>(ctx, buf.get(), tg);
  }
  // The generic ABI specifies "The sh_size and sh_addralign fields of the
  // section header for a compressed section reflect the requirements of the
  // compressed section." However, 1-byte alignment has been widely accepted
  // and utilized for a long time. Removing alignment padding is particularly
  // useful when there are many compressed output sections.
  addralign = 1;

  // Split input into 1-MiB shards.
  [[maybe_unused]] constexpr size_t shardSize = 1 << 20;
  auto shardsIn = split(ArrayRef<uint8_t>(buf.get(), size), shardSize);
  const size_t numShards = shardsIn.size();
  auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(numShards);

#if LLVM_ENABLE_ZSTD
  // Use ZSTD's streaming compression API. See
  // http://facebook.github.io/zstd/zstd_manual.html "Streaming compression -
  // HowTo".
  if (ctype == DebugCompressionType::Zstd) {
    parallelFor(0, numShards, [&](size_t i) {
      SmallVector<uint8_t, 0> out;
      ZSTD_CCtx *cctx = ZSTD_createCCtx();
      ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
      ZSTD_inBuffer zib = {shardsIn[i].data(), shardsIn[i].size(), 0};
      ZSTD_outBuffer zob = {nullptr, 0, 0};
      size_t size;
      do {
        // Allocate a buffer of a quarter of the input size, and grow it by
        // 1.5x if insufficient.
        if (zob.pos == zob.size) {
          out.resize_for_overwrite(
              zob.size ? zob.size * 3 / 2 : std::max<size_t>(zib.size / 4, 64));
          zob = {out.data(), out.size(), zob.pos};
        }
        size = ZSTD_compressStream2(cctx, &zob, &zib, ZSTD_e_end);
        assert(!ZSTD_isError(size));
      } while (size != 0);
      out.truncate(zob.pos);
      ZSTD_freeCCtx(cctx);
      shardsOut[i] = std::move(out);
    });
    compressed.type = ELFCOMPRESS_ZSTD;
    for (size_t i = 0; i != numShards; ++i)
      compressedSize += shardsOut[i].size();
  }
#endif

#if LLVM_ENABLE_ZLIB
  // We chose 1 (Z_BEST_SPEED) as the default compression level because it is
  // fast and provides decent compression ratios.
  if (ctype == DebugCompressionType::Zlib) {
    if (!level)
      level = Z_BEST_SPEED;

    // Compress shards and compute Adler-32 checksums. Use Z_SYNC_FLUSH for all
    // shards but the last to flush the output to a byte boundary to be
    // concatenated with the next shard.
    auto shardsAdler = std::make_unique<uint32_t[]>(numShards);
    parallelFor(0, numShards, [&](size_t i) {
      shardsOut[i] = deflateShard(ctx, shardsIn[i], level,
                                  i != numShards - 1 ? Z_SYNC_FLUSH : Z_FINISH);
      shardsAdler[i] = adler32(1, shardsIn[i].data(), shardsIn[i].size());
    });

    // Update section size and combine Adler-32 checksums.
    uint32_t checksum = 1;       // Initial Adler-32 value
    compressedSize += 2;         // zlib header (CMF and FLG)
    for (size_t i = 0; i != numShards; ++i) {
      compressedSize += shardsOut[i].size();
      checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
    }
    compressedSize += 4; // checksum
    compressed.type = ELFCOMPRESS_ZLIB;
    compressed.checksum = checksum;
  }
#endif

  if (compressedSize >= size)
    return;
  compressed.uncompressedSize = size;
  compressed.shards = std::move(shardsOut);
  compressed.numShards = numShards;
  size = compressedSize;
  flags |= SHF_COMPRESSED;
}

static void writeInt(Ctx &ctx, uint8_t *buf, uint64_t data, uint64_t size) {
  if (size == 1)
    *buf = data;
  else if (size == 2)
    write16(ctx, buf, data);
  else if (size == 4)
    write32(ctx, buf, data);
  else if (size == 8)
    write64(ctx, buf, data);
  else
    llvm_unreachable("unsupported Size argument");
}

template <class ELFT>
void OutputSection::writeTo(Ctx &ctx, uint8_t *buf, parallel::TaskGroup &tg) {
  llvm::TimeTraceScope timeScope("Write sections", name);
  if (type == SHT_NOBITS)
    return;
  if (type == SHT_CREL && !(flags & SHF_ALLOC)) {
    buf += encodeULEB128(crelHeader, buf);
    memcpy(buf, crelBody.data(), crelBody.size());
    return;
  }

  // If the section is compressed due to
  // --compress-debug-sections/--compress-sections, the content is already
  // known.
  if (compressed.shards) {
    auto *chdr = reinterpret_cast<typename ELFT::Chdr *>(buf);
    chdr->ch_type = compressed.type;
    chdr->ch_size = compressed.uncompressedSize;
    chdr->ch_addralign = addralign;
    buf += sizeof(*chdr);

    auto offsets = std::make_unique<size_t[]>(compressed.numShards);
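    // A zlib stream consists of a 2-byte header, the concatenated raw deflate
    // shards, and a big-endian Adler-32 checksum of the uncompressed data at
    // the end of the section.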
    if (compressed.type == ELFCOMPRESS_ZLIB) {
      buf[0] = 0x78;  // CMF
      buf[1] = 0x01;  // FLG: best speed
      offsets[0] = 2; // zlib header
      write32be(buf + (size - sizeof(*chdr) - 4), compressed.checksum);
    }

    // Compute shard offsets.
    for (size_t i = 1; i != compressed.numShards; ++i)
      offsets[i] = offsets[i - 1] + compressed.shards[i - 1].size();
    parallelFor(0, compressed.numShards, [&](size_t i) {
      memcpy(buf + offsets[i], compressed.shards[i].data(),
             compressed.shards[i].size());
    });
    return;
  }

  // Write leading padding.
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  std::array<uint8_t, 4> filler = getFiller(ctx);
  bool nonZeroFiller = read32(ctx, filler.data()) != 0;
  if (nonZeroFiller)
    fill(buf, sections.empty() ? size : sections[0]->outSecOff, filler);
  auto fn = [=, &ctx](size_t begin, size_t end) {
    size_t numSections = sections.size();
    for (size_t i = begin; i != end; ++i) {
      InputSection *isec = sections[i];
      if (auto *s = dyn_cast<SyntheticSection>(isec))
        s->writeTo(buf + isec->outSecOff);
      else
        isec->writeTo<ELFT>(ctx, buf + isec->outSecOff);

      // When in Arm BE8 mode, the linker has to convert the big-endian
      // instructions to little-endian, leaving the data big-endian.
      if (ctx.arg.emachine == EM_ARM && !ctx.arg.isLE && ctx.arg.armBe8 &&
          (flags & SHF_EXECINSTR))
        convertArmInstructionstoBE8(ctx, isec, buf + isec->outSecOff);

      // Fill gaps between sections.
      if (nonZeroFiller) {
        uint8_t *start = buf + isec->outSecOff + isec->getSize();
        uint8_t *end;
        if (i + 1 == numSections)
          end = buf + size;
        else
          end = buf + sections[i + 1]->outSecOff;
        if (isec->nopFiller) {
          assert(ctx.target->nopInstrs);
          nopInstrFill(ctx, start, end - start);
        } else
          fill(start, end - start, filler);
      }
    }
  };

  // If there is any BYTE()-family command (rare), write the section content
  // first then process BYTE to overwrite the filler content. The write is
  // serial due to the limitation of llvm/Support/Parallel.h.
  bool written = false;
  size_t numSections = sections.size();
  for (SectionCommand *cmd : commands)
    if (auto *data = dyn_cast<ByteCommand>(cmd)) {
      if (!std::exchange(written, true))
        fn(0, numSections);
      writeInt(ctx, buf + data->offset, data->expression().getValue(),
               data->size);
    }
  if (written || !numSections)
    return;

  // There is no data command. Write content asynchronously to overlap the
  // write time with other output sections. Note, if a linker script specifies
  // overlapping output sections (needs --noinhibit-exec or
  // --no-check-sections to suppress the error), the output may be
  // non-deterministic.
  const size_t taskSizeLimit = 4 << 20;
  for (size_t begin = 0, i = 0, taskSize = 0;;) {
    taskSize += sections[i]->getSize();
    bool done = ++i == numSections;
    if (done || taskSize >= taskSizeLimit) {
      tg.spawn([=] { fn(begin, i); });
      if (done)
        break;
      begin = i;
      taskSize = 0;
    }
  }
}

static void finalizeShtGroup(Ctx &ctx, OutputSection *os,
                             InputSection *section) {
  // sh_link field for SHT_GROUP sections should contain the section index of
  // the symbol table.
  os->link = ctx.in.symTab->getParent()->sectionIndex;

  if (!section)
    return;

  // sh_info should then contain the index of an entry in the symbol table
  // section that provides the signature of the section group.
  ArrayRef<Symbol *> symbols = section->file->getSymbols();
  os->info = ctx.in.symTab->getSymbolIndex(*symbols[section->info]);

  // Some group members may be combined or discarded, so we need to compute the
  // new size. The content will be rewritten in InputSection::copyShtGroup.
  DenseSet<uint32_t> seen;
  ArrayRef<InputSectionBase *> sections = section->file->getSections();
  for (const uint32_t &idx : section->getDataAs<uint32_t>().slice(1))
    if (OutputSection *osec = sections[read32(ctx, &idx)]->getOutputSection())
      seen.insert(osec->sectionIndex);
  os->size = (1 + seen.size()) * sizeof(uint32_t);
}

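// Encode one relocation in the CREL format: each field is delta-encoded
// against the previous relocation. Bits 0-2 of the leading byte flag whether
// a symbol index, type, or addend delta follows; the offset delta occupies
// the remaining bits and spills into a ULEB128 continuation when it does not
// fit in 4 bits.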
template <class uint>
LLVM_ATTRIBUTE_ALWAYS_INLINE static void
encodeOneCrel(Ctx &ctx, raw_svector_ostream &os,
              Elf_Crel<sizeof(uint) == 8> &out, uint offset, const Symbol &sym,
              uint32_t type, uint addend) {
  const auto deltaOffset = static_cast<uint64_t>(offset - out.r_offset);
  out.r_offset = offset;
  int64_t symidx = ctx.in.symTab->getSymbolIndex(sym);
  if (sym.type == STT_SECTION) {
    auto *d = dyn_cast<Defined>(&sym);
    if (d) {
      SectionBase *section = d->section;
      assert(section->isLive());
      addend = sym.getVA(ctx, addend) - section->getOutputSection()->addr;
    } else {
      // Encode R_*_NONE(symidx=0).
      symidx = type = addend = 0;
    }
  }

  // Similar to llvm::ELF::encodeCrel.
  uint8_t b = deltaOffset * 8 + (out.r_symidx != symidx) +
              (out.r_type != type ? 2 : 0) +
              (uint(out.r_addend) != addend ? 4 : 0);
  if (deltaOffset < 0x10) {
    os << char(b);
  } else {
    os << char(b | 0x80);
    encodeULEB128(deltaOffset >> 4, os);
  }
  if (b & 1) {
    encodeSLEB128(static_cast<int32_t>(symidx - out.r_symidx), os);
    out.r_symidx = symidx;
  }
  if (b & 2) {
    encodeSLEB128(static_cast<int32_t>(type - out.r_type), os);
    out.r_type = type;
  }
  if (b & 4) {
    encodeSLEB128(std::make_signed_t<uint>(addend - out.r_addend), os);
    out.r_addend = addend;
  }
}

template <class ELFT>
static size_t relToCrel(Ctx &ctx, raw_svector_ostream &os,
                        Elf_Crel<ELFT::Is64Bits> &out, InputSection *relSec,
                        InputSectionBase *sec) {
  const auto &file = *cast<ELFFileBase>(relSec->file);
  if (relSec->type == SHT_REL) {
    // REL to CREL conversion is complex and not supported yet.
    Err(ctx) << relSec << ": REL cannot be converted to CREL";
    return 0;
  }
  auto rels = relSec->getDataAs<typename ELFT::Rela>();
  for (auto rel : rels) {
    encodeOneCrel<typename ELFT::uint>(
        ctx, os, out, sec->getVA(rel.r_offset), file.getRelocTargetSym(rel),
        rel.getType(ctx.arg.isMips64EL), getAddend<ELFT>(rel));
  }
  return rels.size();
}

// Compute the content of a non-alloc CREL section due to -r or --emit-relocs.
// Input CREL sections are decoded while REL[A] need to be converted.
template <bool is64> void OutputSection::finalizeNonAllocCrel(Ctx &ctx) {
  using uint = typename Elf_Crel_Impl<is64>::uint;
  raw_svector_ostream os(crelBody);
  uint64_t totalCount = 0;
  Elf_Crel<is64> out{};
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  for (InputSection *relSec : isd->sections) {
    const auto &file = *cast<ELFFileBase>(relSec->file);
    InputSectionBase *sec = relSec->getRelocatedSection();
    if (relSec->type == SHT_CREL) {
      RelocsCrel<is64> entries(relSec->content_);
      totalCount += entries.size();
      for (Elf_Crel_Impl<is64> r : entries) {
        encodeOneCrel<uint>(ctx, os, out, uint(sec->getVA(r.r_offset)),
                            file.getSymbol(r.r_symidx), r.r_type, r.r_addend);
      }
      continue;
    }

    // Convert REL[A] to CREL.
    if constexpr (is64) {
      totalCount += ctx.arg.isLE
                        ? relToCrel<ELF64LE>(ctx, os, out, relSec, sec)
                        : relToCrel<ELF64BE>(ctx, os, out, relSec, sec);
    } else {
      totalCount += ctx.arg.isLE
                        ? relToCrel<ELF32LE>(ctx, os, out, relSec, sec)
                        : relToCrel<ELF32BE>(ctx, os, out, relSec, sec);
    }
  }

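  // The CREL header ULEB128 encodes the relocation count in the high bits;
  // adding 4 sets the flag bit indicating that explicit addends are present
  // (llvm::ELF::CREL_HDR_ADDEND).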
  crelHeader = totalCount * 8 + 4;
  size = getULEB128Size(crelHeader) + crelBody.size();
}

void OutputSection::finalize(Ctx &ctx) {
  InputSection *first = getFirstInputSection(this);

  if (flags & SHF_LINK_ORDER) {
    // We must preserve the link order dependency of sections with the
    // SHF_LINK_ORDER flag. The dependency is indicated by the sh_link field.
    // We need to translate the InputSection sh_link to the OutputSection
    // sh_link; all InputSections in the OutputSection have the same
    // dependency.
    if (auto *ex = dyn_cast<ARMExidxSyntheticSection>(first))
      link = ex->getLinkOrderDep()->getParent()->sectionIndex;
    else if (first->flags & SHF_LINK_ORDER)
      if (auto *d = first->getLinkOrderDep())
        link = d->getParent()->sectionIndex;
  }

  if (type == SHT_GROUP) {
    finalizeShtGroup(ctx, this, first);
    return;
  }

  if (!ctx.arg.copyRelocs || !isStaticRelSecType(type))
    return;

  // Skip if 'first' is synthetic, i.e. not a section created by --emit-relocs.
  // Normally 'type' was changed by 'first' so 'first' should be non-null.
  // However, if the output section is .rela.dyn, 'type' can be set by the empty
  // synthetic .rela.plt and first can be null.
  if (!first || isa<SyntheticSection>(first))
    return;

  link = ctx.in.symTab->getParent()->sectionIndex;
  // sh_info for SHT_REL[A] sections should contain the section header index of
  // the section to which the relocation applies.
  InputSectionBase *s = first->getRelocatedSection();
  info = s->getOutputSection()->sectionIndex;
  flags |= SHF_INFO_LINK;
  // Finalize the content of non-alloc CREL.
  if (type == SHT_CREL) {
    if (ctx.arg.is64)
      finalizeNonAllocCrel<true>(ctx);
    else
      finalizeNonAllocCrel<false>(ctx);
  }
}

// Returns true if S is in one of the many forms the compiler driver may pass
// crtbegin files.
//
// Gcc uses any of crtbegin[<empty>|S|T].o.
// Clang uses Gcc's plus clang_rt.crtbegin[-<arch>|<empty>].o.
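//
// For example, crtbegin.o, crtbeginS.o, clang_rt.crtbegin.o, and
// clang_rt.crtbegin-x86_64.o all match for beginEnd="crtbegin".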
static bool isCrt(StringRef s, StringRef beginEnd) {
  s = sys::path::filename(s);
  if (!s.consume_back(".o"))
    return false;
  if (s.consume_front("clang_rt."))
    return s.consume_front(beginEnd);
  return s.consume_front(beginEnd) && s.size() <= 1;
}

// .ctors and .dtors are sorted by this order:
//
// 1. .ctors/.dtors in crtbegin (which contains a sentinel value -1).
// 2. The section is named ".ctors" or ".dtors" (priority: 65536).
// 3. The section has an optional priority value in the form of ".ctors.N" or
//    ".dtors.N" where N is a number in the form of %05u (priority: 65535-N).
// 4. .ctors/.dtors in crtend (which contains a sentinel value 0).
//
// For 2 and 3, the sections are sorted by priority from high to low, e.g.
// .ctors (65536), .ctors.00100 (65435), .ctors.00200 (65335). In GNU ld's
// internal linker scripts, the sorting is by string comparison which can
// achieve the same goal given the optional priority values are of the same
// length.
//
// In an ideal world, we wouldn't need this function because .init_array and
// .ctors are duplicate features (and .init_array is newer). However, there
// are too many real-world uses of .ctors, so we had no choice but to support
// them with these rather ad-hoc semantics.
static bool compCtors(const InputSection *a, const InputSection *b) {
  bool beginA = isCrt(a->file->getName(), "crtbegin");
  bool beginB = isCrt(b->file->getName(), "crtbegin");
  if (beginA != beginB)
    return beginA;
  bool endA = isCrt(a->file->getName(), "crtend");
  bool endB = isCrt(b->file->getName(), "crtend");
  if (endA != endB)
    return endB;
  return getPriority(a->name) > getPriority(b->name);
}

// Sorts input sections by the special rules for .ctors and .dtors.
// Unfortunately, the rules are different from the ones for .{init,fini}_array.
// Read the comment above.
void OutputSection::sortCtorsDtors() {
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  llvm::stable_sort(isd->sections, compCtors);
}

// If an input string is in the form of "foo.N" where N is a number, return N
// (65535-N if .ctors.N or .dtors.N). Otherwise, return 65536, which is one
// greater than the lowest priority.
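//
// For example, ".init_array.00100" yields 100, while ".ctors.00100" yields
// 65435 (65535-100), so that the high-to-low .ctors order matches the
// low-to-high .init_array order.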
int elf::getPriority(StringRef s) {
  size_t pos = s.rfind('.');
  if (pos == StringRef::npos)
    return 65536;
  int v = 65536;
  if (to_integer(s.substr(pos + 1), v, 10) &&
      (pos == 6 && (s.starts_with(".ctors") || s.starts_with(".dtors"))))
    v = 65535 - v;
  return v;
}

InputSection *elf::getFirstInputSection(const OutputSection *os) {
  for (SectionCommand *cmd : os->commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
      if (!isd->sections.empty())
        return isd->sections[0];
  return nullptr;
}

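// Collect all input sections of an output section. In the common case of a
// single InputSectionDescription, return its array directly; otherwise
// concatenate the arrays into storage.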
ArrayRef<InputSection *>
elf::getInputSections(const OutputSection &os,
                      SmallVector<InputSection *, 0> &storage) {
  ArrayRef<InputSection *> ret;
  storage.clear();
  for (SectionCommand *cmd : os.commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    if (ret.empty()) {
      ret = isd->sections;
    } else {
      if (storage.empty())
        storage.assign(ret.begin(), ret.end());
      storage.insert(storage.end(), isd->sections.begin(), isd->sections.end());
    }
  }
  return storage.empty() ? ret : ArrayRef(storage);
}

// Sorts input sections by section name suffixes, so that .foo.N comes
// before .foo.M if N < M. Used to sort .{init,fini}_array.N sections.
// We want to keep the original order if the priorities are the same
// because the compiler keeps the original initialization order in a
// translation unit and we need to respect that.
// For more detail, read the section of GCC's manual about init_priority.
void OutputSection::sortInitFini() {
  // Sort sections by priority.
  sort([](InputSectionBase *s) { return getPriority(s->name); });
}

std::array<uint8_t, 4> OutputSection::getFiller(Ctx &ctx) {
  if (filler)
    return *filler;
  if (flags & SHF_EXECINSTR)
    return ctx.target->trapInstr;
  return {0, 0, 0, 0};
}

void OutputSection::checkDynRelAddends(Ctx &ctx) {
  assert(ctx.arg.writeAddends && ctx.arg.checkDynamicRelocs);
  assert(isStaticRelSecType(type));
  SmallVector<InputSection *, 0> storage;
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  parallelFor(0, sections.size(), [&](size_t i) {
    // When linking with -r or --emit-relocs we might also call this function
    // for input .rel[a].<sec> sections which we simply pass through to the
    // output. We skip over those and only look at the synthetic relocation
    // sections created during linking.
    if (!SyntheticSection::classof(sections[i]) ||
        !is_contained({ELF::SHT_REL, ELF::SHT_RELA, ELF::SHT_RELR},
                      sections[i]->type))
      return;
    const auto *sec = cast<RelocationBaseSection>(sections[i]);
    for (const DynamicReloc &rel : sec->relocs) {
      int64_t addend = rel.addend;
      const OutputSection *relOsec = rel.inputSec->getOutputSection();
      assert(relOsec != nullptr && "missing output section for relocation");
      // Some targets have NOBITS synthetic sections with dynamic relocations
      // with non-zero addends. Skip such sections.
      if (is_contained({EM_PPC, EM_PPC64}, ctx.arg.emachine) &&
          (rel.inputSec == ctx.in.ppc64LongBranchTarget.get() ||
           rel.inputSec == ctx.in.igotPlt.get()))
        continue;
      const uint8_t *relocTarget = ctx.bufferStart + relOsec->offset +
                                   rel.inputSec->getOffset(rel.offsetInSec);
      // For SHT_NOBITS the written addend is always zero.
      int64_t writtenAddend =
          relOsec->type == SHT_NOBITS
              ? 0
              : ctx.target->getImplicitAddend(relocTarget, rel.type);
      if (addend != writtenAddend)
        InternalErr(ctx, relocTarget)
            << "wrote incorrect addend value 0x" << utohexstr(writtenAddend)
            << " instead of 0x" << utohexstr(addend)
            << " for dynamic relocation " << rel.type << " at offset 0x"
            << utohexstr(rel.getOffset())
            << (rel.sym ? " against symbol " + rel.sym->getName() : "");
    }
  });
}

template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);

template void OutputSection::writeTo<ELF32LE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF32BE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64LE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64BE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);

template void OutputSection::maybeCompress<ELF32LE>(Ctx &);
template void OutputSection::maybeCompress<ELF32BE>(Ctx &);
template void OutputSection::maybeCompress<ELF64LE>(Ctx &);
template void OutputSection::maybeCompress<ELF64BE>(Ctx &);
954