1 /* KVX-specific support for NN-bit ELF.
2    Copyright (C) 2009-2024 Free Software Foundation, Inc.
3    Contributed by Kalray SA.
4 
5    This file is part of BFD, the Binary File Descriptor library.
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3 of the License, or
10    (at your option) any later version.
11 
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with this program; see the file COPYING3. If not,
19    see <http://www.gnu.org/licenses/>.  */
20 
21 #include "sysdep.h"
22 #include "bfd.h"
23 #include "libiberty.h"
24 #include "libbfd.h"
25 #include "elf-bfd.h"
26 #include "bfdlink.h"
27 #include "objalloc.h"
28 #include "elf/kvx.h"
29 #include "elfxx-kvx.h"
30 
31 #define ARCH_SIZE	NN
32 
33 #if ARCH_SIZE == 64
34 #define LOG_FILE_ALIGN	3
35 #endif
36 
37 #if ARCH_SIZE == 32
38 #define LOG_FILE_ALIGN	2
39 #endif
40 
41 #define IS_KVX_TLS_RELOC(R_TYPE)			\
42   ((R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_LO10	\
43    || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_UP27	\
44    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_LO10	\
45    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_UP27	\
46    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_EX6	\
47    || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10	\
48    || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27	\
49    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10	\
50    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27	\
51    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6	\
52    || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_LO10	\
53    || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_UP27	\
54    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_LO10	\
55    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_UP27	\
56    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_EX6	\
57    || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_LO10	\
58    || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_UP27	\
59    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_LO10	\
60    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_UP27	\
61    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_EX6	\
62    || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_LO10	\
63    || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_UP27	\
64    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_LO10	\
65    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_UP27	\
66    || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_EX6	\
67    )
68 
69 #define IS_KVX_TLS_RELAX_RELOC(R_TYPE) 0
70 
71 #define ELIMINATE_COPY_RELOCS 0
72 
73 /* Return size of a relocation entry.  HTAB is the bfd's
74    elf_kvx_link_hash_entry.  */
75 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
76 
77 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32.  */
78 #define GOT_ENTRY_SIZE                  (ARCH_SIZE / 8)
79 #define PLT_ENTRY_SIZE                  (32)
80 
81 #define PLT_SMALL_ENTRY_SIZE            (4*4)
82 
83 /* Encoding of the nop instruction */
84 #define INSN_NOP 0x00f0037f
85 
86 #define kvx_compute_jump_table_size(htab)		\
87   (((htab)->root.srelplt == NULL) ? 0			\
88    : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
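
/* For example (following the definitions above): an ELF64 link whose
   .rela.plt holds 10 relocations gets a jump table of
   10 * GOT_ENTRY_SIZE = 10 * 8 = 80 bytes; the same link built as
   ELF32 would need 10 * 4 = 40 bytes.  */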
89 
90 static const bfd_byte elfNN_kvx_small_plt0_entry[PLT_ENTRY_SIZE] =
91 {
92  /* FIXME KVX: no first entry, not used yet */
93   0
94 };
95 
96 /* A per-function entry in the procedure linkage table looks like this.
97    If the distance between the PLTGOT and the PLT is < 4GB, use
98    these PLT entries.  */
99 static const bfd_byte elfNN_kvx_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
100 {
101   0x10, 0x00, 0xc4, 0x0f,       /* get $r16 = $pc     ;; */
102 #if ARCH_SIZE == 32
103   0x10, 0x00, 0x40, 0xb0,       /* lwz $r16 = 0[$r16]   ;; */
104 #else
105   0x10, 0x00, 0x40, 0xb8,       /* ld $r16 = 0[$r16] ;; */
106 #endif
107   0x00, 0x00, 0x00, 0x18,       /* upper 27 bits for LSU */
108   0x10, 0x00, 0xd8, 0x0f,	/* igoto $r16          ;; */
109 };
110 
111 /* Long stubs use the 43-bit immediate form of the make instruction.  */
112 static const uint32_t elfNN_kvx_long_branch_stub[] =
113 {
114   0xe0400000,      /* make $r16 = LO10<imm43> EX6<imm43> */
115   0x00000000,      /* UP27<imm43> ;; */
116   0x0fd80010,      /* igoto $r16  ;; */
117 };
118 
119 #define elf_info_to_howto               elfNN_kvx_info_to_howto
120 #define elf_info_to_howto_rel           elfNN_kvx_info_to_howto
121 
122 #define KVX_ELF_ABI_VERSION		0
123 
124 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value.  */
125 #define ALL_ONES (~ (bfd_vma) 0)
126 
127 /* Indexed by the bfd internal reloc enumerators.
128    Therefore, the table needs to be synced with BFD_RELOC_KVX_*
129    in reloc.c.   */
130 
131 #define KVX_KV3_V1_KV3_V2_KV4_V1
132 #include "elfxx-kvx-relocs.h"
133 #undef KVX_KV3_V1_KV3_V2_KV4_V1
134 
135 /* Given HOWTO, return the bfd internal relocation enumerator.  */
136 
137 static bfd_reloc_code_real_type
138 elfNN_kvx_bfd_reloc_from_howto (reloc_howto_type *howto)
139 {
140   const int size = (int) ARRAY_SIZE (elf_kvx_howto_table);
141   const ptrdiff_t offset = howto - elf_kvx_howto_table;
142 
143   if (offset >= 0 && offset < size)
144     return BFD_RELOC_KVX_RELOC_START + offset + 1;
145 
146   return BFD_RELOC_KVX_RELOC_START + 1;
147 }
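
/* Note on the mapping used above: howto entry I of elf_kvx_howto_table
   corresponds to the bfd code BFD_RELOC_KVX_RELOC_START + 1 + I, so
   this routine and elfNN_kvx_howto_from_bfd_reloc below are inverses
   over the valid range, e.g. entry 0 maps to
   BFD_RELOC_KVX_RELOC_START + 1 and back.  */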
148 
149 /* Given R_TYPE, return the bfd internal relocation enumerator.  */
150 
151 static bfd_reloc_code_real_type
152 elfNN_kvx_bfd_reloc_from_type (bfd *abfd ATTRIBUTE_UNUSED, unsigned int r_type)
153 {
154   static bool initialized_p = false;
155   /* Indexed by R_TYPE, values are offsets in the howto_table.  */
156   static unsigned int offsets[R_KVX_end];
157 
158   if (!initialized_p)
159     {
160       unsigned int i;
161 
162       for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i)
163 	offsets[elf_kvx_howto_table[i].type] = i;
164 
165       initialized_p = true;
166     }
167 
168   /* PR 17512: file: b371e70a.  */
169   if (r_type >= R_KVX_end)
170     {
171       bfd_set_error (bfd_error_bad_value);
172       return BFD_RELOC_KVX_RELOC_END;
173     }
174 
175   return (BFD_RELOC_KVX_RELOC_START + 1) + offsets[r_type];
176 }
177 
178 struct elf_kvx_reloc_map
179 {
180   bfd_reloc_code_real_type from;
181   bfd_reloc_code_real_type to;
182 };
183 
184 /* Map bfd generic reloc to KVX-specific reloc.  */
185 static const struct elf_kvx_reloc_map elf_kvx_reloc_map[] =
186 {
187   {BFD_RELOC_NONE, BFD_RELOC_KVX_NONE},
188 
189   /* Basic data relocations.  */
190   {BFD_RELOC_CTOR, BFD_RELOC_KVX_NN},
191   {BFD_RELOC_64, BFD_RELOC_KVX_64},
192   {BFD_RELOC_32, BFD_RELOC_KVX_32},
193   {BFD_RELOC_16, BFD_RELOC_KVX_16},
194   {BFD_RELOC_8,  BFD_RELOC_KVX_8},
195 
196   {BFD_RELOC_64_PCREL, BFD_RELOC_KVX_64_PCREL},
197   {BFD_RELOC_32_PCREL, BFD_RELOC_KVX_32_PCREL},
198 };
199 
200 /* Given the bfd internal relocation enumerator in CODE, return the
201    corresponding howto entry.  */
202 
203 static reloc_howto_type *
204 elfNN_kvx_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
205 {
206   unsigned int i;
207 
208   /* Convert bfd generic reloc to KVX-specific reloc.  */
209   if (code < BFD_RELOC_KVX_RELOC_START || code > BFD_RELOC_KVX_RELOC_END)
210     for (i = 0; i < ARRAY_SIZE (elf_kvx_reloc_map) ; i++)
211       if (elf_kvx_reloc_map[i].from == code)
212 	{
213 	  code = elf_kvx_reloc_map[i].to;
214 	  break;
215 	}
216 
217   if (code > BFD_RELOC_KVX_RELOC_START && code < BFD_RELOC_KVX_RELOC_END)
218       return &elf_kvx_howto_table[code - (BFD_RELOC_KVX_RELOC_START + 1)];
219 
220   return NULL;
221 }
222 
223 static reloc_howto_type *
224 elfNN_kvx_howto_from_type (bfd *abfd, unsigned int r_type)
225 {
226   bfd_reloc_code_real_type val;
227   reloc_howto_type *howto;
228 
229 #if ARCH_SIZE == 32
230   if (r_type > 256)
231     {
232       bfd_set_error (bfd_error_bad_value);
233       return NULL;
234     }
235 #endif
236 
237   val = elfNN_kvx_bfd_reloc_from_type (abfd, r_type);
238   howto = elfNN_kvx_howto_from_bfd_reloc (val);
239 
240   if (howto != NULL)
241     return howto;
242 
243   bfd_set_error (bfd_error_bad_value);
244   return NULL;
245 }
246 
247 static bool
248 elfNN_kvx_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
249 			 Elf_Internal_Rela *elf_reloc)
250 {
251   unsigned int r_type;
252 
253   r_type = ELFNN_R_TYPE (elf_reloc->r_info);
254   bfd_reloc->howto = elfNN_kvx_howto_from_type (abfd, r_type);
255 
256   if (bfd_reloc->howto == NULL)
257     {
258       /* xgettext:c-format */
259       _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
260 			  abfd, r_type);
261       return false;
262     }
263   return true;
264 }
265 
266 static reloc_howto_type *
267 elfNN_kvx_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
268 			     bfd_reloc_code_real_type code)
269 {
270   reloc_howto_type *howto = elfNN_kvx_howto_from_bfd_reloc (code);
271 
272   if (howto != NULL)
273     return howto;
274 
275   bfd_set_error (bfd_error_bad_value);
276   return NULL;
277 }
278 
279 static reloc_howto_type *
280 elfNN_kvx_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
281 			     const char *r_name)
282 {
283   unsigned int i;
284 
285   for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i)
286     if (elf_kvx_howto_table[i].name != NULL
287 	&& strcasecmp (elf_kvx_howto_table[i].name, r_name) == 0)
288       return &elf_kvx_howto_table[i];
289 
290   return NULL;
291 }
292 
293 #define TARGET_LITTLE_SYM               kvx_elfNN_vec
294 #define TARGET_LITTLE_NAME              "elfNN-kvx"
295 
296 /* The linker script knows the section names for placement.
297    The entry_names are used to do simple name mangling on the stubs.
298    Given a function name, and its type, the stub can be found. The
299    name can be changed. The only requirement is that the %s be present.  */
300 #define STUB_ENTRY_NAME   "__%s_veneer"
301 
302 /* The name of the dynamic interpreter.  This is put in the .interp
303    section.  */
304 #define ELF_DYNAMIC_INTERPRETER     "/lib/ld.so.1"
305 
306 
307 /* PCREL 27 is sign-extended and scaled by 4.  */
308 #define KVX_MAX_FWD_CALL_OFFSET \
309   (((1 << 26) - 1) << 2)
310 #define KVX_MAX_BWD_CALL_OFFSET \
311   (-((1 << 26) << 2))
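
/* Working the limits out: the forward reach is ((1 << 26) - 1) << 2
   = 0x0ffffffc (just under +256 MiB) and the backward reach is
   -((1 << 26) << 2) = -0x10000000 (-256 MiB), i.e. a 27-bit signed
   displacement scaled by 4.  The 255 MiB default stub group size used
   in elfNN_kvx_size_stubs stays comfortably inside this window.  */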
312 
313 /* Check that the destination of the call is within the PCREL27
314    range. */
315 static int
316 kvx_valid_call_p (bfd_vma value, bfd_vma place)
317 {
318   bfd_signed_vma offset = (bfd_signed_vma) (value - place);
319   return (offset <= KVX_MAX_FWD_CALL_OFFSET
320 	  && offset >= KVX_MAX_BWD_CALL_OFFSET);
321 }
322 
323 /* Section name for stubs is the associated section name plus this
324    string.  */
325 #define STUB_SUFFIX ".stub"
326 
327 enum elf_kvx_stub_type
328 {
329   kvx_stub_none,
330   kvx_stub_long_branch,
331 };
332 
333 struct elf_kvx_stub_hash_entry
334 {
335   /* Base hash table entry structure.  */
336   struct bfd_hash_entry root;
337 
338   /* The stub section.  */
339   asection *stub_sec;
340 
341   /* Offset within stub_sec of the beginning of this stub.  */
342   bfd_vma stub_offset;
343 
344   /* Given the symbol's value and its section we can determine its final
345      value when building the stubs (so the stub knows where to jump).  */
346   bfd_vma target_value;
347   asection *target_section;
348 
349   enum elf_kvx_stub_type stub_type;
350 
351   /* The symbol table entry, if any, that this was derived from.  */
352   struct elf_kvx_link_hash_entry *h;
353 
354   /* Destination symbol type */
355   unsigned char st_type;
356 
357   /* Where this stub is being called from, or, in the case of combined
358      stub sections, the first input section in the group.  */
359   asection *id_sec;
360 
361   /* The name for the local symbol at the start of this stub.  The
362      stub name in the hash table has to be unique; this does not, so
363      it can be friendlier.  */
364   char *output_name;
365 };
366 
367 /* Used to build a map of a section.  This is required for mixed-endian
368    code/data.  */
369 
370 typedef struct elf_elf_section_map
371 {
372   bfd_vma vma;
373   char type;
374 }
375 elf_kvx_section_map;
376 
377 
378 typedef struct _kvx_elf_section_data
379 {
380   struct bfd_elf_section_data elf;
381   unsigned int mapcount;
382   unsigned int mapsize;
383   elf_kvx_section_map *map;
384 }
385 _kvx_elf_section_data;
386 
387 #define elf_kvx_section_data(sec) \
388   ((_kvx_elf_section_data *) elf_section_data (sec))
389 
390 struct elf_kvx_local_symbol
391 {
392   unsigned int got_type;
393   bfd_signed_vma got_refcount;
394   bfd_vma got_offset;
395 };
396 
397 struct elf_kvx_obj_tdata
398 {
399   struct elf_obj_tdata root;
400 
401   /* local symbol descriptors */
402   struct elf_kvx_local_symbol *locals;
403 
404   /* Zero to warn when linking objects with incompatible enum sizes.  */
405   int no_enum_size_warning;
406 
407   /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
408   int no_wchar_size_warning;
409 };
410 
411 #define elf_kvx_tdata(bfd)				\
412   ((struct elf_kvx_obj_tdata *) (bfd)->tdata.any)
413 
414 #define elf_kvx_locals(bfd) (elf_kvx_tdata (bfd)->locals)
415 
416 #define is_kvx_elf(bfd)				\
417   (bfd_get_flavour (bfd) == bfd_target_elf_flavour	\
418    && elf_tdata (bfd) != NULL				\
419    && elf_object_id (bfd) == KVX_ELF_DATA)
420 
421 static bool
422 elfNN_kvx_mkobject (bfd *abfd)
423 {
424   return bfd_elf_allocate_object (abfd, sizeof (struct elf_kvx_obj_tdata),
425 				  KVX_ELF_DATA);
426 }
427 
428 #define elf_kvx_hash_entry(ent) \
429   ((struct elf_kvx_link_hash_entry *)(ent))
430 
431 #define GOT_UNKNOWN    0
432 #define GOT_NORMAL     1
433 
434 #define GOT_TLS_GD     2
435 #define GOT_TLS_IE     4
436 #define GOT_TLS_LD     8
437 
438 /* KVX ELF linker hash entry.  */
439 struct elf_kvx_link_hash_entry
440 {
441   struct elf_link_hash_entry root;
442 
443   /* Since PLT entries have variable size, we need to record the
444      index into .got.plt instead of recomputing it from the PLT
445      offset.  */
446   bfd_signed_vma plt_got_offset;
447 
448   /* Bit mask representing the type of GOT entry(s) if any required by
449      this symbol.  */
450   unsigned int got_type;
451 
452   /* A pointer to the most recently used stub hash entry against this
453      symbol.  */
454   struct elf_kvx_stub_hash_entry *stub_cache;
455 };
456 
457 /* Get the KVX elf linker hash table from a link_info structure.  */
458 #define elf_kvx_hash_table(info)					\
459   ((struct elf_kvx_link_hash_table *) ((info)->hash))
460 
461 #define kvx_stub_hash_lookup(table, string, create, copy)		\
462   ((struct elf_kvx_stub_hash_entry *)				\
463    bfd_hash_lookup ((table), (string), (create), (copy)))
464 
465 /* KVX ELF linker hash table.  */
466 struct elf_kvx_link_hash_table
467 {
468   /* The main hash table.  */
469   struct elf_link_hash_table root;
470 
471   /* Nonzero to force PIC branch veneers.  */
472   int pic_veneer;
473 
474   /* The number of bytes in the initial entry in the PLT.  */
475   bfd_size_type plt_header_size;
476 
477   /* The number of bytes in the subsequent PLT entries.  */
478   bfd_size_type plt_entry_size;
479 
480   /* The bytes of the subsequent PLT entry.  */
481   const bfd_byte *plt_entry;
482 
483   /* Short-cuts to get to dynamic linker sections.  */
484   asection *sdynbss;
485   asection *srelbss;
486 
487   /* Small local sym cache.  */
488   struct sym_cache sym_cache;
489 
490   /* For convenience in allocate_dynrelocs.  */
491   bfd *obfd;
492 
493   /* The amount of space used by the reserved portion of the sgotplt
494      section, plus whatever space is used by the jump slots.  */
495   bfd_vma sgotplt_jump_table_size;
496 
497   /* The stub hash table.  */
498   struct bfd_hash_table stub_hash_table;
499 
500   /* Linker stub bfd.  */
501   bfd *stub_bfd;
502 
503   /* Linker call-backs.  */
504   asection *(*add_stub_section) (const char *, asection *);
505   void (*layout_sections_again) (void);
506 
507   /* Array to keep track of which stub sections have been created, and
508      information on stub grouping.  */
509   struct map_stub
510   {
511     /* This is the section to which stubs in the group will be
512        attached.  */
513     asection *link_sec;
514     /* The stub section.  */
515     asection *stub_sec;
516   } *stub_group;
517 
518   /* Assorted information used by elfNN_kvx_size_stubs.  */
519   unsigned int bfd_count;
520   unsigned int top_index;
521   asection **input_list;
522 };
523 
524 /* Create an entry in an KVX ELF linker hash table.  */
525 
526 static struct bfd_hash_entry *
527 elfNN_kvx_link_hash_newfunc (struct bfd_hash_entry *entry,
528 			     struct bfd_hash_table *table,
529 			     const char *string)
530 {
531   struct elf_kvx_link_hash_entry *ret =
532     (struct elf_kvx_link_hash_entry *) entry;
533 
534   /* Allocate the structure if it has not already been allocated by a
535      subclass.  */
536   if (ret == NULL)
537     ret = bfd_hash_allocate (table,
538 			     sizeof (struct elf_kvx_link_hash_entry));
539   if (ret == NULL)
540     return (struct bfd_hash_entry *) ret;
541 
542   /* Call the allocation method of the superclass.  */
543   ret = ((struct elf_kvx_link_hash_entry *)
544 	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
545 				     table, string));
546   if (ret != NULL)
547     {
548       ret->got_type = GOT_UNKNOWN;
549       ret->plt_got_offset = (bfd_vma) - 1;
550       ret->stub_cache = NULL;
551     }
552 
553   return (struct bfd_hash_entry *) ret;
554 }
555 
556 /* Initialize an entry in the stub hash table.  */
557 
558 static struct bfd_hash_entry *
559 stub_hash_newfunc (struct bfd_hash_entry *entry,
560 		   struct bfd_hash_table *table, const char *string)
561 {
562   /* Allocate the structure if it has not already been allocated by a
563      subclass.  */
564   if (entry == NULL)
565     {
566       entry = bfd_hash_allocate (table,
567 				 sizeof (struct
568 					 elf_kvx_stub_hash_entry));
569       if (entry == NULL)
570 	return entry;
571     }
572 
573   /* Call the allocation method of the superclass.  */
574   entry = bfd_hash_newfunc (entry, table, string);
575   if (entry != NULL)
576     {
577       struct elf_kvx_stub_hash_entry *eh;
578 
579       /* Initialize the local fields.  */
580       eh = (struct elf_kvx_stub_hash_entry *) entry;
581       eh->stub_sec = NULL;
582       eh->stub_offset = 0;
583       eh->target_value = 0;
584       eh->target_section = NULL;
585       eh->stub_type = kvx_stub_none;
586       eh->h = NULL;
587       eh->id_sec = NULL;
588     }
589 
590   return entry;
591 }
592 
593 /* Copy the extra info we tack onto an elf_link_hash_entry.  */
594 
595 static void
596 elfNN_kvx_copy_indirect_symbol (struct bfd_link_info *info,
597 				struct elf_link_hash_entry *dir,
598 				struct elf_link_hash_entry *ind)
599 {
600   struct elf_kvx_link_hash_entry *edir, *eind;
601 
602   edir = (struct elf_kvx_link_hash_entry *) dir;
603   eind = (struct elf_kvx_link_hash_entry *) ind;
604 
605   if (ind->root.type == bfd_link_hash_indirect)
606     {
607       /* Copy over PLT info.  */
608       if (dir->got.refcount <= 0)
609 	{
610 	  edir->got_type = eind->got_type;
611 	  eind->got_type = GOT_UNKNOWN;
612 	}
613     }
614 
615   _bfd_elf_link_hash_copy_indirect (info, dir, ind);
616 }
617 
618 /* Destroy a KVX elf linker hash table.  */
619 
620 static void
elfNN_kvx_link_hash_table_free(bfd * obfd)621 elfNN_kvx_link_hash_table_free (bfd *obfd)
622 {
623   struct elf_kvx_link_hash_table *ret
624     = (struct elf_kvx_link_hash_table *) obfd->link.hash;
625 
626   bfd_hash_table_free (&ret->stub_hash_table);
627   _bfd_elf_link_hash_table_free (obfd);
628 }
629 
630 /* Create a KVX elf linker hash table.  */
631 
632 static struct bfd_link_hash_table *
633 elfNN_kvx_link_hash_table_create (bfd *abfd)
634 {
635   struct elf_kvx_link_hash_table *ret;
636   bfd_size_type amt = sizeof (struct elf_kvx_link_hash_table);
637 
638   ret = bfd_zmalloc (amt);
639   if (ret == NULL)
640     return NULL;
641 
642   if (!_bfd_elf_link_hash_table_init
643       (&ret->root, abfd, elfNN_kvx_link_hash_newfunc,
644        sizeof (struct elf_kvx_link_hash_entry), KVX_ELF_DATA))
645     {
646       free (ret);
647       return NULL;
648     }
649 
650   ret->plt_header_size = PLT_ENTRY_SIZE;
651   ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
652   ret->plt_entry = elfNN_kvx_small_plt_entry;
653 
654   ret->obfd = abfd;
655 
656   if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
657 			    sizeof (struct elf_kvx_stub_hash_entry)))
658     {
659       _bfd_elf_link_hash_table_free (abfd);
660       return NULL;
661     }
662 
663   ret->root.root.hash_table_free = elfNN_kvx_link_hash_table_free;
664 
665   return &ret->root.root;
666 }
667 
668 static bfd_reloc_status_type
669 kvx_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
670 	      bfd_vma offset, bfd_vma value)
671 {
672   reloc_howto_type *howto;
673 
674   howto = elfNN_kvx_howto_from_type (input_bfd, r_type);
675   r_type = elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type);
676   return _bfd_kvx_elf_put_addend (input_bfd,
677 				  input_section->contents + offset, r_type,
678 				  howto, value);
679 }
680 
681 /* Determine the type of stub needed, if any, for a call.  */
682 
683 static enum elf_kvx_stub_type
684 kvx_type_of_stub (asection *input_sec,
685 		  const Elf_Internal_Rela *rel,
686 		  asection *sym_sec,
687 		  unsigned char st_type,
688 		  bfd_vma destination)
689 {
690   bfd_vma location;
691   bfd_signed_vma branch_offset;
692   unsigned int r_type;
693   enum elf_kvx_stub_type stub_type = kvx_stub_none;
694 
695   if (st_type != STT_FUNC
696       && (sym_sec == input_sec))
697     return stub_type;
698 
699   /* Determine where the call point is.  */
700   location = (input_sec->output_offset
701 	      + input_sec->output_section->vma + rel->r_offset);
702 
703   branch_offset = (bfd_signed_vma) (destination - location);
704 
705   r_type = ELFNN_R_TYPE (rel->r_info);
706 
707   /* We don't want to redirect any old unconditional jump in this way,
708      only one which is being used for a sibcall, where it is
709      acceptable for the R16 and R17 registers to be clobbered.  */
710   if (r_type == R_KVX_PCREL27
711       && (branch_offset > KVX_MAX_FWD_CALL_OFFSET
712 	  || branch_offset < KVX_MAX_BWD_CALL_OFFSET))
713     {
714       stub_type = kvx_stub_long_branch;
715     }
716 
717   return stub_type;
718 }
719 
720 /* Build a name for an entry in the stub hash table.  */
721 
722 static char *
723 elfNN_kvx_stub_name (const asection *input_section,
724 		     const asection *sym_sec,
725 		     const struct elf_kvx_link_hash_entry *hash,
726 		     const Elf_Internal_Rela *rel)
727 {
728   char *stub_name;
729   bfd_size_type len;
730 
731   if (hash)
732     {
733       len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
734       stub_name = bfd_malloc (len);
735       if (stub_name != NULL)
736 	snprintf (stub_name, len, "%08x_%s+%" PRIx64 "x",
737 		  (unsigned int) input_section->id,
738 		  hash->root.root.root.string,
739 		  (uint64_t) rel->r_addend);
740     }
741   else
742     {
743       len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
744       stub_name = bfd_malloc (len);
745       if (stub_name != NULL)
746 	snprintf (stub_name, len, "%08x_%x:%x+%" PRIx64 "x",
747 		  (unsigned int) input_section->id,
748 		  (unsigned int) sym_sec->id,
749 		  (unsigned int) ELFNN_R_SYM (rel->r_info),
750 		  (uint64_t) rel->r_addend);
751     }
752 
753   return stub_name;
754 }
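
/* As a rough illustration of the naming scheme above: a stub reaching
   global symbol "foo" from the group section with id 0x2a and addend 0
   is named along the lines of "0000002a_foo+0...", while a stub for a
   local symbol encodes the symbol section id and symbol index instead,
   e.g. "0000002a_5:c+0...".  The exact suffix follows the snprintf
   formats above.  */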
755 
756 /* Return true if symbol H should be hashed in the `.gnu.hash' section.  For
757    executable PLT slots where the executable never takes the address of those
758    functions, the function symbols are not added to the hash table.  */
759 
760 static bool
761 elf_kvx_hash_symbol (struct elf_link_hash_entry *h)
762 {
763   if (h->plt.offset != (bfd_vma) -1
764       && !h->def_regular
765       && !h->pointer_equality_needed)
766     return false;
767 
768   return _bfd_elf_hash_symbol (h);
769 }
770 
771 
772 /* Look up an entry in the stub hash.  Stub entries are cached because
773    creating the stub name takes a bit of time.  */
774 
775 static struct elf_kvx_stub_hash_entry *
776 elfNN_kvx_get_stub_entry (const asection *input_section,
777 			  const asection *sym_sec,
778 			  struct elf_link_hash_entry *hash,
779 			  const Elf_Internal_Rela *rel,
780 			  struct elf_kvx_link_hash_table *htab)
781 {
782   struct elf_kvx_stub_hash_entry *stub_entry;
783   struct elf_kvx_link_hash_entry *h =
784     (struct elf_kvx_link_hash_entry *) hash;
785   const asection *id_sec;
786 
787   if ((input_section->flags & SEC_CODE) == 0)
788     return NULL;
789 
790   /* If this input section is part of a group of sections sharing one
791      stub section, then use the id of the first section in the group.
792      Stub names need to include a section id, as there may well be
793      more than one stub used to reach say, printf, and we need to
794      distinguish between them.  */
795   id_sec = htab->stub_group[input_section->id].link_sec;
796 
797   if (h != NULL && h->stub_cache != NULL
798       && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
799     {
800       stub_entry = h->stub_cache;
801     }
802   else
803     {
804       char *stub_name;
805 
806       stub_name = elfNN_kvx_stub_name (id_sec, sym_sec, h, rel);
807       if (stub_name == NULL)
808 	return NULL;
809 
810       stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table,
811 					 stub_name, false, false);
812       if (h != NULL)
813 	h->stub_cache = stub_entry;
814 
815       free (stub_name);
816     }
817 
818   return stub_entry;
819 }
820 
821 
822 /* Create a stub section.  */
823 
824 static asection *
825 _bfd_kvx_create_stub_section (asection *section,
826 			      struct elf_kvx_link_hash_table *htab)
827 
828 {
829   size_t namelen;
830   bfd_size_type len;
831   char *s_name;
832 
833   namelen = strlen (section->name);
834   len = namelen + sizeof (STUB_SUFFIX);
835   s_name = bfd_alloc (htab->stub_bfd, len);
836   if (s_name == NULL)
837     return NULL;
838 
839   memcpy (s_name, section->name, namelen);
840   memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
841   return (*htab->add_stub_section) (s_name, section);
842 }
843 
844 
845 /* Find or create a stub section for a link section.
846 
847    Find or create the stub section used to collect stubs attached to
848    the specified link section.  */
849 
850 static asection *
851 _bfd_kvx_get_stub_for_link_section (asection *link_section,
852 				    struct elf_kvx_link_hash_table *htab)
853 {
854   if (htab->stub_group[link_section->id].stub_sec == NULL)
855     htab->stub_group[link_section->id].stub_sec
856       = _bfd_kvx_create_stub_section (link_section, htab);
857   return htab->stub_group[link_section->id].stub_sec;
858 }
859 
860 
861 /* Find or create a stub section in the stub group for an input
862    section.  */
863 
864 static asection *
865 _bfd_kvx_create_or_find_stub_sec (asection *section,
866 				  struct elf_kvx_link_hash_table *htab)
867 {
868   asection *link_sec = htab->stub_group[section->id].link_sec;
869   return _bfd_kvx_get_stub_for_link_section (link_sec, htab);
870 }
871 
872 
873 /* Add a new stub entry in the stub group associated with an input
874    section to the stub hash.  Not all fields of the new stub entry are
875    initialised.  */
876 
877 static struct elf_kvx_stub_hash_entry *
878 _bfd_kvx_add_stub_entry_in_group (const char *stub_name,
879 				  asection *section,
880 				  struct elf_kvx_link_hash_table *htab)
881 {
882   asection *link_sec;
883   asection *stub_sec;
884   struct elf_kvx_stub_hash_entry *stub_entry;
885 
886   link_sec = htab->stub_group[section->id].link_sec;
887   stub_sec = _bfd_kvx_create_or_find_stub_sec (section, htab);
888 
889   /* Enter this entry into the linker stub hash table.  */
890   stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table, stub_name,
891 				     true, false);
892   if (stub_entry == NULL)
893     {
894       /* xgettext:c-format */
895       _bfd_error_handler (_("%pB: cannot create stub entry %s"),
896 			  section->owner, stub_name);
897       return NULL;
898     }
899 
900   stub_entry->stub_sec = stub_sec;
901   stub_entry->stub_offset = 0;
902   stub_entry->id_sec = link_sec;
903 
904   return stub_entry;
905 }
906 
907 static bool
908 kvx_build_one_stub (struct bfd_hash_entry *gen_entry,
909 		    void *in_arg)
910 {
911   struct elf_kvx_stub_hash_entry *stub_entry;
912   asection *stub_sec;
913   bfd *stub_bfd;
914   bfd_byte *loc;
915   bfd_vma sym_value;
916   unsigned int template_size;
917   const uint32_t *template;
918   unsigned int i;
919   struct bfd_link_info *info;
920 
921   /* Massage our args to the form they really have.  */
922   stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
923 
924   info = (struct bfd_link_info *) in_arg;
925 
926   /* Fail if the target section could not be assigned to an output
927      section.  The user should fix his linker script.  */
928   if (stub_entry->target_section->output_section == NULL
929       && info->non_contiguous_regions)
930     info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
931 			      "Retry without "
932 			      "--enable-non-contiguous-regions.\n"),
933 			    stub_entry->target_section);
934 
935   stub_sec = stub_entry->stub_sec;
936 
937   /* Make a note of the offset within the stubs for this entry.  */
938   stub_entry->stub_offset = stub_sec->size;
939   loc = stub_sec->contents + stub_entry->stub_offset;
940 
941   stub_bfd = stub_sec->owner;
942 
943   /* This is the address of the stub destination.  */
944   sym_value = (stub_entry->target_value
945 	       + stub_entry->target_section->output_offset
946 	       + stub_entry->target_section->output_section->vma);
947 
948   switch (stub_entry->stub_type)
949     {
950     case kvx_stub_long_branch:
951       template = elfNN_kvx_long_branch_stub;
952       template_size = sizeof (elfNN_kvx_long_branch_stub);
953       break;
954     default:
955       abort ();
956     }
957 
958   for (i = 0; i < (template_size / sizeof template[0]); i++)
959     {
960       bfd_putl32 (template[i], loc);
961       loc += 4;
962     }
963 
964   stub_sec->size += template_size;
965 
966   switch (stub_entry->stub_type)
967     {
968     case kvx_stub_long_branch:
969       /* The stub uses a make insn with a 43-bit immediate.
970 	 We need to apply 3 relocations:
971 	 BFD_RELOC_KVX_S43_LO10,
972 	 BFD_RELOC_KVX_S43_UP27,
973 	 BFD_RELOC_KVX_S43_EX6.  */
974       if (kvx_relocate (R_KVX_S43_LO10, stub_bfd, stub_sec,
975 			stub_entry->stub_offset, sym_value) != bfd_reloc_ok)
976 	BFD_FAIL ();
977       if (kvx_relocate (R_KVX_S43_EX6, stub_bfd, stub_sec,
978 			stub_entry->stub_offset, sym_value) != bfd_reloc_ok)
979 	BFD_FAIL ();
980       if (kvx_relocate (R_KVX_S43_UP27, stub_bfd, stub_sec,
981 			stub_entry->stub_offset + 4, sym_value) != bfd_reloc_ok)
982 	BFD_FAIL ();
983       break;
984     default:
985       abort ();
986     }
987 
988   return true;
989 }
990 
991 /* As above, but don't actually build the stub.  Just bump offset so
992    we know stub section sizes.  */
993 
994 static bool
995 kvx_size_one_stub (struct bfd_hash_entry *gen_entry,
996 		   void *in_arg ATTRIBUTE_UNUSED)
997 {
998   struct elf_kvx_stub_hash_entry *stub_entry;
999   int size;
1000 
1001   /* Massage our args to the form they really have.  */
1002   stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
1003 
1004   switch (stub_entry->stub_type)
1005     {
1006     case kvx_stub_long_branch:
1007       size = sizeof (elfNN_kvx_long_branch_stub);
1008       break;
1009     default:
1010       abort ();
1011     }
1012 
1013   stub_entry->stub_sec->size += size;
1014   return true;
1015 }
1016 
1017 /* External entry points for sizing and building linker stubs.  */
1018 
1019 /* Set up various things so that we can make a list of input sections
1020    for each output section included in the link.  Returns -1 on error,
1021    0 when no stubs will be needed, and 1 on success.  */
1022 
1023 int
1024 elfNN_kvx_setup_section_lists (bfd *output_bfd,
1025 			       struct bfd_link_info *info)
1026 {
1027   bfd *input_bfd;
1028   unsigned int bfd_count;
1029   unsigned int top_id, top_index;
1030   asection *section;
1031   asection **input_list, **list;
1032   bfd_size_type amt;
1033   struct elf_kvx_link_hash_table *htab =
1034     elf_kvx_hash_table (info);
1035 
1036   if (!is_elf_hash_table ((const struct bfd_link_hash_table *)htab))
1037     return 0;
1038 
1039   /* Count the number of input BFDs and find the top input section id.  */
1040   for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
1041        input_bfd != NULL; input_bfd = input_bfd->link.next)
1042     {
1043       bfd_count += 1;
1044       for (section = input_bfd->sections;
1045 	   section != NULL; section = section->next)
1046 	{
1047 	  if (top_id < section->id)
1048 	    top_id = section->id;
1049 	}
1050     }
1051   htab->bfd_count = bfd_count;
1052 
1053   amt = sizeof (struct map_stub) * (top_id + 1);
1054   htab->stub_group = bfd_zmalloc (amt);
1055   if (htab->stub_group == NULL)
1056     return -1;
1057 
1058   /* We can't use output_bfd->section_count here to find the top output
1059      section index as some sections may have been removed, and
1060      _bfd_strip_section_from_output doesn't renumber the indices.  */
1061   for (section = output_bfd->sections, top_index = 0;
1062        section != NULL; section = section->next)
1063     {
1064       if (top_index < section->index)
1065 	top_index = section->index;
1066     }
1067 
1068   htab->top_index = top_index;
1069   amt = sizeof (asection *) * (top_index + 1);
1070   input_list = bfd_malloc (amt);
1071   htab->input_list = input_list;
1072   if (input_list == NULL)
1073     return -1;
1074 
1075   /* For sections we aren't interested in, mark their entries with a
1076      value we can check later.  */
1077   list = input_list + top_index;
1078   do
1079     *list = bfd_abs_section_ptr;
1080   while (list-- != input_list);
1081 
1082   for (section = output_bfd->sections;
1083        section != NULL; section = section->next)
1084     {
1085       if ((section->flags & SEC_CODE) != 0)
1086 	input_list[section->index] = NULL;
1087     }
1088 
1089   return 1;
1090 }
1091 
1092 /* Used by elfNN_kvx_next_input_section and group_sections.  */
1093 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
1094 
1095 /* The linker repeatedly calls this function for each input section,
1096    in the order that input sections are linked into output sections.
1097    Build lists of input sections to determine groupings between which
1098    we may insert linker stubs.  */
1099 
1100 void
1101 elfNN_kvx_next_input_section (struct bfd_link_info *info, asection *isec)
1102 {
1103   struct elf_kvx_link_hash_table *htab =
1104     elf_kvx_hash_table (info);
1105 
1106   if (isec->output_section->index <= htab->top_index)
1107     {
1108       asection **list = htab->input_list + isec->output_section->index;
1109 
1110       if (*list != bfd_abs_section_ptr)
1111 	{
1112 	  /* Steal the link_sec pointer for our list.  */
1113 	  /* This happens to make the list in reverse order,
1114 	     which is what we want.  */
1115 	  PREV_SEC (isec) = *list;
1116 	  *list = isec;
1117 	}
1118     }
1119 }
1120 
1121 /* See whether we can group stub sections together.  Grouping stub
1122    sections may result in fewer stubs.  More importantly, we need to
1123    put all .init* and .fini* stubs at the beginning of the .init or
1124    .fini output sections respectively, because glibc splits the
1125    _init and _fini functions into multiple parts.  Putting a stub in
1126    the middle of a function is not a good idea.  */
1127 
1128 static void
1129 group_sections (struct elf_kvx_link_hash_table *htab,
1130 		bfd_size_type stub_group_size,
1131 		bool stubs_always_after_branch)
1132 {
1133   asection **list = htab->input_list;
1134 
1135   do
1136     {
1137       asection *tail = *list;
1138       asection *head;
1139 
1140       if (tail == bfd_abs_section_ptr)
1141 	continue;
1142 
1143       /* Reverse the list: we must avoid placing stubs at the
1144 	 beginning of the section because the beginning of the text
1145 	 section may be required for an interrupt vector in bare metal
1146 	 code.  */
1147 #define NEXT_SEC PREV_SEC
1148       head = NULL;
1149       while (tail != NULL)
1150 	{
1151 	  /* Pop from tail.  */
1152 	  asection *item = tail;
1153 	  tail = PREV_SEC (item);
1154 
1155 	  /* Push on head.  */
1156 	  NEXT_SEC (item) = head;
1157 	  head = item;
1158 	}
1159 
1160       while (head != NULL)
1161 	{
1162 	  asection *curr;
1163 	  asection *next;
1164 	  bfd_vma stub_group_start = head->output_offset;
1165 	  bfd_vma end_of_next;
1166 
1167 	  curr = head;
1168 	  while (NEXT_SEC (curr) != NULL)
1169 	    {
1170 	      next = NEXT_SEC (curr);
1171 	      end_of_next = next->output_offset + next->size;
1172 	      if (end_of_next - stub_group_start >= stub_group_size)
1173 		/* End of NEXT is too far from start, so stop.  */
1174 		break;
1175 	      /* Add NEXT to the group.  */
1176 	      curr = next;
1177 	    }
1178 
1179 	  /* OK, the size from the start to the start of CURR is less
1180 	     than stub_group_size and thus can be handled by one stub
1181 	     section.  (Or the head section is itself larger than
1182 	     stub_group_size, in which case we may be toast.)
1183 	     We should really be keeping track of the total size of
1184 	     stubs added here, as stubs contribute to the final output
1185 	     section size.  */
1186 	  do
1187 	    {
1188 	      next = NEXT_SEC (head);
1189 	      /* Set up this stub group.  */
1190 	      htab->stub_group[head->id].link_sec = curr;
1191 	    }
1192 	  while (head != curr && (head = next) != NULL);
1193 
1194 	  /* But wait, there's more!  Input sections up to stub_group_size
1195 	     bytes after the stub section can be handled by it too.  */
1196 	  if (!stubs_always_after_branch)
1197 	    {
1198 	      stub_group_start = curr->output_offset + curr->size;
1199 
1200 	      while (next != NULL)
1201 		{
1202 		  end_of_next = next->output_offset + next->size;
1203 		  if (end_of_next - stub_group_start >= stub_group_size)
1204 		    /* End of NEXT is too far from stubs, so stop.  */
1205 		    break;
1206 		  /* Add NEXT to the stub group.  */
1207 		  head = next;
1208 		  next = NEXT_SEC (head);
1209 		  htab->stub_group[head->id].link_sec = curr;
1210 		}
1211 	    }
1212 	  head = next;
1213 	}
1214     }
1215   while (list++ != htab->input_list + htab->top_index);
1216 
1217   free (htab->input_list);
1218 }
1219 
1220 static void
1221 _bfd_kvx_resize_stubs (struct elf_kvx_link_hash_table *htab)
1222 {
1223   asection *section;
1224 
1225   /* OK, we've added some stubs.  Find out the new size of the
1226      stub sections.  */
1227   for (section = htab->stub_bfd->sections;
1228        section != NULL; section = section->next)
1229     {
1230       /* Ignore non-stub sections.  */
1231       if (!strstr (section->name, STUB_SUFFIX))
1232 	continue;
1233       section->size = 0;
1234     }
1235 
1236   bfd_hash_traverse (&htab->stub_hash_table, kvx_size_one_stub, htab);
1237 }
1238 
1239 /* Satisfy the ELF linker by filling in some fields in our fake bfd.  */
1240 
1241 bool
1242 kvx_elfNN_init_stub_bfd (struct bfd_link_info *info,
1243 			bfd *stub_bfd)
1244 {
1245   struct elf_kvx_link_hash_table *htab;
1246 
1247   elf_elfheader (stub_bfd)->e_ident[EI_CLASS] = ELFCLASSNN;
1248 
1249 /* Always hook our dynamic sections into the first bfd, which is the
1250    linker created stub bfd.  This ensures that the GOT header is at
1251    the start of the output TOC section.  */
1252   htab = elf_kvx_hash_table (info);
1253   if (htab == NULL)
1254     return false;
1255 
1256   return true;
1257 }
1258 
1259 /* Determine and set the size of the stub section for a final link.
1260 
1261    The basic idea here is to examine all the relocations looking for
1262    PC-relative calls to a target that is unreachable with a 27-bit
1263    immediate (found in call and goto).  */
1264 
1265 bool
1266 elfNN_kvx_size_stubs (bfd *output_bfd,
1267 		     bfd *stub_bfd,
1268 		     struct bfd_link_info *info,
1269 		     bfd_signed_vma group_size,
1270 		     asection * (*add_stub_section) (const char *,
1271 						     asection *),
1272 		     void (*layout_sections_again) (void))
1273 {
1274   bfd_size_type stub_group_size;
1275   bool stubs_always_before_branch;
1276   bool stub_changed = false;
1277   struct elf_kvx_link_hash_table *htab = elf_kvx_hash_table (info);
1278 
1279   /* Propagate mach to stub bfd, because it may not have been
1280      finalized when we created stub_bfd.  */
1281   bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
1282 		     bfd_get_mach (output_bfd));
1283 
1284   /* Stash our params away.  */
1285   htab->stub_bfd = stub_bfd;
1286   htab->add_stub_section = add_stub_section;
1287   htab->layout_sections_again = layout_sections_again;
1288   stubs_always_before_branch = group_size < 0;
1289   if (group_size < 0)
1290     stub_group_size = -group_size;
1291   else
1292     stub_group_size = group_size;
1293 
1294   if (stub_group_size == 1)
1295     {
1296       /* Default values.  */
1297       /* KVX branch range is +-256MB. The value used is 1MB less.  */
1298       stub_group_size = 255 * 1024 * 1024;
1299     }
1300 
1301   group_sections (htab, stub_group_size, stubs_always_before_branch);
1302 
1303   (*htab->layout_sections_again) ();
1304 
1305   while (1)
1306     {
1307       bfd *input_bfd;
1308 
1309       for (input_bfd = info->input_bfds;
1310 	   input_bfd != NULL; input_bfd = input_bfd->link.next)
1311 	{
1312 	  Elf_Internal_Shdr *symtab_hdr;
1313 	  asection *section;
1314 	  Elf_Internal_Sym *local_syms = NULL;
1315 
1316 	  if (!is_kvx_elf (input_bfd)
1317 	      || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
1318 	    continue;
1319 
1320 	  /* We'll need the symbol table in a second.  */
1321 	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
1322 	  if (symtab_hdr->sh_info == 0)
1323 	    continue;
1324 
1325 	  /* Walk over each section attached to the input bfd.  */
1326 	  for (section = input_bfd->sections;
1327 	       section != NULL; section = section->next)
1328 	    {
1329 	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1330 
1331 	      /* If there aren't any relocs, then there's nothing more
1332 		 to do.  */
1333 	      if ((section->flags & SEC_RELOC) == 0
1334 		  || section->reloc_count == 0
1335 		  || (section->flags & SEC_CODE) == 0)
1336 		continue;
1337 
1338 	      /* If this section is a link-once section that will be
1339 		 discarded, then don't create any stubs.  */
1340 	      if (section->output_section == NULL
1341 		  || section->output_section->owner != output_bfd)
1342 		continue;
1343 
1344 	      /* Get the relocs.  */
1345 	      internal_relocs
1346 		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
1347 					     NULL, info->keep_memory);
1348 	      if (internal_relocs == NULL)
1349 		goto error_ret_free_local;
1350 
1351 	      /* Now examine each relocation.  */
1352 	      irela = internal_relocs;
1353 	      irelaend = irela + section->reloc_count;
1354 	      for (; irela < irelaend; irela++)
1355 		{
1356 		  unsigned int r_type, r_indx;
1357 		  enum elf_kvx_stub_type stub_type;
1358 		  struct elf_kvx_stub_hash_entry *stub_entry;
1359 		  asection *sym_sec;
1360 		  bfd_vma sym_value;
1361 		  bfd_vma destination;
1362 		  struct elf_kvx_link_hash_entry *hash;
1363 		  const char *sym_name;
1364 		  char *stub_name;
1365 		  const asection *id_sec;
1366 		  unsigned char st_type;
1367 		  bfd_size_type len;
1368 
1369 		  r_type = ELFNN_R_TYPE (irela->r_info);
1370 		  r_indx = ELFNN_R_SYM (irela->r_info);
1371 
1372 		  if (r_type >= (unsigned int) R_KVX_end)
1373 		    {
1374 		      bfd_set_error (bfd_error_bad_value);
1375 		    error_ret_free_internal:
1376 		      if (elf_section_data (section)->relocs == NULL)
1377 			free (internal_relocs);
1378 		      goto error_ret_free_local;
1379 		    }
1380 
1381 		  /* Only look for stubs on unconditional branch and
1382 		     branch and link instructions.  */
1383 		  /* This catches CALL and GOTO insn */
1384 		  if (r_type != (unsigned int) R_KVX_PCREL27)
1385 		    continue;
1386 
1387 		  /* Now determine the call target, its name, value,
1388 		     section.  */
1389 		  sym_sec = NULL;
1390 		  sym_value = 0;
1391 		  destination = 0;
1392 		  hash = NULL;
1393 		  sym_name = NULL;
1394 		  if (r_indx < symtab_hdr->sh_info)
1395 		    {
1396 		      /* It's a local symbol.  */
1397 		      Elf_Internal_Sym *sym;
1398 		      Elf_Internal_Shdr *hdr;
1399 
1400 		      if (local_syms == NULL)
1401 			{
1402 			  local_syms
1403 			    = (Elf_Internal_Sym *) symtab_hdr->contents;
1404 			  if (local_syms == NULL)
1405 			    local_syms
1406 			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
1407 						      symtab_hdr->sh_info, 0,
1408 						      NULL, NULL, NULL);
1409 			  if (local_syms == NULL)
1410 			    goto error_ret_free_internal;
1411 			}
1412 
1413 		      sym = local_syms + r_indx;
1414 		      hdr = elf_elfsections (input_bfd)[sym->st_shndx];
1415 		      sym_sec = hdr->bfd_section;
1416 		      if (!sym_sec)
1417 			/* This is an undefined symbol.  It can never
1418 			   be resolved.  */
1419 			continue;
1420 
1421 		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
1422 			sym_value = sym->st_value;
1423 		      destination = (sym_value + irela->r_addend
1424 				     + sym_sec->output_offset
1425 				     + sym_sec->output_section->vma);
1426 		      st_type = ELF_ST_TYPE (sym->st_info);
1427 		      sym_name
1428 			= bfd_elf_string_from_elf_section (input_bfd,
1429 							   symtab_hdr->sh_link,
1430 							   sym->st_name);
1431 		    }
1432 		  else
1433 		    {
1434 		      int e_indx;
1435 
1436 		      e_indx = r_indx - symtab_hdr->sh_info;
1437 		      hash = ((struct elf_kvx_link_hash_entry *)
1438 			      elf_sym_hashes (input_bfd)[e_indx]);
1439 
1440 		      while (hash->root.root.type == bfd_link_hash_indirect
1441 			     || hash->root.root.type == bfd_link_hash_warning)
1442 			hash = ((struct elf_kvx_link_hash_entry *)
1443 				hash->root.root.u.i.link);
1444 
1445 		      if (hash->root.root.type == bfd_link_hash_defined
1446 			  || hash->root.root.type == bfd_link_hash_defweak)
1447 			{
1448 			  struct elf_kvx_link_hash_table *globals =
1449 			    elf_kvx_hash_table (info);
1450 			  sym_sec = hash->root.root.u.def.section;
1451 			  sym_value = hash->root.root.u.def.value;
1452 			  /* For a destination in a shared library,
1453 			     use the PLT stub as target address to
1454 			     decide whether a branch stub is
1455 			     needed.  */
1456 			  if (globals->root.splt != NULL && hash != NULL
1457 			      && hash->root.plt.offset != (bfd_vma) - 1)
1458 			    {
1459 			      sym_sec = globals->root.splt;
1460 			      sym_value = hash->root.plt.offset;
1461 			      if (sym_sec->output_section != NULL)
1462 				destination = (sym_value
1463 					       + sym_sec->output_offset
1464 					       + sym_sec->output_section->vma);
1465 			    }
1466 			  else if (sym_sec->output_section != NULL)
1467 			    destination = (sym_value + irela->r_addend
1468 					   + sym_sec->output_offset
1469 					   + sym_sec->output_section->vma);
1470 			}
1471 		      else if (hash->root.root.type == bfd_link_hash_undefined
1472 			       || (hash->root.root.type
1473 				   == bfd_link_hash_undefweak))
1474 			{
1475 			  /* For a shared library, use the PLT stub as
1476 			     target address to decide whether a long
1477 			     branch stub is needed.
1478 			     For absolute code, they cannot be handled.  */
1479 			  struct elf_kvx_link_hash_table *globals =
1480 			    elf_kvx_hash_table (info);
1481 
1482 			  if (globals->root.splt != NULL && hash != NULL
1483 			      && hash->root.plt.offset != (bfd_vma) - 1)
1484 			    {
1485 			      sym_sec = globals->root.splt;
1486 			      sym_value = hash->root.plt.offset;
1487 			      if (sym_sec->output_section != NULL)
1488 				destination = (sym_value
1489 					       + sym_sec->output_offset
1490 					       + sym_sec->output_section->vma);
1491 			    }
1492 			  else
1493 			    continue;
1494 			}
1495 		      else
1496 			{
1497 			  bfd_set_error (bfd_error_bad_value);
1498 			  goto error_ret_free_internal;
1499 			}
1500 		      st_type = ELF_ST_TYPE (hash->root.type);
1501 		      sym_name = hash->root.root.root.string;
1502 		    }
1503 
1504 		  /* Determine what (if any) linker stub is needed.  */
1505 		  stub_type = kvx_type_of_stub (section, irela, sym_sec,
1506 						st_type, destination);
1507 		  if (stub_type == kvx_stub_none)
1508 		    continue;
1509 
1510 		  /* Support for grouping stub sections.  */
1511 		  id_sec = htab->stub_group[section->id].link_sec;
1512 
1513 		  /* Get the name of this stub.  */
1514 		  stub_name = elfNN_kvx_stub_name (id_sec, sym_sec, hash,
1515 						  irela);
1516 		  if (!stub_name)
1517 		    goto error_ret_free_internal;
1518 
1519 		  stub_entry =
1520 		    kvx_stub_hash_lookup (&htab->stub_hash_table,
1521 					 stub_name, false, false);
1522 		  if (stub_entry != NULL)
1523 		    {
1524 		      /* The proper stub has already been created.  */
1525 		      free (stub_name);
1526 		      /* Always update this stub's target since it may have
1527 			 changed after layout.  */
1528 		      stub_entry->target_value = sym_value + irela->r_addend;
1529 		      continue;
1530 		    }
1531 
1532 		  stub_entry = _bfd_kvx_add_stub_entry_in_group
1533 		    (stub_name, section, htab);
1534 		  if (stub_entry == NULL)
1535 		    {
1536 		      free (stub_name);
1537 		      goto error_ret_free_internal;
1538 		    }
1539 
1540 		  stub_entry->target_value = sym_value + irela->r_addend;
1541 		  stub_entry->target_section = sym_sec;
1542 		  stub_entry->stub_type = stub_type;
1543 		  stub_entry->h = hash;
1544 		  stub_entry->st_type = st_type;
1545 
1546 		  if (sym_name == NULL)
1547 		    sym_name = "unnamed";
1548 		  len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
1549 		  stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
1550 		  if (stub_entry->output_name == NULL)
1551 		    {
1552 		      free (stub_name);
1553 		      goto error_ret_free_internal;
1554 		    }
1555 
1556 		  snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
1557 			    sym_name);
1558 
1559 		  stub_changed = true;
1560 		}
1561 
1562 	      /* We're done with the internal relocs, free them.  */
1563 	      if (elf_section_data (section)->relocs == NULL)
1564 		free (internal_relocs);
1565 	    }
1566 	}
1567 
1568       if (!stub_changed)
1569 	break;
1570 
1571       _bfd_kvx_resize_stubs (htab);
1572 
1573       /* Ask the linker to do its stuff.  */
1574       (*htab->layout_sections_again) ();
1575       stub_changed = false;
1576     }
1577 
1578   return true;
1579 
1580 error_ret_free_local:
1581   return false;
1582 
1583 }
1584 
1585 /* Build all the stubs associated with the current output file.  The
1586    stubs are kept in a hash table attached to the main linker hash
1587    table.  We also set up the .plt entries for statically linked PIC
1588    functions here.  This function is called via kvx_elf_finish in the
1589    linker.  */
1590 
1591 bool
1592 elfNN_kvx_build_stubs (struct bfd_link_info *info)
1593 {
1594   asection *stub_sec;
1595   struct bfd_hash_table *table;
1596   struct elf_kvx_link_hash_table *htab;
1597 
1598   htab = elf_kvx_hash_table (info);
1599 
1600   for (stub_sec = htab->stub_bfd->sections;
1601        stub_sec != NULL; stub_sec = stub_sec->next)
1602     {
1603       bfd_size_type size;
1604 
1605       /* Ignore non-stub sections.  */
1606       if (!strstr (stub_sec->name, STUB_SUFFIX))
1607 	continue;
1608 
1609       /* Allocate memory to hold the linker stubs.  */
1610       size = stub_sec->size;
1611       stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
1612       if (stub_sec->contents == NULL && size != 0)
1613 	return false;
1614       stub_sec->size = 0;
1615     }
1616 
1617   /* Build the stubs as directed by the stub hash table.  */
1618   table = &htab->stub_hash_table;
1619   bfd_hash_traverse (table, kvx_build_one_stub, info);
1620 
1621   return true;
1622 }
1623 
1624 static bfd_vma
1625 kvx_calculate_got_entry_vma (struct elf_link_hash_entry *h,
1626 				 struct elf_kvx_link_hash_table
1627 				 *globals, struct bfd_link_info *info,
1628 				 bfd_vma value, bfd *output_bfd,
1629 				 bool *unresolved_reloc_p)
1630 {
1631   bfd_vma off = (bfd_vma) - 1;
1632   asection *basegot = globals->root.sgot;
1633   bool dyn = globals->root.dynamic_sections_created;
1634 
1635   if (h != NULL)
1636     {
1637       BFD_ASSERT (basegot != NULL);
1638       off = h->got.offset;
1639       BFD_ASSERT (off != (bfd_vma) - 1);
1640       if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
1641 	  || (bfd_link_pic (info)
1642 	      && SYMBOL_REFERENCES_LOCAL (info, h))
1643 	  || (ELF_ST_VISIBILITY (h->other)
1644 	      && h->root.type == bfd_link_hash_undefweak))
1645 	{
1646 	  /* This is actually a static link, or it is a -Bsymbolic link
1647 	     and the symbol is defined locally.  We must initialize this
1648 	     entry in the global offset table.  Since the offset must
1649 	     always be a multiple of 8 (4 in the case of ILP32), we use
1650 	     the least significant bit to record whether we have
1651 	     initialized it already.
1652 	     When doing a dynamic link, we create a .rel(a).got relocation
1653 	     entry to initialize the value.  This is done in the
1654 	     finish_dynamic_symbol routine.  */
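	  /* For example, with GOT_ENTRY_SIZE == 8 a recorded h->got.offset
	     of 0x19 denotes the GOT slot at offset 0x18 and records that
	     it has already been written; clearing the low bit below
	     recovers the real offset.  */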
1655 	  if ((off & 1) != 0)
1656 	    off &= ~1;
1657 	  else
1658 	    {
1659 	      bfd_put_NN (output_bfd, value, basegot->contents + off);
1660 	      h->got.offset |= 1;
1661 	    }
1662 	}
1663       else
1664 	*unresolved_reloc_p = false;
1665     }
1666 
1667   return off;
1668 }
1669 
1670 static unsigned int
1671 kvx_reloc_got_type (bfd_reloc_code_real_type r_type)
1672 {
1673   switch (r_type)
1674     {
1675       /* Extracted with:
1676 	 awk 'match ($0, /HOWTO.*R_(KVX.*_GOT(OFF)?(64)?_.*),/,ary) \
1677 	 {print "case BFD_RELOC_" ary[1] ":";}' elfxx-kvxc.def  */
1678     case BFD_RELOC_KVX_S37_GOTOFF_LO10:
1679     case BFD_RELOC_KVX_S37_GOTOFF_UP27:
1680 
1681     case BFD_RELOC_KVX_S37_GOT_LO10:
1682     case BFD_RELOC_KVX_S37_GOT_UP27:
1683 
1684     case BFD_RELOC_KVX_S43_GOTOFF_LO10:
1685     case BFD_RELOC_KVX_S43_GOTOFF_UP27:
1686     case BFD_RELOC_KVX_S43_GOTOFF_EX6:
1687 
1688     case BFD_RELOC_KVX_S43_GOT_LO10:
1689     case BFD_RELOC_KVX_S43_GOT_UP27:
1690     case BFD_RELOC_KVX_S43_GOT_EX6:
1691       return GOT_NORMAL;
1692 
1693     case BFD_RELOC_KVX_S37_TLS_GD_LO10:
1694     case BFD_RELOC_KVX_S37_TLS_GD_UP27:
1695     case BFD_RELOC_KVX_S43_TLS_GD_LO10:
1696     case BFD_RELOC_KVX_S43_TLS_GD_UP27:
1697     case BFD_RELOC_KVX_S43_TLS_GD_EX6:
1698       return GOT_TLS_GD;
1699 
1700     case BFD_RELOC_KVX_S37_TLS_LD_LO10:
1701     case BFD_RELOC_KVX_S37_TLS_LD_UP27:
1702     case BFD_RELOC_KVX_S43_TLS_LD_LO10:
1703     case BFD_RELOC_KVX_S43_TLS_LD_UP27:
1704     case BFD_RELOC_KVX_S43_TLS_LD_EX6:
1705       return GOT_TLS_LD;
1706 
1707     case BFD_RELOC_KVX_S37_TLS_IE_LO10:
1708     case BFD_RELOC_KVX_S37_TLS_IE_UP27:
1709     case BFD_RELOC_KVX_S43_TLS_IE_LO10:
1710     case BFD_RELOC_KVX_S43_TLS_IE_UP27:
1711     case BFD_RELOC_KVX_S43_TLS_IE_EX6:
1712       return GOT_TLS_IE;
1713 
1714     default:
1715       break;
1716     }
1717   return GOT_UNKNOWN;
1718 }
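
/* The classes returned above drive the GOT handling elsewhere in this
   backend: GOT_NORMAL and GOT_TLS_IE symbols use a single GOT slot,
   while GOT_TLS_GD and GOT_TLS_LD entries occupy two consecutive slots
   (the DTPMOD word at the recorded offset and the DTPOFF word at offset
   + GOT_ENTRY_SIZE, as written in elfNN_kvx_relocate_section below).  */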
1719 
1720 static bool
1721 kvx_can_relax_tls (bfd *input_bfd ATTRIBUTE_UNUSED,
1722 		       struct bfd_link_info *info ATTRIBUTE_UNUSED,
1723 		       bfd_reloc_code_real_type r_type ATTRIBUTE_UNUSED,
1724 		       struct elf_link_hash_entry *h ATTRIBUTE_UNUSED,
1725 		       unsigned long r_symndx ATTRIBUTE_UNUSED)
1726 {
1727   if (! IS_KVX_TLS_RELAX_RELOC (r_type))
1728     return false;
1729 
1730   /* Relaxing hook. Disabled on KVX. */
1731   /* See elfnn-aarch64.c */
1732   return true;
1733 }
1734 
1735 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
1736    enumerator.  */
1737 
1738 static bfd_reloc_code_real_type
1739 kvx_tls_transition (bfd *input_bfd,
1740 			struct bfd_link_info *info,
1741 			unsigned int r_type,
1742 			struct elf_link_hash_entry *h,
1743 			unsigned long r_symndx)
1744 {
1745   bfd_reloc_code_real_type bfd_r_type
1746     = elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type);
1747 
1748   if (! kvx_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
1749     return bfd_r_type;
1750 
1751   return bfd_r_type;
1752 }
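
/* Note that IS_KVX_TLS_RELAX_RELOC is defined to 0, so kvx_can_relax_tls
   always returns false and kvx_tls_transition returns the incoming
   relocation type unchanged; the hook is presumably kept as a placeholder
   for future TLS relaxation support, mirroring elfnn-aarch64.c.  */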
1753 
1754 /* Return the base VMA address which should be subtracted from real addresses
1755    when resolving R_KVX_*_TLS_GD_* and R_KVX_*_TLS_LD_* relocation.  */
1756 
1757 static bfd_vma
1758 dtpoff_base (struct bfd_link_info *info)
1759 {
1760   /* If tls_sec is NULL, we should have signalled an error already.  */
1761   BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
1762   return elf_hash_table (info)->tls_sec->vma;
1763 }
1764 
1765 /* Return the base VMA address which should be subtracted from real addresses
1766    when resolving R_KVX_*_TLS_IE_* and R_KVX_*_TLS_LE_* relocations.  */
1767 
1768 static bfd_vma
1769 tpoff_base (struct bfd_link_info *info)
1770 {
1771   struct elf_link_hash_table *htab = elf_hash_table (info);
1772 
1773   /* If tls_sec is NULL, we should have signalled an error already.  */
1774   BFD_ASSERT (htab->tls_sec != NULL);
1775 
1776   bfd_vma base = align_power ((bfd_vma) 0,
1777 			      htab->tls_sec->alignment_power);
1778   return htab->tls_sec->vma - base;
1779 }
1780 
1781 static bfd_vma *
1782 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
1783 		       unsigned long r_symndx)
1784 {
1785   /* Return a pointer to the GOT offset slot for the symbol referred
1786      to by H, or for the local symbol R_SYMNDX when H is NULL.  */
1787   if (h != NULL)
1788     return &h->got.offset;
1789   else
1790     {
1791       /* local symbol */
1792       struct elf_kvx_local_symbol *l;
1793 
1794       l = elf_kvx_locals (input_bfd);
1795       return &l[r_symndx].got_offset;
1796     }
1797 }
1798 
1799 static void
1800 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
1801 			unsigned long r_symndx)
1802 {
1803   bfd_vma *p;
1804   p = symbol_got_offset_ref (input_bfd, h, r_symndx);
1805   *p |= 1;
1806 }
1807 
1808 static int
1809 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
1810 			  unsigned long r_symndx)
1811 {
1812   bfd_vma value;
1813   value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
1814   return value & 1;
1815 }
1816 
1817 static bfd_vma
1818 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
1819 		   unsigned long r_symndx)
1820 {
1821   bfd_vma value;
1822   value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
1823   value &= ~1;
1824   return value;
1825 }
1826 
1827 /* N_ONES produces N one bits, without overflowing machine arithmetic.  */
1828 #define N_ONES(n) (((((bfd_vma) 1 << ((n) -1)) - 1) << 1) | 1)
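
/* For example, N_ONES (3) == 0x7 and N_ONES (64) == ~(bfd_vma) 0.
   Shifting by (n) - 1 first and then by 1 avoids shifting a bfd_vma by
   its full width, which would be undefined behaviour for n == 64.  */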
1829 
1830 /* This is a copy/paste + modification of
1831    reloc.c:_bfd_relocate_contents.  Relocations are applied to 32-bit
1832    words, so the generic overflow checks would spuriously report an
1833    overflow for any value wider than 32 bits.  */
1834 static bfd_reloc_status_type
1835 check_signed_overflow (enum complain_overflow complain_on_overflow,
1836 		       bfd_reloc_code_real_type bfd_r_type, bfd *input_bfd,
1837 		       bfd_vma relocation)
1838 {
1839   bfd_reloc_status_type flag = bfd_reloc_ok;
1840   bfd_vma addrmask, fieldmask, signmask, ss;
1841   bfd_vma a, b, sum;
1842   bfd_vma x = 0;
1843 
1844   /* These usually come from howto struct. As we don't check for
1845      values fitting in bitfields or in subpart of words, we set all
1846      these to values to check as if the field is starting from first
1847      bit.  */
1848   unsigned int rightshift = 0;
1849   unsigned int bitpos = 0;
1850   unsigned int bitsize = 0;
1851   bfd_vma src_mask = -1;
1852 
1853   /* Only regular symbol relocations are checked here.  Other
1854      relocations (GOT, TLS) could be checked if the need is
1855      confirmed.  At the moment, we keep the previous behavior
1856      (i.e. unchecked) for those.  */
1857   switch (bfd_r_type)
1858     {
1859     case BFD_RELOC_KVX_S37_LO10:
1860     case BFD_RELOC_KVX_S37_UP27:
1861       bitsize = 37;
1862       break;
1863 
1864     case BFD_RELOC_KVX_S32_LO5:
1865     case BFD_RELOC_KVX_S32_UP27:
1866       bitsize = 32;
1867       break;
1868 
1869     case BFD_RELOC_KVX_S43_LO10:
1870     case BFD_RELOC_KVX_S43_UP27:
1871     case BFD_RELOC_KVX_S43_EX6:
1872       bitsize = 43;
1873       break;
1874 
1875     case BFD_RELOC_KVX_S64_LO10:
1876     case BFD_RELOC_KVX_S64_UP27:
1877     case BFD_RELOC_KVX_S64_EX27:
1878       bitsize = 64;
1879       break;
1880 
1881     default:
1882       return bfd_reloc_ok;
1883     }
1884 
1885   /* direct copy/paste from reloc.c below */
1886 
1887   /* Get the values to be added together.  For signed and unsigned
1888      relocations, we assume that all values should be truncated to
1889      the size of an address.  For bitfields, all the bits matter.
1890      See also bfd_check_overflow.  */
1891   fieldmask = N_ONES (bitsize);
1892   signmask = ~fieldmask;
1893   addrmask = (N_ONES (bfd_arch_bits_per_address (input_bfd))
1894 	      | (fieldmask << rightshift));
1895   a = (relocation & addrmask) >> rightshift;
1896   b = (x & src_mask & addrmask) >> bitpos;
1897   addrmask >>= rightshift;
1898 
1899   switch (complain_on_overflow)
1900     {
1901     case complain_overflow_signed:
1902       /* If any sign bits are set, all sign bits must be set.
1903 	 That is, A must be a valid negative address after
1904 	 shifting.  */
1905       signmask = ~(fieldmask >> 1);
1906       /* Fall thru */
1907 
1908     case complain_overflow_bitfield:
1909       /* Much like the signed check, but for a field one bit
1910 	 wider.  We allow a bitfield to represent numbers in the
1911 	 range -2**n to 2**n-1, where n is the number of bits in the
1912 	 field.  Note that when bfd_vma is 32 bits, a 32-bit reloc
1913 	 can't overflow, which is exactly what we want.  */
1914       ss = a & signmask;
1915       if (ss != 0 && ss != (addrmask & signmask))
1916 	flag = bfd_reloc_overflow;
1917 
1918       /* We only need this next bit of code if the sign bit of B
1919 	 is below the sign bit of A.  This would only happen if
1920 	 SRC_MASK had fewer bits than BITSIZE.  Note that if
1921 	 SRC_MASK has more bits than BITSIZE, we can get into
1922 	 trouble; we would need to verify that B is in range, as
1923 	 we do for A above.  */
1924       ss = ((~src_mask) >> 1) & src_mask;
1925       ss >>= bitpos;
1926 
1927       /* Set all the bits above the sign bit.  */
1928       b = (b ^ ss) - ss;
1929 
1930       /* Now we can do the addition.  */
1931       sum = a + b;
1932 
1933       /* See if the result has the correct sign.  Bits above the
1934 	 sign bit are junk now; ignore them.  If the sum is
1935 	 positive, make sure we did not have all negative inputs;
1936 	 if the sum is negative, make sure we did not have all
1937 	 positive inputs.  The test below looks only at the sign
1938 	 bits, and it really just
1939 	 SIGN (A) == SIGN (B) && SIGN (A) != SIGN (SUM)
1940 
1941 	 We mask with addrmask here to explicitly allow an address
1942 	 wrap-around.  The Linux kernel relies on it, and it is
1943 	 the only way to write assembler code which can run when
1944 	 loaded at a location 0x80000000 away from the location at
1945 	 which it is linked.  */
1946       if (((~(a ^ b)) & (a ^ sum)) & signmask & addrmask)
1947 	flag = bfd_reloc_overflow;
1948       break;
1949 
1950     case complain_overflow_unsigned:
1951       /* Checking for an unsigned overflow is relatively easy:
1952 	 trim the addresses and add, and trim the result as well.
1953 	 Overflow is normally indicated when the result does not
1954 	 fit in the field.  However, we also need to consider the
1955 	 case when, e.g., fieldmask is 0x7fffffff or smaller, an
1956 	 input is 0x80000000, and bfd_vma is only 32 bits; then we
1957 	 will get sum == 0, but there is an overflow, since the
1958 	 inputs did not fit in the field.  Instead of doing a
1959 	 separate test, we can check for this by or-ing in the
1960 	 operands when testing for the sum overflowing its final
1961 	 field.  */
1962       sum = (a + b) & addrmask;
1963       if ((a | b | sum) & signmask)
1964 	flag = bfd_reloc_overflow;
1965       break;
1966 
1967     default:
1968       abort ();
1969     }
1970   return flag;
1971 }
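
/* As an illustration, for BFD_RELOC_KVX_S37_LO10 (bitsize 37) a value
   whose bits from bit 36 upward are neither all zeroes nor all ones,
   e.g. (bfd_vma) 1 << 40, is rejected with bfd_reloc_overflow under
   complain_overflow_signed, while any value that sign-extends cleanly
   from 37 bits is accepted.  */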
1972 
1973 /* Perform a relocation as part of a final link.  */
1974 static bfd_reloc_status_type
1975 elfNN_kvx_final_link_relocate (reloc_howto_type *howto,
1976 			       bfd *input_bfd,
1977 			       bfd *output_bfd,
1978 			       asection *input_section,
1979 			       bfd_byte *contents,
1980 			       Elf_Internal_Rela *rel,
1981 			       bfd_vma value,
1982 			       struct bfd_link_info *info,
1983 			       asection *sym_sec,
1984 			       struct elf_link_hash_entry *h,
1985 			       bool *unresolved_reloc_p,
1986 			       bool save_addend,
1987 			       bfd_vma *saved_addend,
1988 			       Elf_Internal_Sym *sym)
1989 {
1990   Elf_Internal_Shdr *symtab_hdr;
1991   unsigned int r_type = howto->type;
1992   bfd_reloc_code_real_type bfd_r_type
1993     = elfNN_kvx_bfd_reloc_from_howto (howto);
1994   bfd_reloc_code_real_type new_bfd_r_type;
1995   unsigned long r_symndx;
1996   bfd_byte *hit_data = contents + rel->r_offset;
1997   bfd_vma place, off;
1998   bfd_vma addend;
1999   struct elf_kvx_link_hash_table *globals;
2000   bool weak_undef_p;
2001   asection *base_got;
2002   bfd_reloc_status_type rret = bfd_reloc_ok;
2003   bool resolved_to_zero;
2004   globals = elf_kvx_hash_table (info);
2005 
2006   symtab_hdr = &elf_symtab_hdr (input_bfd);
2007 
2008   BFD_ASSERT (is_kvx_elf (input_bfd));
2009 
2010   r_symndx = ELFNN_R_SYM (rel->r_info);
2011 
2012   /* It is possible to have linker relaxations on some TLS access
2013      models.  Update our information here.  */
2014   new_bfd_r_type = kvx_tls_transition (input_bfd, info, r_type, h, r_symndx);
2015   if (new_bfd_r_type != bfd_r_type)
2016     {
2017       bfd_r_type = new_bfd_r_type;
2018       howto = elfNN_kvx_howto_from_bfd_reloc (bfd_r_type);
2019       BFD_ASSERT (howto != NULL);
2020       r_type = howto->type;
2021     }
2022 
2023   place = input_section->output_section->vma
2024     + input_section->output_offset + rel->r_offset;
2025 
2026   /* Get addend, accumulating the addend for consecutive relocs
2027      which refer to the same offset.  */
2028   addend = saved_addend ? *saved_addend : 0;
2029   addend += rel->r_addend;
2030 
2031   weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
2032 		  : bfd_is_und_section (sym_sec));
2033   resolved_to_zero = (h != NULL
2034 		      && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
2035 
2036   switch (bfd_r_type)
2037     {
2038     case BFD_RELOC_KVX_NN:
2039 #if ARCH_SIZE == 64
2040     case BFD_RELOC_KVX_32:
2041 #endif
2042     case BFD_RELOC_KVX_S37_LO10:
2043     case BFD_RELOC_KVX_S37_UP27:
2044 
2045     case BFD_RELOC_KVX_S32_LO5:
2046     case BFD_RELOC_KVX_S32_UP27:
2047 
2048     case BFD_RELOC_KVX_S43_LO10:
2049     case BFD_RELOC_KVX_S43_UP27:
2050     case BFD_RELOC_KVX_S43_EX6:
2051 
2052     case BFD_RELOC_KVX_S64_LO10:
2053     case BFD_RELOC_KVX_S64_UP27:
2054     case BFD_RELOC_KVX_S64_EX27:
2055       /* When generating a shared object or relocatable executable, these
2056 	 relocations are copied into the output file to be resolved at
2057 	 run time.  */
2058       if (((bfd_link_pic (info) == true)
2059 	   || globals->root.is_relocatable_executable)
2060 	  && (input_section->flags & SEC_ALLOC)
2061 	  && (h == NULL
2062 	      || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2063 		  && !resolved_to_zero)
2064 	      || h->root.type != bfd_link_hash_undefweak))
2065 	{
2066 	  Elf_Internal_Rela outrel;
2067 	  bfd_byte *loc;
2068 	  bool skip, relocate;
2069 	  asection *sreloc;
2070 
2071 	  *unresolved_reloc_p = false;
2072 
2073 	  skip = false;
2074 	  relocate = false;
2075 
2076 	  outrel.r_addend = addend;
2077 	  outrel.r_offset =
2078 	    _bfd_elf_section_offset (output_bfd, info, input_section,
2079 				     rel->r_offset);
2080 	  if (outrel.r_offset == (bfd_vma) - 1)
2081 	    skip = true;
2082 	  else if (outrel.r_offset == (bfd_vma) - 2)
2083 	    {
2084 	      skip = true;
2085 	      relocate = true;
2086 	    }
2087 
2088 	  outrel.r_offset += (input_section->output_section->vma
2089 			      + input_section->output_offset);
2090 
2091 	  if (skip)
2092 	    memset (&outrel, 0, sizeof outrel);
2093 	  else if (h != NULL
2094 		   && h->dynindx != -1
2095 		   && (!bfd_link_pic (info) || !info->symbolic
2096 		       || !h->def_regular))
2097 	    outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
2098 	  else if (bfd_r_type == BFD_RELOC_KVX_32
2099 		   || bfd_r_type == BFD_RELOC_KVX_64)
2100 	    {
2101 	      int symbol;
2102 
2103 	      /* On SVR4-ish systems, the dynamic loader cannot
2104 		 relocate the text and data segments independently,
2105 		 so the symbol does not matter.  */
2106 	      symbol = 0;
2107 	      outrel.r_info = ELFNN_R_INFO (symbol, R_KVX_RELATIVE);
2108 	      outrel.r_addend += value;
2109 	    }
2110 	  else if (bfd_link_pic (info) && info->symbolic)
2111 	    {
2112 	      goto skip_because_pic;
2113 	    }
2114 	  else
2115 	    {
2116 	      /* We may end up here from bad input code trying to
2117 		 apply a relocation to a symbol within code.  We do not
2118 		 want that currently; such code should use GOT +
2119 		 KVX_32/64 relocs, which translate into KVX_RELATIVE.  */
2120 	      const char *name;
2121 	      if (h && h->root.root.string)
2122 		name = h->root.root.string;
2123 	      else
2124 		name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2125 					 NULL);
2126 
2127 	      (*_bfd_error_handler)
2128 		/* xgettext:c-format */
2129 		(_("%pB(%pA+%#" PRIx64 "): "
2130 		   "unresolvable %s relocation in section `%s'"),
2131 		 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
2132 		 name);
2133 	      return bfd_reloc_notsupported;
2134 	    }
2135 
2136 	  sreloc = elf_section_data (input_section)->sreloc;
2137 	  if (sreloc == NULL || sreloc->contents == NULL)
2138 	    return bfd_reloc_notsupported;
2139 
2140 	  loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
2141 	  bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
2142 
2143 	  if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
2144 	    {
2145 	      /* Sanity check that we have previously allocated
2146 		 sufficient space in the relocation section for the
2147 		 number of relocations we actually want to emit.  */
2148 	      abort ();
2149 	    }
2150 
2151 	  /* If this reloc is against an external symbol, we do not want to
2152 	     fiddle with the addend.  Otherwise, we need to include the symbol
2153 	     value so that it becomes an addend for the dynamic reloc.  */
2154 	  if (!relocate)
2155 	    return bfd_reloc_ok;
2156 
2157 	  rret = check_signed_overflow (complain_overflow_signed, bfd_r_type,
2158 					input_bfd, value + addend);
2159 	  if (rret != bfd_reloc_ok)
2160 	    return rret;
2161 
2162 	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
2163 					   contents, rel->r_offset, value,
2164 					   addend);
2165 	}
2166 
2167     skip_because_pic:
2168       rret = check_signed_overflow (complain_overflow_signed, bfd_r_type,
2169 				    input_bfd, value + addend);
2170       if (rret != bfd_reloc_ok)
2171 	return rret;
2172 
2173       return _bfd_final_link_relocate (howto, input_bfd, input_section,
2174 				       contents, rel->r_offset, value,
2175 				       addend);
2176       break;
2177 
2178     case BFD_RELOC_KVX_PCREL17:
2179     case BFD_RELOC_KVX_PCREL27:
2180       {
2181 	/* BCU insns are always first in a bundle, so there is no need
2182 	   to correct the address using the offset within the bundle.  */
2183 
2184 	asection *splt = globals->root.splt;
2185 	bool via_plt_p =
2186 	  splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
2187 
2188 	/* A call to an undefined weak symbol is converted to a jump to
2189 	   the next instruction unless a PLT entry will be created.
2190 	   The jump to the next instruction is optimized as a NOP.
2191 	   Do the same for local undefined symbols.  */
2192 	if (weak_undef_p && ! via_plt_p)
2193 	  {
2194 	    bfd_putl32 (INSN_NOP, hit_data);
2195 	    return bfd_reloc_ok;
2196 	  }
2197 
2198 	/* If the call goes through a PLT entry, make sure to
2199 	   check distance to the right destination address.  */
2200 	if (via_plt_p)
2201 	  value = (splt->output_section->vma
2202 		   + splt->output_offset + h->plt.offset);
2203 
2204 	/* Check if a stub has to be inserted because the destination
2205 	   is too far away.  */
2206 	struct elf_kvx_stub_hash_entry *stub_entry = NULL;
2207 
2208 	/* If the target symbol is global and marked as a function, the
2209 	   relocation applies to a function call or a tail call.  In this
2210 	   situation we can veneer out-of-range branches.  The veneers
2211 	   use R16 and R17, hence cannot be used for arbitrary out-of-range
2212 	   branches that occur within the body of a function.  */
2213 
2216 	if (! kvx_valid_call_p (value, place))
2217 	  {
2218 	    /* The target is out of reach, so redirect the branch to
2219 	       the local stub for this function.  */
2220 	    stub_entry = elfNN_kvx_get_stub_entry (input_section,
2221 						   sym_sec, h,
2222 						   rel, globals);
2223 	    if (stub_entry != NULL)
2224 	      value = (stub_entry->stub_offset
2225 		       + stub_entry->stub_sec->output_offset
2226 		       + stub_entry->stub_sec->output_section->vma);
2227 	    /* We have redirected the destination to stub entry address,
2228 	       so ignore any addend record in the original rela entry.  */
2229 	    addend = 0;
2230 	  }
2231       }
2232       *unresolved_reloc_p = false;
2233 
2234       /* FALLTHROUGH */
2235 
2236       /* PCREL 32 relocs are used in dwarf2 tables for exception handling.  */
2237     case BFD_RELOC_KVX_32_PCREL:
2238     case BFD_RELOC_KVX_S64_PCREL_LO10:
2239     case BFD_RELOC_KVX_S64_PCREL_UP27:
2240     case BFD_RELOC_KVX_S64_PCREL_EX27:
2241     case BFD_RELOC_KVX_S37_PCREL_LO10:
2242     case BFD_RELOC_KVX_S37_PCREL_UP27:
2243     case BFD_RELOC_KVX_S43_PCREL_LO10:
2244     case BFD_RELOC_KVX_S43_PCREL_UP27:
2245     case BFD_RELOC_KVX_S43_PCREL_EX6:
2246       return _bfd_final_link_relocate (howto, input_bfd, input_section,
2247 				       contents, rel->r_offset, value,
2248 				       addend);
2249       break;
2250 
2251     case BFD_RELOC_KVX_S37_TLS_LE_LO10:
2252     case BFD_RELOC_KVX_S37_TLS_LE_UP27:
2253 
2254     case BFD_RELOC_KVX_S43_TLS_LE_LO10:
2255     case BFD_RELOC_KVX_S43_TLS_LE_UP27:
2256     case BFD_RELOC_KVX_S43_TLS_LE_EX6:
2257       return _bfd_final_link_relocate (howto, input_bfd, input_section,
2258 				       contents, rel->r_offset,
2259 				       value - tpoff_base (info), addend);
2260       break;
2261 
2262     case BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10:
2263     case BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27:
2264 
2265     case BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10:
2266     case BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27:
2267     case BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6:
2268       return _bfd_final_link_relocate (howto, input_bfd, input_section,
2269 				       contents, rel->r_offset,
2270 				       value - dtpoff_base (info), addend);
2271 
2272     case BFD_RELOC_KVX_S37_TLS_GD_UP27:
2273     case BFD_RELOC_KVX_S37_TLS_GD_LO10:
2274 
2275     case BFD_RELOC_KVX_S43_TLS_GD_UP27:
2276     case BFD_RELOC_KVX_S43_TLS_GD_EX6:
2277     case BFD_RELOC_KVX_S43_TLS_GD_LO10:
2278 
2279     case BFD_RELOC_KVX_S37_TLS_IE_UP27:
2280     case BFD_RELOC_KVX_S37_TLS_IE_LO10:
2281 
2282     case BFD_RELOC_KVX_S43_TLS_IE_UP27:
2283     case BFD_RELOC_KVX_S43_TLS_IE_EX6:
2284     case BFD_RELOC_KVX_S43_TLS_IE_LO10:
2285 
2286     case BFD_RELOC_KVX_S37_TLS_LD_UP27:
2287     case BFD_RELOC_KVX_S37_TLS_LD_LO10:
2288 
2289     case BFD_RELOC_KVX_S43_TLS_LD_UP27:
2290     case BFD_RELOC_KVX_S43_TLS_LD_EX6:
2291     case BFD_RELOC_KVX_S43_TLS_LD_LO10:
2292 
2293       if (globals->root.sgot == NULL)
2294 	return bfd_reloc_notsupported;
2295       value = symbol_got_offset (input_bfd, h, r_symndx);
2296 
2297       _bfd_final_link_relocate (howto, input_bfd, input_section,
2298 				contents, rel->r_offset, value, addend);
2299       *unresolved_reloc_p = false;
2300       break;
2301 
2302     case BFD_RELOC_KVX_S37_GOTADDR_UP27:
2303     case BFD_RELOC_KVX_S37_GOTADDR_LO10:
2304 
2305     case BFD_RELOC_KVX_S43_GOTADDR_UP27:
2306     case BFD_RELOC_KVX_S43_GOTADDR_EX6:
2307     case BFD_RELOC_KVX_S43_GOTADDR_LO10:
2308 
2309     case BFD_RELOC_KVX_S64_GOTADDR_UP27:
2310     case BFD_RELOC_KVX_S64_GOTADDR_EX27:
2311     case BFD_RELOC_KVX_S64_GOTADDR_LO10:
2312       {
2313 	if (globals->root.sgot == NULL)
2314 	  BFD_ASSERT (h != NULL);
2315 
2316 	value = globals->root.sgot->output_section->vma
2317 	  + globals->root.sgot->output_offset;
2318 
2319 	return _bfd_final_link_relocate (howto, input_bfd, input_section,
2320 					 contents, rel->r_offset, value,
2321 					 addend);
2322       }
2323       break;
2324 
2325     case BFD_RELOC_KVX_S37_GOTOFF_LO10:
2326     case BFD_RELOC_KVX_S37_GOTOFF_UP27:
2327 
2328     case BFD_RELOC_KVX_32_GOTOFF:
2329     case BFD_RELOC_KVX_64_GOTOFF:
2330 
2331     case BFD_RELOC_KVX_S43_GOTOFF_LO10:
2332     case BFD_RELOC_KVX_S43_GOTOFF_UP27:
2333     case BFD_RELOC_KVX_S43_GOTOFF_EX6:
2334 
2335       {
2336 	asection *basegot = globals->root.sgot;
2337 	/* BFD_ASSERT(h == NULL); */
2338 	BFD_ASSERT(globals->root.sgot != NULL);
2339 	value -= basegot->output_section->vma + basegot->output_offset;
2340 	return _bfd_final_link_relocate (howto, input_bfd, input_section,
2341 					 contents, rel->r_offset, value,
2342 					 addend);
2343       }
2344       break;
2345 
2346     case BFD_RELOC_KVX_S37_GOT_LO10:
2347     case BFD_RELOC_KVX_S37_GOT_UP27:
2348 
2349     case BFD_RELOC_KVX_32_GOT:
2350     case BFD_RELOC_KVX_64_GOT:
2351 
2352     case BFD_RELOC_KVX_S43_GOT_LO10:
2353     case BFD_RELOC_KVX_S43_GOT_UP27:
2354     case BFD_RELOC_KVX_S43_GOT_EX6:
2355 
2356       if (globals->root.sgot == NULL)
2357 	BFD_ASSERT (h != NULL);
2358 
2359       if (h != NULL)
2360 	{
2361 	  value = kvx_calculate_got_entry_vma (h, globals, info, value,
2362 					       output_bfd,
2363 					       unresolved_reloc_p);
2364 #ifdef UGLY_DEBUG
2365 	  printf("GOT_LO/HI for %s, value %x\n", h->root.root.string, value);
2366 #endif
2367 
2368 	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
2369 					   contents, rel->r_offset, value,
2370 					   addend);
2371 	}
2372       else
2373 	{
2374 #ifdef UGLY_DEBUG
2375 	  printf("GOT_LO/HI with h NULL, initial value %x\n", value);
2376 #endif
2377 	  struct elf_kvx_local_symbol *locals = elf_kvx_locals (input_bfd);
2378 
2379 	  if (locals == NULL)
2380 	    {
2381 	      int howto_index = bfd_r_type - BFD_RELOC_KVX_RELOC_START;
2382 	      _bfd_error_handler
2383 		/* xgettext:c-format */
2384 		(_("%pB: local symbol descriptor table is NULL when applying "
2385 		   "relocation %s against local symbol"),
2386 		 input_bfd, elf_kvx_howto_table[howto_index].name);
2387 	      abort ();
2388 	    }
2389 
2390 	  off = symbol_got_offset (input_bfd, h, r_symndx);
2391 	  base_got = globals->root.sgot;
2392 	  bfd_vma got_entry_addr = (base_got->output_section->vma
2393 				    + base_got->output_offset + off);
2394 
2395 	  if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2396 	    {
2397 	      bfd_put_64 (output_bfd, value, base_got->contents + off);
2398 
2399 	      if (bfd_link_pic (info))
2400 		{
2401 		  asection *s;
2402 		  Elf_Internal_Rela outrel;
2403 
2404 		  /* For PIC executables and shared libraries we need
2405 		     to relocate the GOT entry at run time.  */
2406 		  s = globals->root.srelgot;
2407 		  if (s == NULL)
2408 		    abort ();
2409 
2410 		  outrel.r_offset = got_entry_addr;
2411 		  outrel.r_info = ELFNN_R_INFO (0, R_KVX_RELATIVE);
2412 		  outrel.r_addend = value;
2413 		  elf_append_rela (output_bfd, s, &outrel);
2414 		}
2415 
2416 	      symbol_got_offset_mark (input_bfd, h, r_symndx);
2417 	    }
2418 
2419 	  /* Update the relocation value to GOT entry addr as we have
2420 	     transformed the direct data access into an indirect data
2421 	     access through GOT.  */
2422 	  value = got_entry_addr;
2423 
2424 	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
2425 					   contents, rel->r_offset, off, 0);
2426 	}
2427       break;
2428 
2429     default:
2430       return bfd_reloc_notsupported;
2431     }
2432 
2433   if (saved_addend)
2434     *saved_addend = value;
2435 
2436   /* Only apply the final relocation in a sequence.  */
2437   if (save_addend)
2438     return bfd_reloc_continue;
2439 
2440   return _bfd_kvx_elf_put_addend (input_bfd, hit_data, bfd_r_type,
2441 				  howto, value);
2442 }
2443 
2444 
2445 
2446 /* Relocate a KVX ELF section.  */
2447 
2448 static int
2449 elfNN_kvx_relocate_section (bfd *output_bfd,
2450 			    struct bfd_link_info *info,
2451 			    bfd *input_bfd,
2452 			    asection *input_section,
2453 			    bfd_byte *contents,
2454 			    Elf_Internal_Rela *relocs,
2455 			    Elf_Internal_Sym *local_syms,
2456 			    asection **local_sections)
2457 {
2458   Elf_Internal_Shdr *symtab_hdr;
2459   struct elf_link_hash_entry **sym_hashes;
2460   Elf_Internal_Rela *rel;
2461   Elf_Internal_Rela *relend;
2462   const char *name;
2463   struct elf_kvx_link_hash_table *globals;
2464   bool save_addend = false;
2465   bfd_vma addend = 0;
2466 
2467   globals = elf_kvx_hash_table (info);
2468 
2469   symtab_hdr = &elf_symtab_hdr (input_bfd);
2470   sym_hashes = elf_sym_hashes (input_bfd);
2471 
2472   rel = relocs;
2473   relend = relocs + input_section->reloc_count;
2474   for (; rel < relend; rel++)
2475     {
2476       unsigned int r_type;
2477       bfd_reloc_code_real_type bfd_r_type;
2478       reloc_howto_type *howto;
2479       unsigned long r_symndx;
2480       Elf_Internal_Sym *sym;
2481       asection *sec;
2482       struct elf_link_hash_entry *h;
2483       bfd_vma relocation;
2484       bfd_reloc_status_type r;
2485       arelent bfd_reloc;
2486       char sym_type;
2487       bool unresolved_reloc = false;
2488       char *error_message = NULL;
2489 
2490       r_symndx = ELFNN_R_SYM (rel->r_info);
2491       r_type = ELFNN_R_TYPE (rel->r_info);
2492 
2493       bfd_reloc.howto = elfNN_kvx_howto_from_type (input_bfd, r_type);
2494       howto = bfd_reloc.howto;
2495 
2496       if (howto == NULL)
2497 	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2498 
2499       bfd_r_type = elfNN_kvx_bfd_reloc_from_howto (howto);
2500 
2501       h = NULL;
2502       sym = NULL;
2503       sec = NULL;
2504 
2505       if (r_symndx < symtab_hdr->sh_info) /* A local symbol. */
2506 	{
2507 	  sym = local_syms + r_symndx;
2508 	  sym_type = ELFNN_ST_TYPE (sym->st_info);
2509 	  sec = local_sections[r_symndx];
2510 
2511 	  /* An object file might have a reference to a local
2512 	     undefined symbol.  This is a draft object file, but we
2513 	     should at least do something about it.  */
2514 	  if (r_type != R_KVX_NONE
2515 	      && r_type != R_KVX_S37_GOTADDR_LO10
2516 	      && r_type != R_KVX_S37_GOTADDR_UP27
2517 	      && r_type != R_KVX_S64_GOTADDR_LO10
2518 	      && r_type != R_KVX_S64_GOTADDR_UP27
2519 	      && r_type != R_KVX_S64_GOTADDR_EX27
2520 	      && r_type != R_KVX_S43_GOTADDR_LO10
2521 	      && r_type != R_KVX_S43_GOTADDR_UP27
2522 	      && r_type != R_KVX_S43_GOTADDR_EX6
2523 	      && bfd_is_und_section (sec)
2524 	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
2525 	    (*info->callbacks->undefined_symbol)
2526 	      (info, bfd_elf_string_from_elf_section
2527 	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
2528 	       input_bfd, input_section, rel->r_offset, true);
2529 
2530 	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2531 	}
2532       else
2533 	{
2534 	  bool warned, ignored;
2535 
2536 	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2537 				   r_symndx, symtab_hdr, sym_hashes,
2538 				   h, sec, relocation,
2539 				   unresolved_reloc, warned, ignored);
2540 
2541 	  sym_type = h->type;
2542 	}
2543 
2544       if (sec != NULL && discarded_section (sec))
2545 	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
2546 					 rel, 1, relend, howto, 0, contents);
2547 
2548       if (bfd_link_relocatable (info))
2549 	continue;
2550 
2551       if (h != NULL)
2552 	name = h->root.root.string;
2553       else
2554 	{
2555 	  name = (bfd_elf_string_from_elf_section
2556 		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
2557 	  if (name == NULL || *name == '\0')
2558 	    name = bfd_section_name (sec);
2559 	}
2560 
2561       if (r_symndx != 0
2562 	  && r_type != R_KVX_NONE
2563 	  && (h == NULL
2564 	      || h->root.type == bfd_link_hash_defined
2565 	      || h->root.type == bfd_link_hash_defweak)
2566 	  && IS_KVX_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
2567 	{
2568 	  (*_bfd_error_handler)
2569 	    ((sym_type == STT_TLS
2570 	      /* xgettext:c-format */
2571 	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
2572 	      /* xgettext:c-format */
2573 	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
2574 	     input_bfd,
2575 	     input_section, (uint64_t) rel->r_offset, howto->name, name);
2576 	}
2577 
2578       /* Original aarch64 has relaxation handling for TLS here. */
2579       r = bfd_reloc_continue;
2580 
2581       /* There may be multiple consecutive relocations for the
2582 	 same offset.  In that case we are supposed to treat the
2583 	 output of each relocation as the addend for the next.  */
2584       if (rel + 1 < relend
2585 	  && rel->r_offset == rel[1].r_offset
2586 	  && ELFNN_R_TYPE (rel[1].r_info) != R_KVX_NONE)
2587 
2588 	save_addend = true;
2589       else
2590 	save_addend = false;
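
      /* When SAVE_ADDEND is set, elfNN_kvx_final_link_relocate stores the
	 value it computes in ADDEND (via its saved_addend argument) and
	 returns bfd_reloc_continue instead of patching the section, so the
	 next relocation at this offset starts from that value; only the
	 last relocation of such a chain is actually applied.  */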
2591 
2592       if (r == bfd_reloc_continue)
2593 	r = elfNN_kvx_final_link_relocate (howto, input_bfd, output_bfd,
2594 					   input_section, contents, rel,
2595 					   relocation, info, sec,
2596 					   h, &unresolved_reloc,
2597 					   save_addend, &addend, sym);
2598 
2599       switch (elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type))
2600 	{
2601 	case BFD_RELOC_KVX_S37_TLS_GD_LO10:
2602 	case BFD_RELOC_KVX_S37_TLS_GD_UP27:
2603 
2604 	case BFD_RELOC_KVX_S43_TLS_GD_LO10:
2605 	case BFD_RELOC_KVX_S43_TLS_GD_UP27:
2606 	case BFD_RELOC_KVX_S43_TLS_GD_EX6:
2607 
2608 	case BFD_RELOC_KVX_S37_TLS_LD_LO10:
2609 	case BFD_RELOC_KVX_S37_TLS_LD_UP27:
2610 
2611 	case BFD_RELOC_KVX_S43_TLS_LD_LO10:
2612 	case BFD_RELOC_KVX_S43_TLS_LD_UP27:
2613 	case BFD_RELOC_KVX_S43_TLS_LD_EX6:
2614 
2615 	  if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2616 	    {
2617 	      bool need_relocs = false;
2618 	      bfd_byte *loc;
2619 	      int indx;
2620 	      bfd_vma off;
2621 
2622 	      off = symbol_got_offset (input_bfd, h, r_symndx);
2623 	      indx = h && h->dynindx != -1 ? h->dynindx : 0;
2624 
2625 	      need_relocs =
2626 		(bfd_link_pic (info) || indx != 0) &&
2627 		(h == NULL
2628 		 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2629 		 || h->root.type != bfd_link_hash_undefweak);
2630 
2631 	      BFD_ASSERT (globals->root.srelgot != NULL);
2632 
2633 	      if (need_relocs)
2634 		{
2635 		  Elf_Internal_Rela rela;
2636 		  rela.r_info = ELFNN_R_INFO (indx, R_KVX_64_DTPMOD);
2637 		  rela.r_addend = 0;
2638 		  rela.r_offset = globals->root.sgot->output_section->vma +
2639 		    globals->root.sgot->output_offset + off;
2640 
2641 		  loc = globals->root.srelgot->contents;
2642 		  loc += globals->root.srelgot->reloc_count++
2643 		    * RELOC_SIZE (htab);
2644 		  bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
2645 
2646 		  bfd_reloc_code_real_type real_type =
2647 		    elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type);
2648 
2649 		  if (real_type == BFD_RELOC_KVX_S37_TLS_LD_LO10
2650 		      || real_type == BFD_RELOC_KVX_S37_TLS_LD_UP27
2651 		      || real_type == BFD_RELOC_KVX_S43_TLS_LD_LO10
2652 		      || real_type == BFD_RELOC_KVX_S43_TLS_LD_UP27
2653 		      || real_type == BFD_RELOC_KVX_S43_TLS_LD_EX6)
2654 		    {
2655 		      /* For local dynamic, don't generate DTPOFF in any case.
2656 			 Initialize the DTPOFF slot into zero, so we get module
2657 			 base address when invoke runtime TLS resolver.  */
2658 		      bfd_put_NN (output_bfd, 0,
2659 				  globals->root.sgot->contents + off
2660 				  + GOT_ENTRY_SIZE);
2661 		    }
2662 		  else if (indx == 0)
2663 		    {
2664 		      bfd_put_NN (output_bfd,
2665 				  relocation - dtpoff_base (info),
2666 				  globals->root.sgot->contents + off
2667 				  + GOT_ENTRY_SIZE);
2668 		    }
2669 		  else
2670 		    {
2671 		      /* This TLS symbol is global. We emit a
2672 			 relocation to fixup the tls offset at load
2673 			 time.  */
2674 		      rela.r_info =
2675 			ELFNN_R_INFO (indx, R_KVX_64_DTPOFF);
2676 		      rela.r_addend = 0;
2677 		      rela.r_offset =
2678 			(globals->root.sgot->output_section->vma
2679 			 + globals->root.sgot->output_offset + off
2680 			 + GOT_ENTRY_SIZE);
2681 
2682 		      loc = globals->root.srelgot->contents;
2683 		      loc += globals->root.srelgot->reloc_count++
2684 			* RELOC_SIZE (globals);
2685 		      bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
2686 		      bfd_put_NN (output_bfd, (bfd_vma) 0,
2687 				  globals->root.sgot->contents + off
2688 				  + GOT_ENTRY_SIZE);
2689 		    }
2690 		}
2691 	      else
2692 		{
2693 		  bfd_put_NN (output_bfd, (bfd_vma) 1,
2694 			      globals->root.sgot->contents + off);
2695 		  bfd_put_NN (output_bfd,
2696 			      relocation - dtpoff_base (info),
2697 			      globals->root.sgot->contents + off
2698 			      + GOT_ENTRY_SIZE);
2699 		}
2700 
2701 	      symbol_got_offset_mark (input_bfd, h, r_symndx);
2702 	    }
2703 	  break;
2704 
2705 	case BFD_RELOC_KVX_S37_TLS_IE_LO10:
2706 	case BFD_RELOC_KVX_S37_TLS_IE_UP27:
2707 
2708 	case BFD_RELOC_KVX_S43_TLS_IE_LO10:
2709 	case BFD_RELOC_KVX_S43_TLS_IE_UP27:
2710 	case BFD_RELOC_KVX_S43_TLS_IE_EX6:
2711 	  if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2712 	    {
2713 	      bool need_relocs = false;
2714 	      bfd_byte *loc;
2715 	      int indx;
2716 	      bfd_vma off;
2717 
2718 	      off = symbol_got_offset (input_bfd, h, r_symndx);
2719 
2720 	      indx = h && h->dynindx != -1 ? h->dynindx : 0;
2721 
2722 	      need_relocs =
2723 		(bfd_link_pic (info) || indx != 0) &&
2724 		(h == NULL
2725 		 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2726 		 || h->root.type != bfd_link_hash_undefweak);
2727 
2728 	      BFD_ASSERT (globals->root.srelgot != NULL);
2729 
2730 	      if (need_relocs)
2731 		{
2732 		  Elf_Internal_Rela rela;
2733 
2734 		  if (indx == 0)
2735 		    rela.r_addend = relocation - dtpoff_base (info);
2736 		  else
2737 		    rela.r_addend = 0;
2738 
2739 		  rela.r_info = ELFNN_R_INFO (indx, R_KVX_64_TPOFF);
2740 		  rela.r_offset = globals->root.sgot->output_section->vma +
2741 		    globals->root.sgot->output_offset + off;
2742 
2743 		  loc = globals->root.srelgot->contents;
2744 		  loc += globals->root.srelgot->reloc_count++
2745 		    * RELOC_SIZE (htab);
2746 
2747 		  bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
2748 
2749 		  bfd_put_NN (output_bfd, rela.r_addend,
2750 			      globals->root.sgot->contents + off);
2751 		}
2752 	      else
2753 		bfd_put_NN (output_bfd, relocation - tpoff_base (info),
2754 			    globals->root.sgot->contents + off);
2755 
2756 	      symbol_got_offset_mark (input_bfd, h, r_symndx);
2757 	    }
2758 	  break;
2759 
2760 	default:
2761 	  break;
2762 	}
2763 
2764       /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
2765 	 because such sections are not SEC_ALLOC and thus ld.so will
2766 	 not process them.  */
2767       if (unresolved_reloc
2768 	  && !((input_section->flags & SEC_DEBUGGING) != 0
2769 	       && h->def_dynamic)
2770 	  && _bfd_elf_section_offset (output_bfd, info, input_section,
2771 				      +rel->r_offset) != (bfd_vma) - 1)
2772 	{
2773 	  (*_bfd_error_handler)
2774 	    /* xgettext:c-format */
2775 	    (_("%pB(%pA+%#" PRIx64 "): "
2776 	       "unresolvable %s relocation against symbol `%s'"),
2777 	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
2778 	     h->root.root.string);
2779 	  return false;
2780 	}
2781 
2782       if (r != bfd_reloc_ok && r != bfd_reloc_continue)
2783 	{
2784 	  switch (r)
2785 	    {
2786 	    case bfd_reloc_overflow:
2787 	      (*info->callbacks->reloc_overflow)
2788 		(info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
2789 		 input_bfd, input_section, rel->r_offset);
2790 
2791 	      /* Original aarch64 code had a check for alignment correctness.  */
2792 	      break;
2793 
2794 	    case bfd_reloc_undefined:
2795 	      (*info->callbacks->undefined_symbol)
2796 		(info, name, input_bfd, input_section, rel->r_offset, true);
2797 	      break;
2798 
2799 	    case bfd_reloc_outofrange:
2800 	      error_message = _("out of range");
2801 	      goto common_error;
2802 
2803 	    case bfd_reloc_notsupported:
2804 	      error_message = _("unsupported relocation");
2805 	      goto common_error;
2806 
2807 	    case bfd_reloc_dangerous:
2808 	      /* error_message should already be set.  */
2809 	      goto common_error;
2810 
2811 	    default:
2812 	      error_message = _("unknown error");
2813 	      /* Fall through.  */
2814 
2815 	    common_error:
2816 	      BFD_ASSERT (error_message != NULL);
2817 	      (*info->callbacks->reloc_dangerous)
2818 		(info, error_message, input_bfd, input_section, rel->r_offset);
2819 	      break;
2820 	    }
2821 	}
2822 
2823       if (!save_addend)
2824 	addend = 0;
2825     }
2826 
2827   return true;
2828 }
2829 
2830 /* Set the right machine number.  */
2831 
2832 static bool
2833 elfNN_kvx_object_p (bfd *abfd)
2834 {
2835   /* Must be coherent with the default arch in cpu-kvx.c.  */
2836   int e_set = bfd_mach_kv3_1;
2837 
2838   if (elf_elfheader (abfd)->e_machine == EM_KVX)
2839     {
2840       int e_core = elf_elfheader (abfd)->e_flags & ELF_KVX_CORE_MASK;
2841       switch(e_core)
2842 	{
2843 #if ARCH_SIZE == 64
2844 	case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1_64; break;
2845 	case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2_64; break;
2846 	case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1_64; break;
2847 #else
2848 	case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1; break;
2849 	case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2; break;
2850 	case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1; break;
2851 #endif
2852 	default:
2853 	  (*_bfd_error_handler)(_("%s: Bad ELF id: `%d'"),
2854 				abfd->filename, e_core);
2855 	}
2856     }
2857   return bfd_default_set_arch_mach (abfd, bfd_arch_kvx, e_set);
2858 }
2859 
2860 /* Function to keep KVX specific flags in the ELF header.  */
2861 
2862 static bool
2863 elfNN_kvx_set_private_flags (bfd *abfd, flagword flags)
2864 {
2865   if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
2866     {
2867     }
2868   else
2869     {
2870       elf_elfheader (abfd)->e_flags = flags;
2871       elf_flags_init (abfd) = true;
2872     }
2873 
2874   return true;
2875 }
2876 
2877 /* Merge backend specific data from an object file to the output
2878    object file when linking.  */
2879 
2880 static bool
2881 elfNN_kvx_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
2882 {
2883   bfd *obfd = info->output_bfd;
2884   flagword out_flags;
2885   flagword in_flags;
2886   bool flags_compatible = true;
2887   asection *sec;
2888 
2889   /* Check if we have the same endianness.  */
2890   if (!_bfd_generic_verify_endian_match (ibfd, info))
2891     return false;
2892 
2893   if (!is_kvx_elf (ibfd) || !is_kvx_elf (obfd))
2894     return true;
2895 
2896   /* The input BFD must have had its flags initialised.  */
2897   /* The following seems bogus to me -- The flags are initialized in
2898      the assembler but I don't think an elf_flags_init field is
2899      written into the object.  */
2900   /* BFD_ASSERT (elf_flags_init (ibfd)); */
2901 
2902   if (bfd_get_arch_size (ibfd) != bfd_get_arch_size (obfd))
2903     {
2904       const char *msg;
2905 
2906       if (bfd_get_arch_size (ibfd) == 32
2907 	  && bfd_get_arch_size (obfd) == 64)
2908 	msg = _("%s: compiled as 32-bit object and %s is 64-bit");
2909       else if (bfd_get_arch_size (ibfd) == 64
2910 	       && bfd_get_arch_size (obfd) == 32)
2911 	msg = _("%s: compiled as 64-bit object and %s is 32-bit");
2912       else
2913 	msg = _("%s: object size does not match that of target %s");
2914 
2915       (*_bfd_error_handler) (msg, bfd_get_filename (ibfd),
2916 			     bfd_get_filename (obfd));
2917       bfd_set_error (bfd_error_wrong_format);
2918       return false;
2919     }
2920 
2921   in_flags = elf_elfheader (ibfd)->e_flags;
2922   out_flags = elf_elfheader (obfd)->e_flags;
2923 
2924   if (!elf_flags_init (obfd))
2925     {
2926       /* If the input is the default architecture and had the default
2927 	 flags then do not bother setting the flags for the output
2928 	 architecture, instead allow future merges to do this.  If no
2929 	 future merges ever set these flags then they will retain their
2930 	 uninitialised values which, surprise surprise, correspond
2931 	 to the default values.  */
2932       if (bfd_get_arch_info (ibfd)->the_default
2933 	  && elf_elfheader (ibfd)->e_flags == 0)
2934 	return true;
2935 
2936       elf_flags_init (obfd) = true;
2937       elf_elfheader (obfd)->e_flags = in_flags;
2938 
2939       if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
2940 	  && bfd_get_arch_info (obfd)->the_default)
2941 	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
2942 				  bfd_get_mach (ibfd));
2943 
2944       return true;
2945     }
2946 
2947   /* Identical flags must be compatible.  */
2948   if (in_flags == out_flags)
2949     return true;
2950 
2951   /* Check to see if the input BFD actually contains any sections.  If
2952      not, its flags may not have been initialised either, but it
2953      cannot actually cause any incompatibility.  Do not short-circuit
2954      dynamic objects; their section list may be emptied by
2955      elf_link_add_object_symbols.
2956 
2957      Also check to see if there are no code sections in the input.
2958      In this case there is no need to check for code specific flags.
2959      XXX - do we need to worry about floating-point format compatibility
2960      in data sections ?  */
2961   if (!(ibfd->flags & DYNAMIC))
2962     {
2963       bool null_input_bfd = true;
2964       bool only_data_sections = true;
2965 
2966       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2967 	{
2968 	  if ((bfd_section_flags (sec)
2969 	       & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
2970 	      == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
2971 	    only_data_sections = false;
2972 
2973 	  null_input_bfd = false;
2974 	  break;
2975 	}
2976 
2977       if (null_input_bfd || only_data_sections)
2978 	return true;
2979     }
2980   return flags_compatible;
2981 }
2982 
2983 /* Display the flags field.  */
2984 
2985 static bool
2986 elfNN_kvx_print_private_bfd_data (bfd *abfd, void *ptr)
2987 {
2988   FILE *file = (FILE *) ptr;
2989   unsigned long flags;
2990 
2991   BFD_ASSERT (abfd != NULL && ptr != NULL);
2992 
2993   /* Print normal ELF private data.  */
2994   _bfd_elf_print_private_bfd_data (abfd, ptr);
2995 
2996   flags = elf_elfheader (abfd)->e_flags;
2997   /* Ignore init flag - it may not be set, despite the flags field
2998      containing valid data.  */
2999 
3000   /* xgettext:c-format */
3001   fprintf (file, _("Private flags = 0x%lx : "), elf_elfheader (abfd)->e_flags);
3002   if((flags & ELF_KVX_ABI_64B_ADDR_BIT) == ELF_KVX_ABI_64B_ADDR_BIT)
3003     {
3004       if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1))
3005 	fprintf (file, _("Coolidge (kv3) V1 64 bits"));
3006       else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2))
3007 	fprintf (file, _("Coolidge (kv3) V2 64 bits"));
3008       else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1))
3009 	fprintf (file, _("Coolidge (kv4) V1 64 bits"));
3010     }
3011   else
3012     {
3013       if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1))
3014 	fprintf (file, _("Coolidge (kv3) V1 32 bits"));
3015       else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2))
3016 	fprintf (file, _("Coolidge (kv3) V2 32 bits"));
3017       else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1))
3018 	fprintf (file, _("Coolidge (kv4) V1 32 bits"));
3019     }
3020 
3021   fputc ('\n', file);
3022 
3023   return true;
3024 }
3025 
3026 /* Adjust a symbol defined by a dynamic object and referenced by a
3027    regular object.  The current definition is in some section of the
3028    dynamic object, but we're not including those sections.  We have to
3029    change the definition to something the rest of the link can
3030    understand.	*/
3031 
3032 static bool
3033 elfNN_kvx_adjust_dynamic_symbol (struct bfd_link_info *info,
3034 				 struct elf_link_hash_entry *h)
3035 {
3036   struct elf_kvx_link_hash_table *htab;
3037   asection *s;
3038 
3039   /* If this is a function, put it in the procedure linkage table.  We
3040      will fill in the contents of the procedure linkage table later,
3041      when we know the address of the .got section.  */
3042   if (h->type == STT_FUNC || h->needs_plt)
3043     {
3044       if (h->plt.refcount <= 0
3045 	  || ((SYMBOL_CALLS_LOCAL (info, h)
3046 	       || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3047 		   && h->root.type == bfd_link_hash_undefweak))))
3048 	{
3049 	  /* This case can occur if we saw a CALL26 reloc in
3050 	     an input file, but the symbol wasn't referred to
3051 	     by a dynamic object or all references were
3052 	     garbage collected.  In that case we do not need a
3053 	     PLT entry and the call can be resolved directly.  */
3054 	  h->plt.offset = (bfd_vma) - 1;
3055 	  h->needs_plt = 0;
3056 	}
3057 
3058       return true;
3059     }
3060   else
3061     /* Otherwise, reset to -1.  */
3062     h->plt.offset = (bfd_vma) - 1;
3063 
3064 
3065   /* If this is a weak symbol, and there is a real definition, the
3066      processor independent code will have arranged for us to see the
3067      real definition first, and we can just use the same value.  */
3068   if (h->is_weakalias)
3069     {
3070       struct elf_link_hash_entry *def = weakdef (h);
3071       BFD_ASSERT (def->root.type == bfd_link_hash_defined);
3072       h->root.u.def.section = def->root.u.def.section;
3073       h->root.u.def.value = def->root.u.def.value;
3074       if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
3075 	h->non_got_ref = def->non_got_ref;
3076       return true;
3077     }
3078 
3079   /* If we are creating a shared library, we must presume that the
3080      only references to the symbol are via the global offset table.
3081      For such cases we need not do anything here; the relocations will
3082      be handled correctly by relocate_section.  */
3083   if (bfd_link_pic (info))
3084     return true;
3085 
3086   /* If there are no references to this symbol that do not use the
3087      GOT, we don't need to generate a copy reloc.  */
3088   if (!h->non_got_ref)
3089     return true;
3090 
3091   /* If -z nocopyreloc was given, we won't generate them either.  */
3092   if (info->nocopyreloc)
3093     {
3094       h->non_got_ref = 0;
3095       return true;
3096     }
3097 
3098   /* We must allocate the symbol in our .dynbss section, which will
3099      become part of the .bss section of the executable.  There will be
3100      an entry for this symbol in the .dynsym section.  The dynamic
3101      object will contain position independent code, so all references
3102      from the dynamic object to this symbol will go through the global
3103      offset table.  The dynamic linker will use the .dynsym entry to
3104      determine the address it must put in the global offset table, so
3105      both the dynamic object and the regular object will refer to the
3106      same memory location for the variable.  */
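
  /* For instance, a variable defined in a shared object and referenced
     directly (not through the GOT) from the executable gets space
     reserved in .dynbss here, and the R_KVX_COPY relocation emitted for
     it makes the dynamic linker copy the initial value into that space
     at startup.  */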
3107 
3108   htab = elf_kvx_hash_table (info);
3109 
3110   /* We must generate a R_KVX_COPY reloc to tell the dynamic linker
3111      to copy the initial value out of the dynamic object and into the
3112      runtime process image.  */
3113   if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
3114     {
3115       htab->srelbss->size += RELOC_SIZE (htab);
3116       h->needs_copy = 1;
3117     }
3118 
3119   s = htab->sdynbss;
3120 
3121   return _bfd_elf_adjust_dynamic_copy (info, h, s);
3122 }
3123 
3124 static bool
3125 elfNN_kvx_allocate_local_symbols (bfd *abfd, unsigned number)
3126 {
3127   struct elf_kvx_local_symbol *locals;
3128   locals = elf_kvx_locals (abfd);
3129   if (locals == NULL)
3130     {
3131       locals = (struct elf_kvx_local_symbol *)
3132 	bfd_zalloc (abfd, number * sizeof (struct elf_kvx_local_symbol));
3133       if (locals == NULL)
3134 	return false;
3135       elf_kvx_locals (abfd) = locals;
3136     }
3137   return true;
3138 }
3139 
3140 /* Create the .got section to hold the global offset table.  */
3141 
3142 static bool
3143 kvx_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
3144 {
3145   const struct elf_backend_data *bed = get_elf_backend_data (abfd);
3146   flagword flags;
3147   asection *s;
3148   struct elf_link_hash_entry *h;
3149   struct elf_link_hash_table *htab = elf_hash_table (info);
3150 
3151   /* This function may be called more than once.  */
3152   s = bfd_get_linker_section (abfd, ".got");
3153   if (s != NULL)
3154     return true;
3155 
3156   flags = bed->dynamic_sec_flags;
3157 
3158   s = bfd_make_section_anyway_with_flags (abfd,
3159 					  (bed->rela_plts_and_copies_p
3160 					   ? ".rela.got" : ".rel.got"),
3161 					  (bed->dynamic_sec_flags
3162 					   | SEC_READONLY));
3163   if (s == NULL
3164       || !bfd_set_section_alignment (s, bed->s->log_file_align))
3165 
3166     return false;
3167   htab->srelgot = s;
3168 
3169   s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
3170   if (s == NULL
3171       || !bfd_set_section_alignment (s, bed->s->log_file_align))
3172     return false;
3173   htab->sgot = s;
3174   htab->sgot->size += GOT_ENTRY_SIZE;
3175 
3176   if (bed->want_got_sym)
3177     {
3178       /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
3179 	 (or .got.plt) section.  We don't do this in the linker script
3180 	 because we don't want to define the symbol if we are not creating
3181 	 a global offset table.  */
3182       h = _bfd_elf_define_linkage_sym (abfd, info, s,
3183 				       "_GLOBAL_OFFSET_TABLE_");
3184       elf_hash_table (info)->hgot = h;
3185       if (h == NULL)
3186 	return false;
3187     }
3188 
3189   if (bed->want_got_plt)
3190     {
3191       s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
3192       if (s == NULL
3193 	  || !bfd_set_section_alignment (s,
3194 					 bed->s->log_file_align))
3195 	return false;
3196       htab->sgotplt = s;
3197     }
3198 
3199   /* The first bit of the global offset table is the header.  */
3200   s->size += bed->got_header_size;
3201 
3202   /* We still need to handle GOT content when doing a static link with PIC.  */
3203   if (bfd_link_executable (info) && !bfd_link_pic (info)) {
3204     htab->dynobj = abfd;
3205   }
3206 
3207   return true;
3208 }
3209 
3210 /* Look through the relocs for a section during the first phase.  */
3211 
3212 static bool
3213 elfNN_kvx_check_relocs (bfd *abfd, struct bfd_link_info *info,
3214 			    asection *sec, const Elf_Internal_Rela *relocs)
3215 {
3216   Elf_Internal_Shdr *symtab_hdr;
3217   struct elf_link_hash_entry **sym_hashes;
3218   const Elf_Internal_Rela *rel;
3219   const Elf_Internal_Rela *rel_end;
3220   asection *sreloc;
3221 
3222   struct elf_kvx_link_hash_table *htab;
3223 
3224   if (bfd_link_relocatable (info))
3225     return true;
3226 
3227   BFD_ASSERT (is_kvx_elf (abfd));
3228 
3229   htab = elf_kvx_hash_table (info);
3230   sreloc = NULL;
3231 
3232   symtab_hdr = &elf_symtab_hdr (abfd);
3233   sym_hashes = elf_sym_hashes (abfd);
3234 
3235   rel_end = relocs + sec->reloc_count;
3236   for (rel = relocs; rel < rel_end; rel++)
3237     {
3238       struct elf_link_hash_entry *h;
3239       unsigned int r_symndx;
3240       unsigned int r_type;
3241       bfd_reloc_code_real_type bfd_r_type;
3242       Elf_Internal_Sym *isym;
3243 
3244       r_symndx = ELFNN_R_SYM (rel->r_info);
3245       r_type = ELFNN_R_TYPE (rel->r_info);
3246 
3247       if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
3248 	{
3249 	  /* xgettext:c-format */
3250 	  _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx);
3251 	  return false;
3252 	}
3253 
3254       if (r_symndx < symtab_hdr->sh_info)
3255 	{
3256 	  /* A local symbol.  */
3257 	  isym = bfd_sym_from_r_symndx (&htab->sym_cache,
3258 					abfd, r_symndx);
3259 	  if (isym == NULL)
3260 	    return false;
3261 
3262 	  h = NULL;
3263 	}
3264       else
3265 	{
3266 	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
3267 	  while (h->root.type == bfd_link_hash_indirect
3268 		 || h->root.type == bfd_link_hash_warning)
3269 	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
3270 	}
3271 
3272       /* Could be done earlier, if h were already available.  */
3273       bfd_r_type = kvx_tls_transition (abfd, info, r_type, h, r_symndx);
3274 
3275       if (h != NULL)
3276 	{
3277 	  /* Create the ifunc sections for static executables.  If we
3278 	     never see an indirect function symbol and are not building
3279 	     a static executable, those sections will be empty and
3280 	     won't appear in output.  */
3281 	  switch (bfd_r_type)
3282 	    {
3283 	    default:
3284 	      break;
3285 	    }
3286 
3287 	  /* It is referenced by a non-shared object. */
3288 	  h->ref_regular = 1;
3289 	}
3290 
3291       switch (bfd_r_type)
3292 	{
3293 
3294 	case BFD_RELOC_KVX_S43_LO10:
3295 	case BFD_RELOC_KVX_S43_UP27:
3296 	case BFD_RELOC_KVX_S43_EX6:
3297 
3298 	case BFD_RELOC_KVX_S37_LO10:
3299 	case BFD_RELOC_KVX_S37_UP27:
3300 
3301 	case BFD_RELOC_KVX_S64_LO10:
3302 	case BFD_RELOC_KVX_S64_UP27:
3303 	case BFD_RELOC_KVX_S64_EX27:
3304 
3305 	case BFD_RELOC_KVX_32:
3306 	case BFD_RELOC_KVX_64:
3307 
3308 	  /* We don't need to handle relocs into sections not going into
3309 	     the "real" output.  */
3310 	  if ((sec->flags & SEC_ALLOC) == 0)
3311 	    break;
3312 
3313 	  if (h != NULL)
3314 	    {
3315 	      if (!bfd_link_pic (info))
3316 		h->non_got_ref = 1;
3317 
3318 	      h->plt.refcount += 1;
3319 	      h->pointer_equality_needed = 1;
3320 	    }
3321 
3322 	  /* No need to do anything if we're not creating a shared
3323 	     object.  */
3324 	  if (! bfd_link_pic (info))
3325 	    break;
3326 
3327 	  {
3328 	    struct elf_dyn_relocs *p;
3329 	    struct elf_dyn_relocs **head;
3330 
3331 	    /* We must copy these reloc types into the output file.
3332 	       Create a reloc section in dynobj and make room for
3333 	       this reloc.  */
3334 	    if (sreloc == NULL)
3335 	      {
3336 		if (htab->root.dynobj == NULL)
3337 		  htab->root.dynobj = abfd;
3338 
3339 		sreloc = _bfd_elf_make_dynamic_reloc_section
3340 		  (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true);
3341 
3342 		if (sreloc == NULL)
3343 		  return false;
3344 	      }
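	    /* This normally gives the reloc section its conventional
	       name, e.g. dynamic relocs against .data land in a
	       .rela.data section of the dynamic object.  */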
3345 
3346 	    /* If this is a global symbol, we count the number of
3347 	       relocations we need for this symbol.  */
3348 	    if (h != NULL)
3349 	      {
3350 		head = &h->dyn_relocs;
3351 	      }
3352 	    else
3353 	      {
3354 		/* Track dynamic relocs needed for local syms too.
3355 		   We really need local syms available to do this
3356 		   easily.  Oh well.  */
3357 
3358 		asection *s;
3359 		void **vpp;
3360 
3361 		isym = bfd_sym_from_r_symndx (&htab->sym_cache,
3362 					      abfd, r_symndx);
3363 		if (isym == NULL)
3364 		  return false;
3365 
3366 		s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3367 		if (s == NULL)
3368 		  s = sec;
3369 
3370 		/* Beware of type punned pointers vs strict aliasing
3371 		   rules.  */
3372 		vpp = &(elf_section_data (s)->local_dynrel);
3373 		head = (struct elf_dyn_relocs **) vpp;
3374 	      }
3375 
3376 	    p = *head;
3377 	    if (p == NULL || p->sec != sec)
3378 	      {
3379 		bfd_size_type amt = sizeof *p;
3380 		p = ((struct elf_dyn_relocs *)
3381 		     bfd_zalloc (htab->root.dynobj, amt));
3382 		if (p == NULL)
3383 		  return false;
3384 		p->next = *head;
3385 		*head = p;
3386 		p->sec = sec;
3387 	      }
3388 
3389 	    p->count += 1;
3390 
3391 	  }
3392 	  break;
3393 
3394 	case BFD_RELOC_KVX_S37_GOT_LO10:
3395 	case BFD_RELOC_KVX_S37_GOT_UP27:
3396 
3397 	case BFD_RELOC_KVX_S37_GOTOFF_LO10:
3398 	case BFD_RELOC_KVX_S37_GOTOFF_UP27:
3399 
3400 	case BFD_RELOC_KVX_S43_GOT_LO10:
3401 	case BFD_RELOC_KVX_S43_GOT_UP27:
3402 	case BFD_RELOC_KVX_S43_GOT_EX6:
3403 
3404 	case BFD_RELOC_KVX_S43_GOTOFF_LO10:
3405 	case BFD_RELOC_KVX_S43_GOTOFF_UP27:
3406 	case BFD_RELOC_KVX_S43_GOTOFF_EX6:
3407 
3408 	case BFD_RELOC_KVX_S37_TLS_GD_LO10:
3409 	case BFD_RELOC_KVX_S37_TLS_GD_UP27:
3410 
3411 	case BFD_RELOC_KVX_S43_TLS_GD_LO10:
3412 	case BFD_RELOC_KVX_S43_TLS_GD_UP27:
3413 	case BFD_RELOC_KVX_S43_TLS_GD_EX6:
3414 
3415 	case BFD_RELOC_KVX_S37_TLS_IE_LO10:
3416 	case BFD_RELOC_KVX_S37_TLS_IE_UP27:
3417 
3418 	case BFD_RELOC_KVX_S43_TLS_IE_LO10:
3419 	case BFD_RELOC_KVX_S43_TLS_IE_UP27:
3420 	case BFD_RELOC_KVX_S43_TLS_IE_EX6:
3421 
3422 	case BFD_RELOC_KVX_S37_TLS_LD_LO10:
3423 	case BFD_RELOC_KVX_S37_TLS_LD_UP27:
3424 
3425 	case BFD_RELOC_KVX_S43_TLS_LD_LO10:
3426 	case BFD_RELOC_KVX_S43_TLS_LD_UP27:
3427 	case BFD_RELOC_KVX_S43_TLS_LD_EX6:
3428 	  {
3429 	    unsigned got_type;
3430 	    unsigned old_got_type;
3431 
3432 	    got_type = kvx_reloc_got_type (bfd_r_type);
3433 
3434 	    if (h)
3435 	      {
3436 		h->got.refcount += 1;
3437 		old_got_type = elf_kvx_hash_entry (h)->got_type;
3438 	      }
3439 	    else
3440 	      {
3441 		struct elf_kvx_local_symbol *locals;
3442 
3443 		if (!elfNN_kvx_allocate_local_symbols
3444 		    (abfd, symtab_hdr->sh_info))
3445 		  return false;
3446 
3447 		locals = elf_kvx_locals (abfd);
3448 		BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
3449 		locals[r_symndx].got_refcount += 1;
3450 		old_got_type = locals[r_symndx].got_type;
3451 	      }
3452 
3453 	    /* We will already have issued an error message if there
3454 	       is a TLS/non-TLS mismatch, based on the symbol type.
3455 	       So just combine any TLS types needed.  */
3456 	    if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
3457 		&& got_type != GOT_NORMAL)
3458 	      got_type |= old_got_type;
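	    /* got_type is a bit mask, so a symbol referenced through
	       both IE and GD code sequences, for example, ends up with
	       GOT_TLS_IE | GOT_TLS_GD here and the sizing code later
	       reserves space for both access models.  */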
3459 
3460 	    /* If the symbol is accessed by both IE and GD methods, we
3461 	       are able to relax.  Turn off the GD flag, without
3462 	       messing up with any other kind of TLS types that may be
3463 	       involved.  */
3464 	    /* Disabled: untested and unused TLS relaxation.  */
3465 	    /* if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type)) */
3466 	    /*   got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD); */
3467 
3468 	    if (old_got_type != got_type)
3469 	      {
3470 		if (h != NULL)
3471 		  elf_kvx_hash_entry (h)->got_type = got_type;
3472 		else
3473 		  {
3474 		    struct elf_kvx_local_symbol *locals;
3475 		    locals = elf_kvx_locals (abfd);
3476 		    BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
3477 		    locals[r_symndx].got_type = got_type;
3478 		  }
3479 	      }
3480 
3481 	    if (htab->root.dynobj == NULL)
3482 	      htab->root.dynobj = abfd;
3483 	    if (! kvx_elf_create_got_section (htab->root.dynobj, info))
3484 	      return false;
3485 	    break;
3486 	  }
3487 
3488 	case BFD_RELOC_KVX_S64_GOTADDR_LO10:
3489 	case BFD_RELOC_KVX_S64_GOTADDR_UP27:
3490 	case BFD_RELOC_KVX_S64_GOTADDR_EX27:
3491 
3492 	case BFD_RELOC_KVX_S43_GOTADDR_LO10:
3493 	case BFD_RELOC_KVX_S43_GOTADDR_UP27:
3494 	case BFD_RELOC_KVX_S43_GOTADDR_EX6:
3495 
3496 	case BFD_RELOC_KVX_S37_GOTADDR_LO10:
3497 	case BFD_RELOC_KVX_S37_GOTADDR_UP27:
3498 
3499 	  if (htab->root.dynobj == NULL)
3500 	    htab->root.dynobj = abfd;
3501 	  if (! kvx_elf_create_got_section (htab->root.dynobj, info))
3502 	    return false;
3503 	  break;
3504 
3505 	case BFD_RELOC_KVX_PCREL27:
3506 	case BFD_RELOC_KVX_PCREL17:
3507 	  /* If this is a local symbol then we resolve it
3508 	     directly without creating a PLT entry.  */
3509 	  if (h == NULL)
3510 	    continue;
3511 
3512 	  h->needs_plt = 1;
3513 	  if (h->plt.refcount <= 0)
3514 	    h->plt.refcount = 1;
3515 	  else
3516 	    h->plt.refcount += 1;
3517 	  break;
3518 
3519 	default:
3520 	  break;
3521 	}
3522     }
3523 
3524   return true;
3525 }
3526 
3527 static bool
3528 elfNN_kvx_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
3529 {
3530   Elf_Internal_Ehdr *i_ehdrp;	/* ELF file header, internal form.  */
3531 
3532   if (!_bfd_elf_init_file_header (abfd, link_info))
3533     return false;
3534 
3535   i_ehdrp = elf_elfheader (abfd);
3536   i_ehdrp->e_ident[EI_ABIVERSION] = KVX_ELF_ABI_VERSION;
3537   return true;
3538 }
3539 
3540 static enum elf_reloc_type_class
3541 elfNN_kvx_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
3542 				const asection *rel_sec ATTRIBUTE_UNUSED,
3543 				const Elf_Internal_Rela *rela)
3544 {
3545   switch ((int) ELFNN_R_TYPE (rela->r_info))
3546     {
3547     case R_KVX_RELATIVE:
3548       return reloc_class_relative;
3549     case R_KVX_JMP_SLOT:
3550       return reloc_class_plt;
3551     case R_KVX_COPY:
3552       return reloc_class_copy;
3553     default:
3554       return reloc_class_normal;
3555     }
3556 }
3557 
3558 /* A structure used to record a list of sections, independently
3559    of the next and prev fields in the asection structure.  */
3560 typedef struct section_list
3561 {
3562   asection *sec;
3563   struct section_list *next;
3564   struct section_list *prev;
3565 }
3566 section_list;
3567 
3568 typedef struct
3569 {
3570   void *finfo;
3571   struct bfd_link_info *info;
3572   asection *sec;
3573   int sec_shndx;
3574   int (*func) (void *, const char *, Elf_Internal_Sym *,
3575 	       asection *, struct elf_link_hash_entry *);
3576 } output_arch_syminfo;
3577 
3578 /* Output a single local symbol for a generated stub.  */
3579 
3580 static bool
3581 elfNN_kvx_output_stub_sym (output_arch_syminfo *osi, const char *name,
3582 			       bfd_vma offset, bfd_vma size)
3583 {
3584   Elf_Internal_Sym sym;
3585 
3586   sym.st_value = (osi->sec->output_section->vma
3587 		  + osi->sec->output_offset + offset);
3588   sym.st_size = size;
3589   sym.st_other = 0;
3590   sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
3591   sym.st_shndx = osi->sec_shndx;
3592   return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
3593 }
3594 
3595 static bool
3596 kvx_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
3597 {
3598   struct elf_kvx_stub_hash_entry *stub_entry;
3599   asection *stub_sec;
3600   bfd_vma addr;
3601   char *stub_name;
3602   output_arch_syminfo *osi;
3603 
3604   /* Massage our args to the form they really have.  */
3605   stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
3606   osi = (output_arch_syminfo *) in_arg;
3607 
3608   stub_sec = stub_entry->stub_sec;
3609 
3610   /* Ensure this stub is attached to the current section being
3611      processed.  */
3612   if (stub_sec != osi->sec)
3613     return true;
3614 
3615   addr = (bfd_vma) stub_entry->stub_offset;
3616 
3617   stub_name = stub_entry->output_name;
3618 
3619   switch (stub_entry->stub_type)
3620     {
3621     case kvx_stub_long_branch:
3622       if (!elfNN_kvx_output_stub_sym
3623 	  (osi, stub_name, addr, sizeof (elfNN_kvx_long_branch_stub)))
3624 	return false;
3625       break;
3626 
3627     default:
3628       abort ();
3629     }
3630 
3631   return true;
3632 }
3633 
3634 /* Output mapping symbols for linker generated sections.  */
3635 
3636 static bool
3637 elfNN_kvx_output_arch_local_syms (bfd *output_bfd,
3638 				  struct bfd_link_info *info,
3639 				  void *finfo,
3640 				  int (*func) (void *, const char *,
3641 					       Elf_Internal_Sym *,
3642 					       asection *,
3643 					       struct elf_link_hash_entry *))
3644 {
3645   output_arch_syminfo osi;
3646   struct elf_kvx_link_hash_table *htab;
3647 
3648   htab = elf_kvx_hash_table (info);
3649 
3650   osi.finfo = finfo;
3651   osi.info = info;
3652   osi.func = func;
3653 
3654   /* Long calls stubs.  */
3655   if (htab->stub_bfd && htab->stub_bfd->sections)
3656     {
3657       asection *stub_sec;
3658 
3659       for (stub_sec = htab->stub_bfd->sections;
3660 	   stub_sec != NULL; stub_sec = stub_sec->next)
3661 	{
3662 	  /* Ignore non-stub sections.  */
3663 	  if (!strstr (stub_sec->name, STUB_SUFFIX))
3664 	    continue;
3665 
3666 	  osi.sec = stub_sec;
3667 
3668 	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
3669 	    (output_bfd, osi.sec->output_section);
3670 
3671 	  bfd_hash_traverse (&htab->stub_hash_table, kvx_map_one_stub,
3672 			     &osi);
3673 	}
3674     }
3675 
3676   /* Finally, output mapping symbols for the PLT.  */
3677   if (!htab->root.splt || htab->root.splt->size == 0)
3678     return true;
3679 
3680   osi.sec_shndx = _bfd_elf_section_from_bfd_section
3681     (output_bfd, htab->root.splt->output_section);
3682   osi.sec = htab->root.splt;
3683 
3684   return true;
3685 
3686 }
3687 
3688 /* Allocate target specific section data.  */
3689 
3690 static bool
3691 elfNN_kvx_new_section_hook (bfd *abfd, asection *sec)
3692 {
3693   if (!sec->used_by_bfd)
3694     {
3695       _kvx_elf_section_data *sdata;
3696       bfd_size_type amt = sizeof (*sdata);
3697 
3698       sdata = bfd_zalloc (abfd, amt);
3699       if (sdata == NULL)
3700 	return false;
3701       sec->used_by_bfd = sdata;
3702     }
3703 
3704   return _bfd_elf_new_section_hook (abfd, sec);
3705 }
3706 
3707 /* Create dynamic sections. This is different from the ARM backend in that
3708    the got, plt, gotplt and their relocation sections are all created in the
3709    standard part of the bfd elf backend.  */
3710 
3711 static bool
3712 elfNN_kvx_create_dynamic_sections (bfd *dynobj,
3713 				   struct bfd_link_info *info)
3714 {
3715   struct elf_kvx_link_hash_table *htab;
3716 
3717   /* We need to create .got section.  */
3718   if (!kvx_elf_create_got_section (dynobj, info))
3719     return false;
3720 
3721   if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3722     return false;
3723 
3724   htab = elf_kvx_hash_table (info);
3725   htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3726   if (!bfd_link_pic (info))
3727     htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
3728 
3729   if (!htab->sdynbss || (!bfd_link_pic (info) && !htab->srelbss))
3730     abort ();
3731 
3732   return true;
3733 }
3734 
3735 
3736 /* Allocate space in .plt, .got and associated reloc sections for
3737    dynamic relocs.  */
3738 
3739 static bool
3740 elfNN_kvx_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
3741 {
3742   struct bfd_link_info *info;
3743   struct elf_kvx_link_hash_table *htab;
3744   struct elf_dyn_relocs *p;
3745 
3746   /* An example of a bfd_link_hash_indirect symbol is versioned
3747      symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
3748      -> __gxx_personality_v0(bfd_link_hash_defined)
3749 
3750      There is no need to process bfd_link_hash_indirect symbols here
3751      because we will also be presented with the concrete instance of
3752      the symbol and elfNN_kvx_copy_indirect_symbol () will have been
3753      called to copy all relevant data from the generic to the concrete
3754      symbol instance.  */
3755   if (h->root.type == bfd_link_hash_indirect)
3756     return true;
3757 
3758   if (h->root.type == bfd_link_hash_warning)
3759     h = (struct elf_link_hash_entry *) h->root.u.i.link;
3760 
3761   info = (struct bfd_link_info *) inf;
3762   htab = elf_kvx_hash_table (info);
3763 
3764   if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
3765     {
3766       /* Make sure this symbol is output as a dynamic symbol.
3767 	 Undefined weak syms won't yet be marked as dynamic.  */
3768       if (h->dynindx == -1 && !h->forced_local)
3769 	{
3770 	  if (!bfd_elf_link_record_dynamic_symbol (info, h))
3771 	    return false;
3772 	}
3773 
3774       if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
3775 	{
3776 	  asection *s = htab->root.splt;
3777 
3778 	  /* If this is the first .plt entry, make room for the special
3779 	     first entry.  */
3780 	  if (s->size == 0)
3781 	    s->size += htab->plt_header_size;
3782 
3783 	  h->plt.offset = s->size;
3784 
3785 	  /* If this symbol is not defined in a regular file, and we are
3786 	     not generating a shared library, then set the symbol to this
3787 	     location in the .plt.  This is required to make function
3788 	     pointers compare as equal between the normal executable and
3789 	     the shared library.  */
3790 	  if (!bfd_link_pic (info) && !h->def_regular)
3791 	    {
3792 	      h->root.u.def.section = s;
3793 	      h->root.u.def.value = h->plt.offset;
3794 	    }
3795 
3796 	  /* Make room for this entry. For now we only create the
3797 	     small model PLT entries. We later need to find a way
3798 	     of relaxing into these from the large model PLT entries.  */
3799 	  s->size += PLT_SMALL_ENTRY_SIZE;
3800 
3801 	  /* We also need to make an entry in the .got.plt section, which
3802 	     will be placed in the .got section by the linker script.  */
3803 	  htab->root.sgotplt->size += GOT_ENTRY_SIZE;
3804 
3805 	  /* We also need to make an entry in the .rela.plt section.  */
3806 	  htab->root.srelplt->size += RELOC_SIZE (htab);
3807 
3808 	  /* We need to ensure that all GOT entries that serve the PLT
3809 	     are consecutive with the special GOT slots [0] [1] and
3810 	     [2].  Any additional relocations must be placed after the
3811 	     PLT related entries.  We abuse the reloc_count such that
3812 	     during sizing we adjust reloc_count to indicate the
3813 	     number of PLT related reserved entries.  In subsequent
3814 	     phases when filling in the contents of the reloc entries,
3815 	     PLT related entries are placed by computing their PLT
3816 	     index (0 .. reloc_count), while other non-PLT relocs are
3817 	     placed at the slot indicated by reloc_count and
3818 	     reloc_count is updated.  */
3819 
3820 	  htab->root.srelplt->reloc_count++;
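	  /* For example, with two PLT entries reserved here,
	     reloc_count is 2 after sizing; their R_KVX_JMP_SLOT relocs
	     are later written at slots 0 and 1 (indexed by PLT index)
	     by elfNN_kvx_create_small_pltn_entry, while any non-PLT
	     reloc that lands in .rela.plt is placed at the slot given
	     by reloc_count, which is then incremented.  */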
3821 	}
3822       else
3823 	{
3824 	  h->plt.offset = (bfd_vma) - 1;
3825 	  h->needs_plt = 0;
3826 	}
3827     }
3828   else
3829     {
3830       h->plt.offset = (bfd_vma) - 1;
3831       h->needs_plt = 0;
3832     }
3833 
3834   if (h->got.refcount > 0)
3835     {
3836       bool dyn;
3837       unsigned got_type = elf_kvx_hash_entry (h)->got_type;
3838 
3839       h->got.offset = (bfd_vma) - 1;
3840 
3841       dyn = htab->root.dynamic_sections_created;
3842 
3843       /* Make sure this symbol is output as a dynamic symbol.
3844 	 Undefined weak syms won't yet be marked as dynamic.  */
3845       if (dyn && h->dynindx == -1 && !h->forced_local)
3846 	{
3847 	  if (!bfd_elf_link_record_dynamic_symbol (info, h))
3848 	    return false;
3849 	}
3850 
3851       if (got_type == GOT_UNKNOWN)
3852 	{
3853 	  (*_bfd_error_handler)
3854 	    (_("relocation against `%s' has faulty GOT type "),
3855 	     (h) ? h->root.root.string : "a local symbol");
3856 	  bfd_set_error (bfd_error_bad_value);
3857 	  return false;
3858 	}
3859       else if (got_type == GOT_NORMAL)
3860 	{
3861 	  h->got.offset = htab->root.sgot->size;
3862 	  htab->root.sgot->size += GOT_ENTRY_SIZE;
3863 	  if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3864 	       || h->root.type != bfd_link_hash_undefweak)
3865 	      && (bfd_link_pic (info)
3866 		  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3867 	    {
3868 	      htab->root.srelgot->size += RELOC_SIZE (htab);
3869 	    }
3870 	}
3871       else
3872 	{
3873 	  int indx;
3874 
3875 	  /* Any of these will require 2 GOT slots because
3876 	     they use __tls_get_addr ().  */
3877 	  if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
3878 	    {
3879 	      h->got.offset = htab->root.sgot->size;
3880 	      htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
3881 	    }
3882 
3883 	  if (got_type & GOT_TLS_IE)
3884 	    {
3885 	      h->got.offset = htab->root.sgot->size;
3886 	      htab->root.sgot->size += GOT_ENTRY_SIZE;
3887 	    }
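	  /* For example, got_type == (GOT_TLS_GD | GOT_TLS_IE)
	     reserves three slots in total: a GD pair followed by an
	     IE slot, with h->got.offset left pointing at the IE
	     slot.  */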
3888 
3889 	  indx = h && h->dynindx != -1 ? h->dynindx : 0;
3890 	  if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3891 	       || h->root.type != bfd_link_hash_undefweak)
3892 	      && (bfd_link_pic (info)
3893 		  || indx != 0
3894 		  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3895 	    {
3896 	      /* Only the GD case requires 2 relocations. */
3897 	      if (got_type & GOT_TLS_GD)
3898 		htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
3899 
3900 	      /* LD needs a DTPMOD reloc, IE needs a DTPOFF. */
3901 	      if (got_type & (GOT_TLS_LD | GOT_TLS_IE))
3902 		htab->root.srelgot->size += RELOC_SIZE (htab);
3903 	    }
3904 	}
3905     }
3906   else
3907     {
3908       h->got.offset = (bfd_vma) - 1;
3909     }
3910 
3911   if (h->dyn_relocs == NULL)
3912     return true;
3913 
3914   /* In the shared -Bsymbolic case, discard space allocated for
3915      dynamic pc-relative relocs against symbols which turn out to be
3916      defined in regular objects.  For the normal shared case, discard
3917      space for pc-relative relocs that have become local due to symbol
3918      visibility changes.  */
3919 
3920   if (bfd_link_pic (info))
3921     {
3922       /* Relocs that use pc_count are those that appear on a call
3923 	 insn, or certain REL relocs that can be generated via assembly.
3924 	 We want calls to protected symbols to resolve directly to the
3925 	 function rather than going via the plt.  If people want
3926 	 function pointer comparisons to work as expected then they
3927 	 should avoid writing weird assembly.  */
3928       if (SYMBOL_CALLS_LOCAL (info, h))
3929 	{
3930 	  struct elf_dyn_relocs **pp;
3931 
3932 	  for (pp = &h->dyn_relocs; (p = *pp) != NULL;)
3933 	    {
3934 	      p->count -= p->pc_count;
3935 	      p->pc_count = 0;
3936 	      if (p->count == 0)
3937 		*pp = p->next;
3938 	      else
3939 		pp = &p->next;
3940 	    }
3941 	}
3942 
3943       /* Also discard relocs on undefined weak syms with non-default
3944 	 visibility.  */
3945       if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
3946 	{
3947 	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3948 	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
3949 	    h->dyn_relocs = NULL;
3950 
3951 	  /* Make sure undefined weak symbols are output as a dynamic
3952 	     symbol in PIEs.  */
3953 	  else if (h->dynindx == -1
3954 		   && !h->forced_local
3955 		   && !bfd_elf_link_record_dynamic_symbol (info, h))
3956 	    return false;
3957 	}
3958 
3959     }
3960   else if (ELIMINATE_COPY_RELOCS)
3961     {
3962       /* For the non-shared case, discard space for relocs against
3963 	 symbols which turn out to need copy relocs or are not
3964 	 dynamic.  */
3965 
3966       if (!h->non_got_ref
3967 	  && ((h->def_dynamic
3968 	       && !h->def_regular)
3969 	      || (htab->root.dynamic_sections_created
3970 		  && (h->root.type == bfd_link_hash_undefweak
3971 		      || h->root.type == bfd_link_hash_undefined))))
3972 	{
3973 	  /* Make sure this symbol is output as a dynamic symbol.
3974 	     Undefined weak syms won't yet be marked as dynamic.  */
3975 	  if (h->dynindx == -1
3976 	      && !h->forced_local
3977 	      && !bfd_elf_link_record_dynamic_symbol (info, h))
3978 	    return false;
3979 
3980 	  /* If that succeeded, we know we'll be keeping all the
3981 	     relocs.  */
3982 	  if (h->dynindx != -1)
3983 	    goto keep;
3984 	}
3985 
3986       h->dyn_relocs = NULL;
3987 
3988     keep:;
3989     }
3990 
3991   /* Finally, allocate space.  */
3992   for (p = h->dyn_relocs; p != NULL; p = p->next)
3993     {
3994       asection *sreloc;
3995 
3996       sreloc = elf_section_data (p->sec)->sreloc;
3997 
3998       BFD_ASSERT (sreloc != NULL);
3999 
4000       sreloc->size += p->count * RELOC_SIZE (htab);
4001     }
4002 
4003   return true;
4004 }
4005 
4006 /* Find any dynamic relocs that apply to read-only sections.  */
4007 
4008 static bool
4009 kvx_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
4010 {
4011   struct elf_dyn_relocs * p;
4012 
4013   for (p = h->dyn_relocs; p != NULL; p = p->next)
4014     {
4015       asection *s = p->sec;
4016 
4017       if (s != NULL && (s->flags & SEC_READONLY) != 0)
4018 	{
4019 	  struct bfd_link_info *info = (struct bfd_link_info *) inf;
4020 
4021 	  info->flags |= DF_TEXTREL;
4022 	  info->callbacks->minfo (_("%pB: dynamic relocation against `%pT' in "
4023 				    "read-only section `%pA'\n"),
4024 				  s->owner, h->root.root.string, s);
4025 
4026 	  /* Not an error, just cut short the traversal.  */
4027 	  return false;
4028 	}
4029     }
4030   return true;
4031 }
4032 
4033 /* This is the most important function of all.  Innocuously named
4034    though!  */
4035 static bool
4036 elfNN_kvx_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
4037 				 struct bfd_link_info *info)
4038 {
4039   struct elf_kvx_link_hash_table *htab;
4040   bfd *dynobj;
4041   asection *s;
4042   bool relocs;
4043   bfd *ibfd;
4044 
4045   htab = elf_kvx_hash_table ((info));
4046   dynobj = htab->root.dynobj;
4047 
4048   BFD_ASSERT (dynobj != NULL);
4049 
4050   if (htab->root.dynamic_sections_created)
4051     {
4052       if (bfd_link_executable (info) && !info->nointerp)
4053 	{
4054 	  s = bfd_get_linker_section (dynobj, ".interp");
4055 	  if (s == NULL)
4056 	    abort ();
4057 	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
4058 	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
4059 	}
4060     }
4061 
4062   /* Set up .got offsets for local syms, and space for local dynamic
4063      relocs.  */
4064   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4065     {
4066       struct elf_kvx_local_symbol *locals = NULL;
4067       Elf_Internal_Shdr *symtab_hdr;
4068       asection *srel;
4069       unsigned int i;
4070 
4071       if (!is_kvx_elf (ibfd))
4072 	continue;
4073 
4074       for (s = ibfd->sections; s != NULL; s = s->next)
4075 	{
4076 	  struct elf_dyn_relocs *p;
4077 
4078 	  for (p = (struct elf_dyn_relocs *)
4079 		 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
4080 	    {
4081 	      if (!bfd_is_abs_section (p->sec)
4082 		  && bfd_is_abs_section (p->sec->output_section))
4083 		{
4084 		  /* Input section has been discarded, either because
4085 		     it is a copy of a linkonce section or due to
4086 		     linker script /DISCARD/, so we'll be discarding
4087 		     the relocs too.  */
4088 		}
4089 	      else if (p->count != 0)
4090 		{
4091 		  srel = elf_section_data (p->sec)->sreloc;
4092 		  srel->size += p->count * RELOC_SIZE (htab);
4093 		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
4094 		    info->flags |= DF_TEXTREL;
4095 		}
4096 	    }
4097 	}
4098 
4099       locals = elf_kvx_locals (ibfd);
4100       if (!locals)
4101 	continue;
4102 
4103       symtab_hdr = &elf_symtab_hdr (ibfd);
4104       srel = htab->root.srelgot;
4105       for (i = 0; i < symtab_hdr->sh_info; i++)
4106 	{
4107 	  locals[i].got_offset = (bfd_vma) - 1;
4108 	  if (locals[i].got_refcount > 0)
4109 	    {
4110 	      unsigned got_type = locals[i].got_type;
4111 	      if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
4112 		{
4113 		  locals[i].got_offset = htab->root.sgot->size;
4114 		  htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
4115 		}
4116 
4117 	      if (got_type & (GOT_NORMAL | GOT_TLS_IE ))
4118 		{
4119 		  locals[i].got_offset = htab->root.sgot->size;
4120 		  htab->root.sgot->size += GOT_ENTRY_SIZE;
4121 		}
4122 
4123 	      if (got_type == GOT_UNKNOWN)
4124 		{
4125 		}
4126 
4127 	      if (bfd_link_pic (info))
4128 		{
4129 		  if (got_type & GOT_TLS_GD)
4130 		    htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
4131 
4132 		  if (got_type & GOT_TLS_IE
4133 		      || got_type & GOT_TLS_LD
4134 		      || got_type & GOT_NORMAL)
4135 		    htab->root.srelgot->size += RELOC_SIZE (htab);
4136 		}
4137 	    }
4138 	  else
4139 	    {
4140 	      locals[i].got_refcount = (bfd_vma) - 1;
4141 	    }
4142 	}
4143     }
4144 
4145 
4146   /* Allocate global sym .plt and .got entries, and space for global
4147      sym dynamic relocs.  */
4148   elf_link_hash_traverse (&htab->root, elfNN_kvx_allocate_dynrelocs,
4149 			  info);
4150 
4151   /* For every jump slot reserved in the sgotplt, reloc_count is
4152      incremented.  However, when we reserve space for TLS descriptors,
4153      it's not incremented, so in order to compute the space reserved
4154      for them, it suffices to multiply the reloc count by the jump
4155      slot size.  */
4156 
4157   if (htab->root.srelplt)
4158     htab->sgotplt_jump_table_size = kvx_compute_jump_table_size (htab);
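  /* For example, four reserved PLT relocs give a
     sgotplt_jump_table_size of 4 * GOT_ENTRY_SIZE, i.e. 32 bytes in
     ELF64 or 16 bytes in ELF32.  */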
4159 
4160   /* We now have determined the sizes of the various dynamic sections.
4161      Allocate memory for them.  */
4162   relocs = false;
4163   for (s = dynobj->sections; s != NULL; s = s->next)
4164     {
4165       if ((s->flags & SEC_LINKER_CREATED) == 0)
4166 	continue;
4167 
4168       if (s == htab->root.splt
4169 	  || s == htab->root.sgot
4170 	  || s == htab->root.sgotplt
4171 	  || s == htab->root.iplt
4172 	  || s == htab->root.igotplt || s == htab->sdynbss)
4173 	{
4174 	  /* Strip this section if we don't need it; see the
4175 	     comment below.  */
4176 	}
4177       else if (startswith (bfd_section_name (s), ".rela"))
4178 	{
4179 	  if (s->size != 0 && s != htab->root.srelplt)
4180 	    relocs = true;
4181 
4182 	  /* We use the reloc_count field as a counter if we need
4183 	     to copy relocs into the output file.  */
4184 	  if (s != htab->root.srelplt)
4185 	    s->reloc_count = 0;
4186 	}
4187       else
4188 	{
4189 	  /* It's not one of our sections, so don't allocate space.  */
4190 	  continue;
4191 	}
4192 
4193       if (s->size == 0)
4194 	{
4195 	  /* If we don't need this section, strip it from the
4196 	     output file.  This is mostly to handle .rela.bss and
4197 	     .rela.plt.  We must create both sections in
4198 	     create_dynamic_sections, because they must be created
4199 	     before the linker maps input sections to output
4200 	     sections.  The linker does that before
4201 	     adjust_dynamic_symbol is called, and it is that
4202 	     function which decides whether anything needs to go
4203 	     into these sections.  */
4204 
4205 	  s->flags |= SEC_EXCLUDE;
4206 	  continue;
4207 	}
4208 
4209       if ((s->flags & SEC_HAS_CONTENTS) == 0)
4210 	continue;
4211 
4212       /* Allocate memory for the section contents.  We use bfd_zalloc
4213 	 here in case unused entries are not reclaimed before the
4214 	 section's contents are written out.  This should not happen,
4215 	 but this way if it does, we get a R_KVX_NONE reloc instead
4216 	 of garbage.  */
4217       s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
4218       if (s->contents == NULL)
4219 	return false;
4220     }
4221 
4222   if (htab->root.dynamic_sections_created)
4223     {
4224       /* Add some entries to the .dynamic section.  We fill in the
4225 	 values later, in elfNN_kvx_finish_dynamic_sections, but we
4226 	 must add the entries now so that we get the correct size for
4227 	 the .dynamic section.  The DT_DEBUG entry is filled in by the
4228 	 dynamic linker and used by the debugger.  */
4229 #define add_dynamic_entry(TAG, VAL)			\
4230       _bfd_elf_add_dynamic_entry (info, TAG, VAL)
4231 
4232       if (bfd_link_executable (info))
4233 	{
4234 	  if (!add_dynamic_entry (DT_DEBUG, 0))
4235 	    return false;
4236 	}
4237 
4238       if (htab->root.splt->size != 0)
4239 	{
4240 	  if (!add_dynamic_entry (DT_PLTGOT, 0)
4241 	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
4242 	      || !add_dynamic_entry (DT_PLTREL, DT_RELA)
4243 	      || !add_dynamic_entry (DT_JMPREL, 0))
4244 	    return false;
4245 	}
4246 
4247       if (relocs)
4248 	{
4249 	  if (!add_dynamic_entry (DT_RELA, 0)
4250 	      || !add_dynamic_entry (DT_RELASZ, 0)
4251 	      || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
4252 	    return false;
4253 
4254 	  /* If any dynamic relocs apply to a read-only section,
4255 	     then we need a DT_TEXTREL entry.  */
4256 	  if ((info->flags & DF_TEXTREL) == 0)
4257 	    elf_link_hash_traverse (&htab->root, kvx_readonly_dynrelocs,
4258 				    info);
4259 
4260 	  if ((info->flags & DF_TEXTREL) != 0)
4261 	    {
4262 	      if (!add_dynamic_entry (DT_TEXTREL, 0))
4263 		return false;
4264 	    }
4265 	}
4266     }
4267 #undef add_dynamic_entry
4268 
4269   return true;
4270 }
4271 
4272 static inline void
4273 elf_kvx_update_plt_entry (bfd *output_bfd,
4274 			  bfd_reloc_code_real_type r_type,
4275 			  bfd_byte *plt_entry, bfd_vma value)
4276 {
4277   reloc_howto_type *howto = elfNN_kvx_howto_from_bfd_reloc (r_type);
4278   BFD_ASSERT(howto != NULL);
4279   _bfd_kvx_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
4280 }
4281 
4282 static void
4283 elfNN_kvx_create_small_pltn_entry (struct elf_link_hash_entry *h,
4284 				   struct elf_kvx_link_hash_table *htab,
4285 				   bfd *output_bfd)
4286 {
4287   bfd_byte *plt_entry;
4288   bfd_vma plt_index;
4289   bfd_vma got_offset;
4290   bfd_vma gotplt_entry_address;
4291   bfd_vma plt_entry_address;
4292   Elf_Internal_Rela rela;
4293   bfd_byte *loc;
4294   asection *plt, *gotplt, *relplt;
4295 
4296   plt = htab->root.splt;
4297   gotplt = htab->root.sgotplt;
4298   relplt = htab->root.srelplt;
4299 
4300   /* Get the index in the procedure linkage table which
4301      corresponds to this symbol.  This is the index of this symbol
4302      in all the symbols for which we are making plt entries.  The
4303      first entry in the procedure linkage table is reserved.
4304 
4305      Get the offset into the .got table of the entry that
4306      corresponds to this function.	Each .got entry is GOT_ENTRY_SIZE
4307      bytes. The first three are reserved for the dynamic linker.
4308 
4309      For static executables, we don't reserve anything.  */
4310 
4311   if (plt == htab->root.splt)
4312     {
4313       plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
4314       got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
4315     }
4316   else
4317     {
4318       plt_index = h->plt.offset / htab->plt_entry_size;
4319       got_offset = plt_index * GOT_ENTRY_SIZE;
4320     }
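  /* A sketch of the arithmetic, assuming plt_header_size ==
     PLT_ENTRY_SIZE and plt_entry_size == PLT_SMALL_ENTRY_SIZE: the
     first small PLT entry has h->plt.offset == PLT_ENTRY_SIZE, so
     plt_index is 0 and got_offset is (0 + 3) * GOT_ENTRY_SIZE, the
     slot immediately after the three .got.plt slots reserved for the
     dynamic linker.  */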
4321 
4322   plt_entry = plt->contents + h->plt.offset;
4323   plt_entry_address = plt->output_section->vma
4324     + plt->output_offset + h->plt.offset;
4325   gotplt_entry_address = gotplt->output_section->vma +
4326     gotplt->output_offset + got_offset;
4327 
4328   /* Copy in the boiler-plate for the PLTn entry.  */
4329   memcpy (plt_entry, elfNN_kvx_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
4330 
4331   /* Patch the loading of the GOT entry, relative to the PLT entry
4332      address. */
4333 
4334   /* Use a 37-bit offset for both 32-bit and 64-bit modes.
4335      Fill the LO10 of lw $r9 = 0[$r14].  */
4336   elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_LO10,
4337 			   plt_entry+4,
4338 			   gotplt_entry_address - plt_entry_address);
4339 
4340   /* Fill the UP27 of lw $r9 = 0[$r14].  */
4341   elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_UP27,
4342 			   plt_entry+8,
4343 			   gotplt_entry_address - plt_entry_address);
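  /* The S37 pair splits the 37-bit signed displacement computed
     above: the LO10 part patches its low 10 bits into the word at
     plt_entry + 4, and the UP27 part patches the upper 27 bits into
     the word at plt_entry + 8.  */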
4344 
4345   rela.r_offset = gotplt_entry_address;
4346 
4347   /* Fill in the entry in the .rela.plt section.  */
4348   rela.r_info = ELFNN_R_INFO (h->dynindx, R_KVX_JMP_SLOT);
4349   rela.r_addend = 0;
4350 
4351   /* Compute the relocation entry to use based on the PLT index and do
4352      not adjust reloc_count. The reloc_count has already been adjusted
4353      to account for this entry.  */
4354   loc = relplt->contents + plt_index * RELOC_SIZE (htab);
4355   bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4356 }
4357 
4358 /* Size sections even though they're not dynamic.  We use it to set up
4359    _TLS_MODULE_BASE_, if needed.  */
4360 
4361 static bool
4362 elfNN_kvx_always_size_sections (bfd *output_bfd,
4363 				struct bfd_link_info *info)
4364 {
4365   asection *tls_sec;
4366 
4367   if (bfd_link_relocatable (info))
4368     return true;
4369 
4370   tls_sec = elf_hash_table (info)->tls_sec;
4371 
4372   if (tls_sec)
4373     {
4374       struct elf_link_hash_entry *tlsbase;
4375 
4376       tlsbase = elf_link_hash_lookup (elf_hash_table (info),
4377 				      "_TLS_MODULE_BASE_", true, true, false);
4378 
4379       if (tlsbase)
4380 	{
4381 	  struct bfd_link_hash_entry *h = NULL;
4382 	  const struct elf_backend_data *bed =
4383 	    get_elf_backend_data (output_bfd);
4384 
4385 	  if (!(_bfd_generic_link_add_one_symbol
4386 		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
4387 		 tls_sec, 0, NULL, false, bed->collect, &h)))
4388 	    return false;
4389 
4390 	  tlsbase->type = STT_TLS;
4391 	  tlsbase = (struct elf_link_hash_entry *) h;
4392 	  tlsbase->def_regular = 1;
4393 	  tlsbase->other = STV_HIDDEN;
4394 	  (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
4395 	}
4396     }
4397 
4398   return true;
4399 }
4400 
4401 /* Finish up dynamic symbol handling.  We set the contents of various
4402    dynamic sections here.  */
4403 static bool
4404 elfNN_kvx_finish_dynamic_symbol (bfd *output_bfd,
4405 				 struct bfd_link_info *info,
4406 				 struct elf_link_hash_entry *h,
4407 				 Elf_Internal_Sym *sym)
4408 {
4409   struct elf_kvx_link_hash_table *htab;
4410   htab = elf_kvx_hash_table (info);
4411 
4412   if (h->plt.offset != (bfd_vma) - 1)
4413     {
4414       asection *plt = NULL, *gotplt = NULL, *relplt = NULL;
4415 
4416       /* This symbol has an entry in the procedure linkage table.  Set
4417 	 it up.  */
4418 
4419       if (htab->root.splt != NULL)
4420 	{
4421 	  plt = htab->root.splt;
4422 	  gotplt = htab->root.sgotplt;
4423 	  relplt = htab->root.srelplt;
4424 	}
4425 
4426       /* This symbol has an entry in the procedure linkage table.  Set
4427 	 it up.	 */
4428       if ((h->dynindx == -1
4429 	   && !((h->forced_local || bfd_link_executable (info))
4430 		&& h->def_regular
4431 		&& h->type == STT_GNU_IFUNC))
4432 	  || plt == NULL
4433 	  || gotplt == NULL
4434 	  || relplt == NULL)
4435 	abort ();
4436 
4437       elfNN_kvx_create_small_pltn_entry (h, htab, output_bfd);
4438       if (!h->def_regular)
4439 	{
4440 	  /* Mark the symbol as undefined, rather than as defined in
4441 	     the .plt section.  */
4442 	  sym->st_shndx = SHN_UNDEF;
4443 	  /* If the symbol is weak we need to clear the value.
4444 	     Otherwise, the PLT entry would provide a definition for
4445 	     the symbol even if the symbol wasn't defined anywhere,
4446 	     and so the symbol would never be NULL.  Leave the value if
4447 	     there were any relocations where pointer equality matters
4448 	     (this is a clue for the dynamic linker, to make function
4449 	     pointer comparisons work between an application and shared
4450 	     library).  */
4451 	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
4452 	    sym->st_value = 0;
4453 	}
4454     }
4455 
4456   if (h->got.offset != (bfd_vma) - 1
4457       && elf_kvx_hash_entry (h)->got_type == GOT_NORMAL)
4458     {
4459       Elf_Internal_Rela rela;
4460       bfd_byte *loc;
4461 
4462       /* This symbol has an entry in the global offset table.  Set it
4463 	 up.  */
4464       if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
4465 	abort ();
4466 
4467       rela.r_offset = (htab->root.sgot->output_section->vma
4468 		       + htab->root.sgot->output_offset
4469 		       + (h->got.offset & ~(bfd_vma) 1));
4470 
4471 #ifdef UGLY_DEBUG
4472       printf("setting rela at offset 0x%x(0x%x + 0x%x + 0x%x) for %s\n",
4473 	     rela.r_offset,
4474 	     htab->root.sgot->output_section->vma,
4475 	     htab->root.sgot->output_offset,
4476 	     h->got.offset,
4477 	     h->root.root.string);
4478 #endif
4479 
4480       if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
4481 	{
4482 	  if (!h->def_regular)
4483 	    return false;
4484 
4485 	  /* In the case of a PLT-related GOT entry, it is not clear who
4486 	     is supposed to set the LSB of the GOT entry...
4487 	     kvx_calculate_got_entry_vma () would be a good candidate,
4488 	     but it is not called currently,
4489 	     so we comment out the assertion for now.  */
4490 	  // BFD_ASSERT ((h->got.offset & 1) != 0);
4491 	  rela.r_info = ELFNN_R_INFO (0, R_KVX_RELATIVE);
4492 	  rela.r_addend = (h->root.u.def.value
4493 			   + h->root.u.def.section->output_section->vma
4494 			   + h->root.u.def.section->output_offset);
4495 	}
4496       else
4497 	{
4498 	  BFD_ASSERT ((h->got.offset & 1) == 0);
4499 	  bfd_put_NN (output_bfd, (bfd_vma) 0,
4500 		      htab->root.sgot->contents + h->got.offset);
4501 	  rela.r_info = ELFNN_R_INFO (h->dynindx, R_KVX_GLOB_DAT);
4502 	  rela.r_addend = 0;
4503 	}
4504 
4505       loc = htab->root.srelgot->contents;
4506       loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
4507       bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4508     }
4509 
4510   if (h->needs_copy)
4511     {
4512       Elf_Internal_Rela rela;
4513       bfd_byte *loc;
4514 
4515       /* This symbol needs a copy reloc.  Set it up.  */
4516 
4517       if (h->dynindx == -1
4518 	  || (h->root.type != bfd_link_hash_defined
4519 	      && h->root.type != bfd_link_hash_defweak)
4520 	  || htab->srelbss == NULL)
4521 	abort ();
4522 
4523       rela.r_offset = (h->root.u.def.value
4524 		       + h->root.u.def.section->output_section->vma
4525 		       + h->root.u.def.section->output_offset);
4526       rela.r_info = ELFNN_R_INFO (h->dynindx, R_KVX_COPY);
4527       rela.r_addend = 0;
4528       loc = htab->srelbss->contents;
4529       loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
4530       bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4531     }
4532 
4533   /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  SYM may
4534      be NULL for local symbols.  */
4535   if (sym != NULL
4536       && (h == elf_hash_table (info)->hdynamic
4537 	  || h == elf_hash_table (info)->hgot))
4538     sym->st_shndx = SHN_ABS;
4539 
4540   return true;
4541 }
4542 
4543 static void
4544 elfNN_kvx_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
4545 				 struct elf_kvx_link_hash_table *htab)
4546 {
4547   memcpy (htab->root.splt->contents, elfNN_kvx_small_plt0_entry,
4548 	  PLT_ENTRY_SIZE);
4549   elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
4550     PLT_ENTRY_SIZE;
4551 }
4552 
4553 static bool
4554 elfNN_kvx_finish_dynamic_sections (bfd *output_bfd,
4555 				   struct bfd_link_info *info)
4556 {
4557   struct elf_kvx_link_hash_table *htab;
4558   bfd *dynobj;
4559   asection *sdyn;
4560 
4561   htab = elf_kvx_hash_table (info);
4562   dynobj = htab->root.dynobj;
4563   sdyn = bfd_get_linker_section (dynobj, ".dynamic");
4564 
4565   if (htab->root.dynamic_sections_created)
4566     {
4567       ElfNN_External_Dyn *dyncon, *dynconend;
4568 
4569       if (sdyn == NULL || htab->root.sgot == NULL)
4570 	abort ();
4571 
4572       dyncon = (ElfNN_External_Dyn *) sdyn->contents;
4573       dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
4574       for (; dyncon < dynconend; dyncon++)
4575 	{
4576 	  Elf_Internal_Dyn dyn;
4577 	  asection *s;
4578 
4579 	  bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
4580 
4581 	  switch (dyn.d_tag)
4582 	    {
4583 	    default:
4584 	      continue;
4585 
4586 	    case DT_PLTGOT:
4587 	      s = htab->root.sgotplt;
4588 	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
4589 	      break;
4590 
4591 	    case DT_JMPREL:
4592 	      s = htab->root.srelplt;
4593 	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
4594 	      break;
4595 
4596 	    case DT_PLTRELSZ:
4597 	      s = htab->root.srelplt;
4598 	      dyn.d_un.d_val = s->size;
4599 	      break;
4600 
4601 	    case DT_RELASZ:
4602 	      /* The procedure linkage table relocs (DT_JMPREL) should
4603 		 not be included in the overall relocs (DT_RELA).
4604 		 Therefore, we override the DT_RELASZ entry here to
4605 		 make it not include the JMPREL relocs.  Since the
4606 		 linker script arranges for .rela.plt to follow all
4607 		 other relocation sections, we don't have to worry
4608 		 about changing the DT_RELA entry.  */
4609 	      if (htab->root.srelplt != NULL)
4610 		{
4611 		  s = htab->root.srelplt;
4612 		  dyn.d_un.d_val -= s->size;
4613 		}
4614 	      break;
4615 	    }
4616 
4617 	  bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
4618 	}
4619 
4620     }
4621 
4622   /* Fill in the special first entry in the procedure linkage table.  */
4623   if (htab->root.splt && htab->root.splt->size > 0)
4624     {
4625       elfNN_kvx_init_small_plt0_entry (output_bfd, htab);
4626 
4627       elf_section_data (htab->root.splt->output_section)->
4628 	this_hdr.sh_entsize = htab->plt_entry_size;
4629     }
4630 
4631   if (htab->root.sgotplt)
4632     {
4633       if (bfd_is_abs_section (htab->root.sgotplt->output_section))
4634 	{
4635 	  (*_bfd_error_handler)
4636 	    (_("discarded output section: `%pA'"), htab->root.sgotplt);
4637 	  return false;
4638 	}
4639 
4640       /* Fill in the first three entries in the global offset table.  */
4641       if (htab->root.sgotplt->size > 0)
4642 	{
4643 	  bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
4644 
4645 	  /* Write GOT[1] and GOT[2], needed for the dynamic linker.  */
4646 	  bfd_put_NN (output_bfd,
4647 		      (bfd_vma) 0,
4648 		      htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
4649 	  bfd_put_NN (output_bfd,
4650 		      (bfd_vma) 0,
4651 		      htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
4652 	}
4653 
4654       if (htab->root.sgot)
4655 	{
4656 	  if (htab->root.sgot->size > 0)
4657 	    {
4658 	      bfd_vma addr =
4659 		sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
4660 	      bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
4661 	    }
4662 	}
4663 
4664       elf_section_data (htab->root.sgotplt->output_section)->
4665 	this_hdr.sh_entsize = GOT_ENTRY_SIZE;
4666     }
4667 
4668   if (htab->root.sgot && htab->root.sgot->size > 0)
4669     elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
4670       = GOT_ENTRY_SIZE;
4671 
4672   return true;
4673 }
4674 
4675 /* Return address for Ith PLT stub in section PLT, for relocation REL
4676    or (bfd_vma) -1 if it should not be included.  */
4677 
4678 static bfd_vma
4679 elfNN_kvx_plt_sym_val (bfd_vma i, const asection *plt,
4680 		       const arelent *rel ATTRIBUTE_UNUSED)
4681 {
4682   return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
4683 }
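/* That is, stub 0 sits right after the PLT_ENTRY_SIZE-byte PLT0 header,
   and each subsequent stub is PLT_SMALL_ENTRY_SIZE bytes further on.  */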
4684 
4685 #define ELF_ARCH			bfd_arch_kvx
4686 #define ELF_MACHINE_CODE		EM_KVX
4687 #define ELF_MAXPAGESIZE			0x10000
4688 #define ELF_MINPAGESIZE			0x1000
4689 #define ELF_COMMONPAGESIZE		0x1000
4690 
4691 #define bfd_elfNN_bfd_link_hash_table_create    \
4692   elfNN_kvx_link_hash_table_create
4693 
4694 #define bfd_elfNN_bfd_merge_private_bfd_data	\
4695   elfNN_kvx_merge_private_bfd_data
4696 
4697 #define bfd_elfNN_bfd_print_private_bfd_data	\
4698   elfNN_kvx_print_private_bfd_data
4699 
4700 #define bfd_elfNN_bfd_reloc_type_lookup		\
4701   elfNN_kvx_reloc_type_lookup
4702 
4703 #define bfd_elfNN_bfd_reloc_name_lookup		\
4704   elfNN_kvx_reloc_name_lookup
4705 
4706 #define bfd_elfNN_bfd_set_private_flags		\
4707   elfNN_kvx_set_private_flags
4708 
4709 #define bfd_elfNN_mkobject			\
4710   elfNN_kvx_mkobject
4711 
4712 #define bfd_elfNN_new_section_hook		\
4713   elfNN_kvx_new_section_hook
4714 
4715 #define elf_backend_adjust_dynamic_symbol	\
4716   elfNN_kvx_adjust_dynamic_symbol
4717 
4718 #define elf_backend_always_size_sections	\
4719   elfNN_kvx_always_size_sections
4720 
4721 #define elf_backend_check_relocs		\
4722   elfNN_kvx_check_relocs
4723 
4724 #define elf_backend_copy_indirect_symbol	\
4725   elfNN_kvx_copy_indirect_symbol
4726 
4727 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
4728    to them in our hash.  */
4729 #define elf_backend_create_dynamic_sections	\
4730   elfNN_kvx_create_dynamic_sections
4731 
4732 #define elf_backend_init_index_section		\
4733   _bfd_elf_init_2_index_sections
4734 
4735 #define elf_backend_finish_dynamic_sections	\
4736   elfNN_kvx_finish_dynamic_sections
4737 
4738 #define elf_backend_finish_dynamic_symbol	\
4739   elfNN_kvx_finish_dynamic_symbol
4740 
4741 #define elf_backend_object_p			\
4742   elfNN_kvx_object_p
4743 
4744 #define elf_backend_output_arch_local_syms      \
4745   elfNN_kvx_output_arch_local_syms
4746 
4747 #define elf_backend_plt_sym_val			\
4748   elfNN_kvx_plt_sym_val
4749 
4750 #define elf_backend_init_file_header		\
4751   elfNN_kvx_init_file_header
4752 
4753 #define elf_backend_init_process_headers	\
4754   elfNN_kvx_init_process_headers
4755 
4756 #define elf_backend_relocate_section		\
4757   elfNN_kvx_relocate_section
4758 
4759 #define elf_backend_reloc_type_class		\
4760   elfNN_kvx_reloc_type_class
4761 
4762 #define elf_backend_size_dynamic_sections	\
4763   elfNN_kvx_size_dynamic_sections
4764 
4765 #define elf_backend_can_refcount       1
4766 #define elf_backend_can_gc_sections    1
4767 #define elf_backend_plt_readonly       1
4768 #define elf_backend_want_got_plt       1
4769 #define elf_backend_want_plt_sym       0
4770 #define elf_backend_may_use_rel_p      0
4771 #define elf_backend_may_use_rela_p     1
4772 #define elf_backend_default_use_rela_p 1
4773 #define elf_backend_rela_normal        1
4774 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
4775 #define elf_backend_default_execstack  0
4776 #define elf_backend_extern_protected_data 1
4777 #define elf_backend_hash_symbol elf_kvx_hash_symbol
4778 
4779 #include "elfNN-target.h"
4780