1 /* tc-kvx.c -- Assemble for the KVX ISA
2 
3    Copyright (C) 2009-2024 Free Software Foundation, Inc.
4    Contributed by Kalray SA.
5 
6    This file is part of GAS.
7 
8    GAS is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the License, or
11    (at your option) any later version.
12 
13    GAS is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with this program; see the file COPYING3. If not,
20    see <http://www.gnu.org/licenses/>.  */
21 
22 #include "as.h"
23 #include "obstack.h"
24 #include "subsegs.h"
25 #include "tc-kvx.h"
26 #include "libiberty.h"
27 
28 #include <assert.h>
29 #include <stdio.h>
30 #include <stdint.h>
31 #include <string.h>
32 #include <ctype.h>
33 
34 #ifdef OBJ_ELF
35 #include "elf/kvx.h"
36 #include "dwarf2dbg.h"
37 #include "dw2gencfi.h"
38 #endif
39 
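/* Debug trace helper: D (stderr, "...", ...) forwards its arguments to
   fprintf, but only when the `debug' flag is set.  */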
40 #define D(args...) do { if(debug) fprintf(args); } while(0)
41 
42 static void supported_cores (char buf[], size_t buflen);
43 
44 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
45 
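/* NULL-safe string comparisons: a NULL argument compares as the empty
   string; STRNEQ compares at most N characters.  */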
46 #define STREQ(x,y) !strcmp(((x) ? (x) : ""), ((y) ? (y) : ""))
47 #define STRNEQ(x,y,n) !strncmp(((x) ? (x) : ""), ((y) ? (y) : ""),(n))
48 
49 /* The PARALLEL_BIT is set to 0 when an instruction is the last of a bundle. */
50 #define PARALLEL_BIT (1u << 31)
51 
52 /*TB begin*/
53 int size_type_function = 1;
54 /*TB end */
55 
56 struct kvx_as_env env = {
57   .params = {
58     .abi = ELF_KVX_ABI_UNDEF,
59     .osabi = ELFOSABI_NONE,
60     .core = -1,
61     .core_set = 0,
62     .abi_set = 0,
63     .osabi_set = 0,
64     .pic_flags = 0,
65     .arch_size = 64
66   },
67   .opts = {
68     .march = NULL,
69     .check_resource_usage = 1,
70     .generate_illegal_code = 0,
71     .dump_table = 0,
72     .dump_insn = 0,
73     .diagnostics = 1,
74     .more = 1,
75     .allow_all_sfr = 0
76   }
77 };
78 
79 /* This string should contain the position in the string where the error occurred. */
80 
81 /* Default kvx_registers array. */
82 const struct kvx_Register *kvx_registers = NULL;
83 /* Default kvx_modifiers array. */
84 const char ***kvx_modifiers = NULL;
85 /* Default kvx_regfiles array. */
86 const int *kvx_regfiles = NULL;
87 /* Default values used if no assume directive is given */
88 const struct kvx_core_info *kvx_core_info = NULL;
89 
90 /***********************************************/
91 /*    Generic Globals for GAS                  */
92 /***********************************************/
93 
94 const char comment_chars[]        = "#";
95 const char line_comment_chars[]   = "#";
96 const char line_separator_chars[] = ";";
97 const char EXP_CHARS[]            = "eE";
98 const char FLT_CHARS[]            = "dD";
99 const int md_short_jump_size      = 0;
100 const int md_long_jump_size       = 0;
101 
102 /***********************************************/
103 /*           Local Types                       */
104 /***********************************************/
105 
106 /* a fix up record                       */
107 
108 struct kvx_fixup
109 {
110   /* The expression used.  */
111   expressionS exp;
112   /* The place in the frag where this goes.  */
113   int where;
114   /* The relocation.  */
115   bfd_reloc_code_real_type reloc;
116 };
117 
118 /* a single assembled instruction record */
119 /* may include immediate extension words  */
120 struct kvxinsn
121 {
122   /* written out?  */
123   int written;
124   /* Opcode table entry for this insn */
125   const struct kvxopc *opdef;
126   /* length of instruction in words (1 or 2) */
127   int len;
128   /* insn is extended */
129   int immx0;
130   /* insn has two immx */
131   int immx1;
132   /* order to stabilize sort */
133   int order;
134   /* instruction words */
135   uint32_t words[KVXMAXBUNDLEWORDS];
136   /* the number of fixups [0,2] */
137   int nfixups;
138   /* the actual fixups */
139   struct kvx_fixup fixup[2];
140 };
141 
142 typedef void (*print_insn_t) (struct kvxopc * op);
143 static print_insn_t print_insn = NULL;
144 
145 /* Set to TRUE when we assemble instructions.  */
146 static bool assembling_insn = false;
147 
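/* Value stored in immx0/immx1 when an instruction carries no immediate
   extension word.  */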
148 #define NOIMMX -1
149 
150 /* Was KVXMAXBUNDLEISSUE, changed because of NOPs */
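/* Instructions accumulate in insbuf (and their extension words in immxbuf)
   until the end-of-bundle marker is reached; md_assemble then reorders and
   emits the whole bundle at once.  */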
151 static struct kvxinsn insbuf[KVXMAXBUNDLEWORDS];
152 static int insncnt = 0;
153 static struct kvxinsn immxbuf[KVXMAXBUNDLEWORDS];
154 static int immxcnt = 0;
155 
156 static void
157 incr_immxcnt (void)
158 {
159   immxcnt++;
160   if (immxcnt >= KVXMAXBUNDLEWORDS)
161     as_bad ("Max immx number exceeded: %d", immxcnt);
162 }
163 
164 static void set_byte_counter (asection * sec, int value);
165 static void
166 set_byte_counter (asection * sec, int value)
167 {
168   sec->target_index = value;
169 }
170 
171 static int get_byte_counter (asection * sec);
172 int
173 get_byte_counter (asection * sec)
174 {
175   return sec->target_index;
176 }
177 
178 const char *
179 kvx_target_format (void)
180 {
181   return env.params.arch_size == 64 ? "elf64-kvx" : "elf32-kvx";
182 }
183 
184 /****************************************************/
185 /*  ASSEMBLER Pseudo-ops.  Some of this just        */
186 /*  extends the default definitions                 */
187 /*  others are KVX specific                          */
188 /****************************************************/
189 
190 static void kvx_check_resources (int);
191 static void kvx_proc (int start);
192 static void kvx_endp (int start);
193 static void kvx_type (int start);
194 
195 const pseudo_typeS md_pseudo_table[] = {
196   /* override default 2-bytes */
197   { "word",             cons,                  4 },
198 
199   /* KVX specific */
200   { "dword",            cons,                  8 },
201 
202   /* Override align directives to have a boundary as argument (and not the
203      power of two as in p2align) */
204   { "align",            s_align_bytes,         0 },
205 
206   { "checkresources",   kvx_check_resources,   1 },
207   { "nocheckresources", kvx_check_resources,   0 },
208 
209   { "proc",             kvx_proc,              1 },
210   { "endp",             kvx_endp,              0 },
211 
212   { "type",             kvx_type,              0 },
213 
214 #ifdef OBJ_ELF
215   { "file",             dwarf2_directive_file, 0 },
216   { "loc",              dwarf2_directive_loc,  0 },
217 #endif
218   { NULL,               0,                     0 }
219 };
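
/* Illustrative directive usage (a sketch only; the exact operands are
   handled by kvx_check_resources, kvx_proc, kvx_endp and kvx_type):
     .align 8           # argument is a byte boundary, not a power of two
     .checkresources    # enable bundle resource checking
     .nocheckresources  # disable it  */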
220 
221 
222 static int inside_bundle = 0;
223 
224 /* Stores the labels inside bundles (typically debug labels) that need
225    to be postponed to the next bundle. */
226 struct label_fix
227 {
228   struct label_fix *next;
229   symbolS *sym;
230 } *label_fixes = 0;
231 
232 /*****************************************************/
233 /*   OPTIONS PROCESSING                              */
234 /*****************************************************/
235 
236 const char *md_shortopts = "hV";	/* catted to std short options */
237 
238 /* added to std long options */
239 
240 #define OPTION_HEXFILE               (OPTION_MD_BASE + 0)
241 #define OPTION_MARCH                 (OPTION_MD_BASE + 4)
242 #define OPTION_CHECK_RESOURCES       (OPTION_MD_BASE + 5)
243 #define OPTION_NO_CHECK_RESOURCES    (OPTION_MD_BASE + 6)
244 #define OPTION_GENERATE_ILLEGAL_CODE (OPTION_MD_BASE + 7)
245 #define OPTION_DUMP_TABLE            (OPTION_MD_BASE + 8)
246 #define OPTION_PIC                   (OPTION_MD_BASE + 9)
247 #define OPTION_BIGPIC                (OPTION_MD_BASE + 10)
248 #define OPTION_NOPIC                 (OPTION_MD_BASE + 12)
249 #define OPTION_32                    (OPTION_MD_BASE + 13)
250 #define OPTION_DUMPINSN              (OPTION_MD_BASE + 15)
251 #define OPTION_ALL_SFR               (OPTION_MD_BASE + 16)
252 #define OPTION_DIAGNOSTICS           (OPTION_MD_BASE + 17)
253 #define OPTION_NO_DIAGNOSTICS        (OPTION_MD_BASE + 18)
254 #define OPTION_MORE                  (OPTION_MD_BASE + 19)
255 #define OPTION_NO_MORE               (OPTION_MD_BASE + 20)
256 
257 struct option md_longopts[] = {
258   { "march",                 required_argument, NULL, OPTION_MARCH                 },
259   { "check-resources",       no_argument,       NULL, OPTION_CHECK_RESOURCES       },
260   { "no-check-resources",    no_argument,       NULL, OPTION_NO_CHECK_RESOURCES    },
261   { "generate-illegal-code", no_argument,       NULL, OPTION_GENERATE_ILLEGAL_CODE },
262   { "dump-table",            no_argument,       NULL, OPTION_DUMP_TABLE            },
263   { "mpic",                  no_argument,       NULL, OPTION_PIC                   },
264   { "mPIC",                  no_argument,       NULL, OPTION_BIGPIC                },
265   { "mnopic",                no_argument,       NULL, OPTION_NOPIC                 },
266   { "m32",                   no_argument,       NULL, OPTION_32                    },
267   { "dump-insn",             no_argument,       NULL, OPTION_DUMPINSN              },
268   { "all-sfr",               no_argument,       NULL, OPTION_ALL_SFR               },
269   { "diagnostics",           no_argument,       NULL, OPTION_DIAGNOSTICS           },
270   { "no-diagnostics",        no_argument,       NULL, OPTION_NO_DIAGNOSTICS        },
271   { "more",                  no_argument,       NULL, OPTION_MORE                  },
272   { "no-more",               no_argument,       NULL, OPTION_NO_MORE               },
273   { NULL,                    no_argument,       NULL, 0                            }
274 };
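
/* Typical invocation (a sketch; accepted --march values are the core names
   listed by supported_cores):
     as --march=<core> --check-resources -m32 input.s -o input.o  */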
275 
276 size_t md_longopts_size = sizeof (md_longopts);
277 
278 int
279 md_parse_option (int c, const char *arg ATTRIBUTE_UNUSED)
280 {
281   int find_core = 0;
282 
283   switch (c)
284     {
285     case 'h':
286       md_show_usage (stdout);
287       exit (EXIT_SUCCESS);
288       break;
289 
290       /* -V: SVR4 argument to print version ID.  */
291     case 'V':
292       print_version_id ();
293       exit (EXIT_SUCCESS);
294       break;
295     case OPTION_MARCH:
296       env.opts.march = strdup (arg);
297       for (int i = 0; i < KVXNUMCORES && !find_core; ++i)
298 	    if (!strcasecmp (env.opts.march, kvx_core_info_table[i]->name))
299 	      {
300 		kvx_core_info = kvx_core_info_table[i];
301 		kvx_registers = kvx_registers_table[i];
302 		kvx_modifiers = kvx_modifiers_table[i];
303 		kvx_regfiles = kvx_regfiles_table[i];
304 
305 		find_core = 1;
306 		break;
307 	      }
308       if (!find_core)
309 	{
310 	  char buf[100];
311 	  supported_cores (buf, sizeof (buf));
312 	  as_fatal ("Specified arch not supported [%s]", buf);
313 	}
314       break;
315     case OPTION_CHECK_RESOURCES:
316       env.opts.check_resource_usage = 1;
317       break;
318     case OPTION_NO_CHECK_RESOURCES:
319       env.opts.check_resource_usage = 0;
320       break;
321     case OPTION_GENERATE_ILLEGAL_CODE:
322       env.opts.generate_illegal_code = 1;
323       break;
324     case OPTION_DUMP_TABLE:
325       env.opts.dump_table = 1;
326       break;
327     case OPTION_DUMPINSN:
328       env.opts.dump_insn = 1;
329       break;
330     case OPTION_ALL_SFR:
331       env.opts.allow_all_sfr = 1;
332       break;
333     case OPTION_DIAGNOSTICS:
334       env.opts.diagnostics = 1;
335       break;
336     case OPTION_NO_DIAGNOSTICS:
337       env.opts.diagnostics = 0;
338       break;
339     case OPTION_MORE:
340       env.opts.more = 1;
341       break;
342     case OPTION_NO_MORE:
343       env.opts.more = 0;
344       break;
345     case OPTION_PIC:
346       /* fallthrough, for now the same on KVX */
347     case OPTION_BIGPIC:
348       env.params.pic_flags |= ELF_KVX_ABI_PIC_BIT;
349       break;
350     case OPTION_NOPIC:
351       env.params.pic_flags &= ~(ELF_KVX_ABI_PIC_BIT);
352       break;
353     case OPTION_32:
354       env.params.arch_size = 32;
355       break;
356 
357     default:
358       return 0;
359     }
360   return 1;
361 }
362 
363 void
364 md_show_usage (FILE * stream)
365 {
366   char buf[100];
367   supported_cores (buf, sizeof (buf));
368 
369   fprintf (stream, "\n"
370 "KVX specific options:\n\n"
371 "  --check-resources\t Perform minimal resource checking\n"
372 "  --march [%s]\t Select architecture\n"
373 "  -V \t\t\t Print assembler version number\n\n"
374 "  The options -M, --mri and -f are not supported in this assembler.\n", buf);
375 }
376 
377 /**************************************************/
378 /*              UTILITIES                         */
379 /**************************************************/
380 
381 /*
382  * Read a value from the object file.
383  */
384 
385 static valueT md_chars_to_number (char *buf, int n);
386 valueT
387 md_chars_to_number (char *buf, int n)
388 {
389   valueT val = 0;
390 
391   if (n > (int) sizeof (val) || n <= 0)
392     abort ();
393 
394   while (n--)
395     {
396       val <<= 8;
397       val |= (buf[n] & 0xff);
398     }
399 
400   return val;
401 }
402 
403 /* Returns the pseudo function matching SYM that is to be
404    used for data sections.  */
405 static struct pseudo_func *
406 kvx_get_pseudo_func_data_scn (symbolS * sym)
407 {
408   for (int i = 0; i < kvx_core_info->nb_pseudo_funcs; i++)
409     if (sym == kvx_core_info->pseudo_funcs[i].sym
410 	&& kvx_core_info->pseudo_funcs[i].pseudo_relocs.single != BFD_RELOC_UNUSED)
411 	return &kvx_core_info->pseudo_funcs[i];
412   return NULL;
413 }
414 
415 /* Returns the corresponding pseudo function matching SYM and operand
416    format OPND */
417 static struct pseudo_func *
418 kvx_get_pseudo_func2 (symbolS *sym, struct kvx_operand * opnd)
419 {
420   for (int i = 0; i < kvx_core_info->nb_pseudo_funcs; i++)
421     if (sym == kvx_core_info->pseudo_funcs[i].sym)
422       for (int relidx = 0; relidx < opnd->reloc_nb; relidx++)
423 	if (opnd->relocs[relidx] == kvx_core_info->pseudo_funcs[i].pseudo_relocs.kreloc
424 	    && (env.params.arch_size == (int) kvx_core_info->pseudo_funcs[i].pseudo_relocs.avail_modes
425 	      || kvx_core_info->pseudo_funcs[i].pseudo_relocs.avail_modes == PSEUDO_ALL))
426 	  return &kvx_core_info->pseudo_funcs[i];
427 
428   return NULL;
429 }
430 
431 static void
432 supported_cores (char buf[], size_t buflen)
433 {
434   buf[0] = '\0';
435   for (int i = 0; i < KVXNUMCORES; i++)
436     {
437       if (buf[0] == '\0')
438 	strcpy (buf, kvx_core_info_table[i]->name);
439       else
440 	if ((strlen (buf) + 1 + strlen (kvx_core_info_table[i]->name) + 1) < buflen)
441 	  {
442 	    strcat (buf, "|");
443 	    strcat (buf, kvx_core_info_table[i]->name);
444 	  }
445     }
446 }
447 
448 /***************************************************/
449 /*   ASSEMBLE AN INSTRUCTION                       */
450 /***************************************************/
451 
452 /*
453  * Insert the value of token TOK into the operand described by OPDEF
454  * in instruction INSN.  Returns 1 if the immediate extension (IMMX)
455  * has been handled along with its relocation, 0 if not.
456  */
457 static int
458 insert_operand (struct kvxinsn *insn, struct kvx_operand *opdef,
459 		struct token_list *tok)
460 {
461   uint64_t op = 0;
462   struct kvx_bitfield *bfields = opdef->bfield;
463   int bf_nb = opdef->bitfields;
464   int immx_ready = 0;
465 
466   if (opdef->width == 0)
467     return 0;
468 
469 #define add_fixup(insn_, reloc_, exp_) \
470   do { \
471     (insn_)->fixup[(insn_)->nfixups].reloc = (reloc_); \
472     (insn_)->fixup[(insn_)->nfixups].exp = (exp_);     \
473     (insn_)->fixup[(insn_)->nfixups].where = 0;        \
474     (insn_)->nfixups++;                                \
475   } while (0)
476 
477 #define add_immx(insn_, words_, reloc_, exp_, nfixups_, len_) \
478   do { \
479     immxbuf[immxcnt].words[0] = (words_);                \
480     immxbuf[immxcnt].fixup[0].reloc = (reloc_);          \
481     immxbuf[immxcnt].fixup[0].exp = (exp_);              \
482     immxbuf[immxcnt].fixup[0].where = 0;                 \
483     immxbuf[immxcnt].nfixups = (nfixups_);               \
484     immxbuf[immxcnt].len = (len_);                       \
485     /* decrement insn->len: immx part handled separately \
486        from insn and must not be emitted twice.  */      \
487     (insn_)->len -= 1;                                   \
488     incr_immxcnt ();                                     \
489   } while (0)
490 
491 #define chk_imm(core_, imm_) \
492   (env.params.core == ELF_KVX_CORE_## core_ && opdef->type == (imm_))
493 
494   /* try to resolve the value */
495 
496   switch (tok->category)
497     {
498     case CAT_REGISTER:
499       op = S_GET_VALUE (str_hash_find (env.reg_hash, tok->tok));
500       op -= opdef->bias;
501       op >>= opdef->shift;
502       break;
503     case CAT_MODIFIER:
504       op = tok->val;
505       op -= opdef->bias;
506       op >>= opdef->shift;
507       break;
508     case CAT_IMMEDIATE:
509       {
510 	char *ilp_save = input_line_pointer;
511 	input_line_pointer = tok->tok;
512 	expressionS exp = { 0 };
513 	expression (&exp);
514 	input_line_pointer = ilp_save;
515 
516 	/* We are dealing with a pseudo-function.  */
517 	if (tok->tok[0] == '@')
518 	  {
519 	    if (insn->nfixups == 0)
520 	      {
521 		expressionS reloc_arg;
522 		reloc_arg = exp;
523 		reloc_arg.X_op = O_symbol;
524 		struct pseudo_func *pf =
525 		  kvx_get_pseudo_func2 (exp.X_op_symbol, opdef);
526 		/* S64 uses LO10/UP27/EX27 format (3 words), with one reloc in each word (3) */
527 		/* S43 uses LO10/EX6/UP27 format (2 words), with 2 relocs in main syllables and 1 in extra word */
528 		/* S37 uses LO10/UP27 format (2 words), with one reloc in each word (2) */
529 
530 		/* Beware that immxbuf must be filled in the same order as relocs should be emitted. */
531 
532 		if (pf->pseudo_relocs.reloc_type == S64_LO10_UP27_EX27
533 		    || pf->pseudo_relocs.reloc_type == S43_LO10_UP27_EX6
534 		    || pf->pseudo_relocs.reloc_type == S37_LO10_UP27)
535 		  {
536 		    add_fixup (insn, pf->pseudo_relocs.reloc_lo10, reloc_arg);
537 
538 		    insn->immx0 = immxcnt;
539 		    add_immx (insn, 0, pf->pseudo_relocs.reloc_up27,
540 			      reloc_arg, 1, 1);
541 		    immx_ready = 1;
542 		  }
543 		else if (pf->pseudo_relocs.reloc_type == S32_LO5_UP27)
544 		  {
545 		    add_fixup (insn, pf->pseudo_relocs.reloc_lo5, reloc_arg);
546 
547 		    insn->immx0 = immxcnt;
548 		    add_immx (insn, 0, pf->pseudo_relocs.reloc_up27,
549 			      reloc_arg, 1, 1);
550 		    immx_ready = 1;
551 		  }
552 		else if (pf->pseudo_relocs.reloc_type == S16)
553 		  add_fixup (insn, pf->pseudo_relocs.single, reloc_arg);
554 		else
555 		  as_fatal ("Unexpected fixup");
556 
557 		if (pf->pseudo_relocs.reloc_type == S64_LO10_UP27_EX27)
558 		  {
559 		    insn->immx1 = immxcnt;
560 		    add_immx (insn, 0, pf->pseudo_relocs.reloc_ex, reloc_arg,
561 			      1, 1);
562 		  }
563 		else if (pf->pseudo_relocs.reloc_type == S43_LO10_UP27_EX6)
564 		  add_fixup (insn, pf->pseudo_relocs.reloc_ex, reloc_arg);
565 	      }
566 	  }
567 	else
568 	  {
569 	    if (exp.X_op == O_constant)
570 	      {
571 		/* This is an immediate: either a regular immediate, or an
572 		   immediate that was saved in a variable through `.equ'.  */
573 		uint64_t sval = (int64_t) tok->val;
574 		op = opdef->flags & kvxSIGNED ? sval : tok->val;
575 		op >>= opdef->shift;
576 	      }
577 	    else if (exp.X_op == O_subtract)
578 	      as_fatal ("O_subtract not supported.");
579 	    else
580 	      {
581 
582 		/* This is a symbol which needs a relocation.  */
583 		if (insn->nfixups == 0)
584 		  {
585 		    if (chk_imm (KV3_1, Immediate_kv3_v1_pcrel17)
586 			|| chk_imm (KV3_2, Immediate_kv3_v2_pcrel17)
587 			|| chk_imm (KV4_1, Immediate_kv4_v1_pcrel17))
588 		      add_fixup (insn, BFD_RELOC_KVX_PCREL17, exp);
589 		    else if (chk_imm (KV3_1, Immediate_kv3_v1_pcrel27)
590 			     || chk_imm (KV3_2, Immediate_kv3_v2_pcrel27)
591 			     || chk_imm (KV4_1, Immediate_kv4_v1_pcrel27))
592 		      add_fixup (insn, BFD_RELOC_KVX_PCREL27, exp);
593 		    else if (chk_imm (KV3_1, Immediate_kv3_v1_wrapped32)
594 			     || chk_imm (KV3_2, Immediate_kv3_v2_wrapped32)
595 			     || chk_imm (KV4_1, Immediate_kv4_v1_wrapped32))
596 		      {
597 			add_fixup (insn, BFD_RELOC_KVX_S32_LO5, exp);
598 
599 			insn->immx0 = immxcnt;
600 			add_immx (insn, 0, BFD_RELOC_KVX_S32_UP27, exp, 1, 1);
601 
602 			immx_ready = 1;
603 		      }
604 		    else if (chk_imm (KV3_1, Immediate_kv3_v1_signed10)
605 			     || chk_imm (KV3_2, Immediate_kv3_v2_signed10)
606 			     || chk_imm (KV4_1, Immediate_kv4_v1_signed10))
607 		      add_fixup (insn, BFD_RELOC_KVX_S37_LO10, exp);
608 		    else if (chk_imm (KV3_1, Immediate_kv3_v1_signed37)
609 			     || chk_imm (KV3_2, Immediate_kv3_v2_signed37)
610 			     || chk_imm (KV4_1, Immediate_kv4_v1_signed37))
611 		      {
612 			add_fixup (insn, BFD_RELOC_KVX_S37_LO10, exp);
613 
614 			insn->immx0 = immxcnt;
615 			add_immx (insn, 0, BFD_RELOC_KVX_S37_UP27, exp, 1, 1);
616 
617 			immx_ready = 1;
618 		      }
619 		    else if (chk_imm (KV3_1, Immediate_kv3_v1_signed43)
620 			     || chk_imm (KV3_2, Immediate_kv3_v2_signed43)
621 			     || chk_imm (KV4_1, Immediate_kv4_v1_signed43))
622 		      {
623 			add_fixup (insn, BFD_RELOC_KVX_S43_LO10, exp);
624 			add_fixup (insn, BFD_RELOC_KVX_S43_EX6, exp);
625 
626 			insn->immx0 = immxcnt;
627 			add_immx (insn, insn->words[1],
628 				  BFD_RELOC_KVX_S43_UP27, exp, 1, 1);
629 
630 			immx_ready = 1;
631 		      }
632 		    else if (chk_imm (KV3_1, Immediate_kv3_v1_wrapped64)
633 			     || chk_imm (KV3_2, Immediate_kv3_v2_wrapped64)
634 			     || chk_imm (KV4_1, Immediate_kv4_v1_wrapped64))
635 		      {
636 			add_fixup (insn, BFD_RELOC_KVX_S64_LO10, exp);
637 
638 			insn->immx0 = immxcnt;
639 			add_immx (insn, insn->words[1],
640 				  BFD_RELOC_KVX_S64_UP27, exp, 1, 1);
641 
642 			insn->immx1 = immxcnt;
643 			add_immx (insn, insn->words[2],
644 				  BFD_RELOC_KVX_S64_EX27, exp, 1, 1);
645 
646 			immx_ready = 1;
647 		      }
648 		    else
649 		      as_fatal ("don't know how to generate a fixup record");
650 		    return immx_ready;
651 		  }
652 		else
653 		  as_fatal ("No room for fixup ");
654 	      }
655 	  }
656       }
657       break;
658     default:
659       break;
660     }
661 
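  /* Scatter the resolved value into the instruction words: each bitfield
     takes SIZE bits of OP starting at FROM_OFFSET and deposits them at
     TO_OFFSET, expressed in bits from the start of the instruction words.  */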
662   for (int bf_idx = 0; bf_idx < bf_nb; bf_idx++)
663     {
664       uint64_t value =
665 	((uint64_t) op >> bfields[bf_idx].from_offset);
666       int j = 0;
667       int to_offset = bfields[bf_idx].to_offset;
668       value &= (1LL << bfields[bf_idx].size) - 1;
669       j = to_offset / 32;
670       to_offset = to_offset % 32;
671       insn->words[j] |= (value << to_offset) & 0xffffffff;
672     }
673 
674   return immx_ready;
675 
676 #undef chk_imm
677 #undef add_immx
678 #undef add_fixup
679 }
680 
681 /*
682  * Given a set of operands and a matching instruction,
683  * assemble it
684  *
685  */
686 static void
687 assemble_insn (const struct kvxopc * opcode, struct token_list *tok, struct kvxinsn *insn)
688 {
689   unsigned immx_ready = 0;
690 
691   memset (insn, 0, sizeof (*insn));
692   insn->opdef = opcode;
693   for (int i = 0; i < opcode->wordcount; i++)
694     {
695       insn->words[i] = opcode->codewords[i].opcode;
696       insn->len += 1;
697     }
698 
699   insn->immx0 = NOIMMX;
700   insn->immx1 = NOIMMX;
701 
702   struct token_list *tok_ = tok;
703   struct kvx_operand **format = (struct kvx_operand **) opcode->format;
704 
705   while (tok_)
706     {
707       int ret = insert_operand (insn, *format, tok_);
708       immx_ready |= ret;
709       while ((tok_ = tok_->next) && tok_->category == CAT_SEPARATOR);
710       format++;
711     }
712 
713   // Handle immx if insert_operand did not already take care of that
714   if (!immx_ready)
715     {
716       for (int i = 0; i < opcode->wordcount; i++)
717 	{
718 	  if (opcode->codewords[i].flags & kvxOPCODE_FLAG_IMMX0)
719 	    {
720 	      insn->immx0 = immxcnt;
721 	      immxbuf[immxcnt].words[0] = insn->words[i];
722 	      immxbuf[immxcnt].nfixups = 0;
723 	      immxbuf[immxcnt].len = 1;
724 	      insn->len -= 1;
725 	      incr_immxcnt ();
726 	    }
727 	  if (opcode->codewords[i].flags & kvxOPCODE_FLAG_IMMX1)
728 	    {
729 	      insn->immx1 = immxcnt;
730 	      immxbuf[immxcnt].words[0] = insn->words[i];
731 	      immxbuf[immxcnt].nfixups = 0;
732 	      immxbuf[immxcnt].len = 1;
733 	      insn->len -= 1;
734 	      incr_immxcnt ();
735 	    }
736 	}
737     }
738 }
739 
740 /* Emit an instruction from the instruction array into the object
741  * file. INSN points to an element of the instruction array. STOPFLAG
742  * is true if this is the last instruction in the bundle.
743  *
744  * Only handles main syllables of bundle. Immediate extensions are
745  * handled by insert_operand.
746  */
747 static void
748 emit_insn (struct kvxinsn * insn, int insn_pos, int stopflag)
749 {
750   char *f;
751   unsigned int image;
752 
753   /* if we are listing, attach frag to previous line.  */
754   if (listing)
755     listing_prev_line ();
756 
757   /* Update text size for lane parity checking.  */
758   set_byte_counter (now_seg, (get_byte_counter (now_seg) + (insn->len * 4)));
759 
760   /* allocate space in the fragment.  */
761   f = frag_more (insn->len * 4);
762 
763   /* spit out bits.  */
764   for (int i = 0; i < insn->len; i++)
765     {
766       image = insn->words[i];
767 
766       /* Handle the bundle parallel bit. */
769       if ((i == insn->len - 1) && stopflag)
770 	image &= ~PARALLEL_BIT;
771       else
772 	image |= PARALLEL_BIT;
773 
774       /* Emit the instruction image. */
775       md_number_to_chars (f + (i * 4), image, 4);
776     }
777 
778   /* generate fixup records */
779 
780   for (int i = 0; i < insn->nfixups; i++)
781     {
782       int size, pcrel;
783       reloc_howto_type *reloc_howto =
784 	bfd_reloc_type_lookup (stdoutput, insn->fixup[i].reloc);
785       assert (reloc_howto);
786       size = bfd_get_reloc_size (reloc_howto);
787       pcrel = reloc_howto->pc_relative;
788 
789       /* In case the PCREL relocation is not for the first insn in the
790          bundle, we have to offset it.  The pc used by the hardware
791          references a bundle and not separate insn.
792        */
793       assert (!(insn_pos == -1 && pcrel));
794       if (pcrel && insn_pos > 0)
795 	insn->fixup[i].exp.X_add_number += insn_pos * 4;
796 
797       fixS *fixup = fix_new_exp (frag_now,
798 				 f - frag_now->fr_literal +
799 				 insn->fixup[i].where,
800 				 size,
801 				 &(insn->fixup[i].exp),
802 				 pcrel,
803 				 insn->fixup[i].reloc);
804       /*
805        * Set this bit so that large values can still be
806        * handled. Without it, the assembler would fail in fixup_segment
807        * when it checks that there are enough bits to store the value. As we
808        * usually split our relocs across several words, it may think
809        * that 4 bytes are not enough for a large value. This simply
810        * skips those tests.
811        */
812       fixup->fx_no_overflow = 1;
813     }
814 }
815 
816 
817 /* Called for any expression that cannot be recognized.  When the
818  * function is called, `input_line_pointer' will point to the start of
819  * the expression.  */
820 /* FIXME: Should be done by the parser */
821 void
822 md_operand (expressionS * e)
823 {
824   /* enum pseudo_type pseudo_type; */
825   /* char *name = NULL; */
826   size_t len;
827   int ch, i;
828 
829   switch (*input_line_pointer)
830     {
831     case '@':
832       /* Find what relocation pseudo-function we're dealing with. */
833       /* pseudo_type = 0; */
834       ch = *++input_line_pointer;
835       for (i = 0; i < kvx_core_info->nb_pseudo_funcs; ++i)
836 	if (kvx_core_info->pseudo_funcs[i].name && kvx_core_info->pseudo_funcs[i].name[0] == ch)
837 	  {
838 	    len = strlen (kvx_core_info->pseudo_funcs[i].name);
839 	    if (strncmp (kvx_core_info->pseudo_funcs[i].name + 1,
840 			 input_line_pointer + 1, len - 1) == 0
841 		&& !is_part_of_name (input_line_pointer[len]))
842 	      {
843 		input_line_pointer += len;
844 		break;
845 	      }
846 	  }
847       SKIP_WHITESPACE ();
848       if (*input_line_pointer != '(')
849 	{
850 	  as_bad ("Expected '('");
851 	  goto err;
852 	}
853       /* Skip '('.  */
854       ++input_line_pointer;
855       if (!kvx_core_info->pseudo_funcs[i].pseudo_relocs.has_no_arg)
856 	expression (e);
857       if (*input_line_pointer++ != ')')
858 	{
859 	  as_bad ("Missing ')'");
860 	  goto err;
861 	}
862       if (!kvx_core_info->pseudo_funcs[i].pseudo_relocs.has_no_arg)
863 	{
864 	  if (e->X_op != O_symbol)
865 	    as_fatal ("Illegal combination of relocation functions");
866 	}
867       /* Make sure gas doesn't get rid of local symbols that are used
868          in relocs.  */
869       e->X_op = O_pseudo_fixup;
870       e->X_op_symbol = kvx_core_info->pseudo_funcs[i].sym;
871       break;
872 
873     default:
874       break;
875     }
876   return;
877 
878 err:
879   ignore_rest_of_line ();
880 }
881 
882 /*
883  * Return the Bundling type for an insn.
884  */
885 static int
886 find_bundling (const struct kvxinsn * insn)
887 {
888   return insn->opdef->bundling;
889 }
890 
891 static int
892 find_reservation (const struct kvxinsn * insn)
893 {
894   return insn->opdef->reservation;
895 }
896 
897 static struct kvxopc *
898 assemble_tokens (struct token_list *tok_list)
899 {
900   assert (tok_list != NULL);
901   struct token_list *toks = tok_list;
902 
903   /* make sure there is room in instruction buffer */
904   /* Was KVXMAXBUNDLEISSUE, changed because of NOPs */
905   if (insncnt >= KVXMAXBUNDLEWORDS)
906     as_fatal ("[assemble_tokens]: too many instructions in bundle.");
907 
908   /* TODO: Merge */
909   struct kvxinsn *insn;
910   insn = insbuf + insncnt;
911 
912   /* The formats table folds the modifier into the opcode, therefore we
913      need to fuse both before looking up the opcode hash table.  */
914   char *opcode = NULL;
915 
916   opcode = toks->tok;
917   toks = toks->next;
918 
919   while (toks && toks->category == CAT_SEPARATOR)
920     toks = toks->next;
921 
922   /* Find the format requested by the instruction.  */
923   struct kvxopc *format_tbl = str_hash_find (env.opcode_hash, opcode);
924   struct kvxopc *format = NULL;
925 
926   struct token_list *toks_ = toks;
927 
928   while (!format && format_tbl && STREQ (opcode, format_tbl->as_op))
929   {
930     for (int i = 0 ; toks_ && format_tbl->format[i]
931 	&& toks_->class_id == format_tbl->format[i]->type ;)
932       {
933 	toks_ = toks_->next;
934 	while (toks_ && toks_->category == CAT_SEPARATOR)
935 	  toks_ = toks_->next;
936 	i += 1;
937       }
938 
939     if (!toks_)
940       format = format_tbl;
941     else
942       {
943 	toks_ = toks;
944 	format_tbl++;
945       }
946   }
947 
948   assert (format != NULL);
949 
950   assemble_insn (format, toks, insn);
951   insncnt++;
952 
953   return NULL;
954 }
955 
956 /*
957  * Write at most BUF_SIZE characters into BUF.
958  * Returns the number of written characters.
959  */
960 static int ATTRIBUTE_UNUSED
961 insn_syntax (struct kvxopc * op, char *buf, int buf_size)
962 {
963   int chars = snprintf (buf, buf_size, "%s ", op->as_op);
964   const char *fmtp = op->fmtstring;
965   char ch = 0;
966 
967   for (int i = 0; op->format[i]; i++)
968     {
969       int type = op->format[i]->type;
970       const char *type_name = TOKEN_NAME (type);
971       int offset = 0;
972 
973       for (int j = 0 ; type_name[j] ; ++j)
974 	if (type_name[j] == '_')
975 	  offset = j + 1;
976 
977       /* Print characters in the format string up to the following % or nul. */
978       while ((chars < buf_size) && (ch = *fmtp) && ch != '%')
979 	{
980 	  buf[chars++] = ch;
981 	  fmtp++;
982 	}
983 
984       /* Skip past %s */
985       if (ch == '%')
986 	{
987 	  ch = *fmtp++;
988 	  fmtp++;
989 	}
990 
991       chars += snprintf (&buf[chars], buf_size - chars, "%s", type_name + offset);
992     }
993 
994   /* Print trailing characters in the format string, if any */
995   while ((chars < buf_size) && (ch = *fmtp))
996     {
997       buf[chars++] = ch;
998       fmtp++;
999     }
1000 
1001   if (chars < buf_size)
1002     buf[chars++] = '\0';
1003   else
1004     buf[buf_size - 1] = '\0';
1005 
1006   return chars;
1007 }
1008 
1009 #define ASM_CHARS_MAX (71)
1010 
1011 static void
1012 kvx_print_insn (struct kvxopc * op ATTRIBUTE_UNUSED)
1013 {
1014   char asm_str[ASM_CHARS_MAX];
1015   int chars = insn_syntax (op, asm_str, ASM_CHARS_MAX);
1016   const char *insn_type = "UNKNOWN";
1017   const char *insn_mode = "";
1018 
1019   for (int i = chars - 1; i < ASM_CHARS_MAX - 1; i++)
1020     asm_str[i] = '-';
1021 
1022   /* This is a hack which works because the Bundling is the same for all cores
1023      for now.  */
1024   switch ((int) op->bundling)
1025     {
1026     case Bundling_kv3_v1_ALL:
1027       insn_type = "ALL  ";
1028       break;
1029     case Bundling_kv3_v1_BCU:
1030       insn_type = "BCU  ";
1031       break;
1032     case Bundling_kv3_v1_TCA:
1033       insn_type = "TCA  ";
1034       break;
1035     case Bundling_kv3_v1_FULL:
1036     case Bundling_kv3_v1_FULL_X:
1037     case Bundling_kv3_v1_FULL_Y:
1038       insn_type = "FULL ";
1039       break;
1040     case Bundling_kv3_v1_LITE:
1041     case Bundling_kv3_v1_LITE_X:
1042     case Bundling_kv3_v1_LITE_Y:
1043       insn_type = "LITE ";
1044       break;
1045     case Bundling_kv3_v1_TINY:
1046     case Bundling_kv3_v1_TINY_X:
1047     case Bundling_kv3_v1_TINY_Y:
1048       insn_type = "TINY ";
1049       break;
1050     case Bundling_kv3_v1_MAU:
1051     case Bundling_kv3_v1_MAU_X:
1052     case Bundling_kv3_v1_MAU_Y:
1053       insn_type = "MAU  ";
1054       break;
1055     case Bundling_kv3_v1_LSU:
1056     case Bundling_kv3_v1_LSU_X:
1057     case Bundling_kv3_v1_LSU_Y:
1058       insn_type = "LSU  ";
1059       break;
1060     case Bundling_kv3_v1_NOP:
1061       insn_type = "NOP  ";
1062       break;
1063     default:
1064       as_fatal ("Unhandled Bundling class %d", op->bundling);
1065     }
1066 
1067   if (op->codewords[0].flags & kvxOPCODE_FLAG_MODE64
1068       && op->codewords[0].flags & kvxOPCODE_FLAG_MODE32)
1069     insn_mode = "32 and 64";
1070   else if (op->codewords[0].flags & kvxOPCODE_FLAG_MODE64)
1071     insn_mode = "64";
1072   else if (op->codewords[0].flags & kvxOPCODE_FLAG_MODE32)
1073     insn_mode = "32";
1074   else
1075     as_fatal ("Unknown instruction mode.");
1076 
1077   printf ("%s | syllables: %d | type: %s | mode: %s bits\n", asm_str,
1078 	  op->wordcount, insn_type, insn_mode);
1079 }
1080 
1081 /* Comparison function compatible with qsort.  This is used to sort the issues
1082    into the right order.  */
1083 static int
1084 kvxinsn_compare (const void *a, const void *b)
1085 {
1086   struct kvxinsn *kvxinsn_a = *(struct kvxinsn **) a;
1087   struct kvxinsn *kvxinsn_b = *(struct kvxinsn **) b;
1088   int bundling_a = find_bundling (kvxinsn_a);
1089   int bundling_b = find_bundling (kvxinsn_b);
1090   int order_a = kvxinsn_a->order;
1091   int order_b = kvxinsn_b->order;
1092   if (bundling_a != bundling_b)
1093     return (bundling_b < bundling_a) - (bundling_a < bundling_b);
1094   return (order_b < order_a) - (order_a < order_b);
1095 }
1096 
1097 static void
1098 kvx_reorder_bundle (struct kvxinsn *bundle_insn[], int bundle_insncnt)
1099 {
1100   enum
1101   { EXU_BCU, EXU_TCA, EXU_ALU0, EXU_ALU1, EXU_MAU, EXU_LSU, EXU__ };
1102   struct kvxinsn *issued[EXU__];
1103   int tag, exu;
1104 
1105   memset (issued, 0, sizeof (issued));
1106   for (int i = 0; i < bundle_insncnt; i++)
1107     {
1108       struct kvxinsn *kvxinsn = bundle_insn[i];
1109       tag = -1, exu = -1;
1110       /* This is a hack. It works because the Bundling is the same for all
1111          cores for now.  */
1112       switch ((int) find_bundling (kvxinsn))
1113 	{
1114 	case Bundling_kv3_v1_ALL:
1115 	  if (bundle_insncnt > 1)
1116 	    as_fatal ("Too many ops in a single op bundle");
1117 	  issued[0] = kvxinsn;
1118 	  break;
1119 	case Bundling_kv3_v1_BCU:
1120 	  if (!issued[EXU_BCU])
1121 	    issued[EXU_BCU] = kvxinsn;
1122 	  else
1123 	    as_fatal ("More than one BCU instruction in bundle");
1124 	  break;
1125 	case Bundling_kv3_v1_TCA:
1126 	  if (!issued[EXU_TCA])
1127 	    issued[EXU_TCA] = kvxinsn;
1128 	  else
1129 	    as_fatal ("More than one TCA instruction in bundle");
1130 	  break;
1131 	case Bundling_kv3_v1_FULL:
1132 	case Bundling_kv3_v1_FULL_X:
1133 	case Bundling_kv3_v1_FULL_Y:
1134 	  if (!issued[EXU_ALU0])
1135 	    {
1136 	      issued[EXU_ALU0] = kvxinsn;
1137 	      tag = Modifier_kv3_v1_exunum_ALU0;
1138 	      exu = EXU_ALU0;
1139 	    }
1140 	  else
1141 	    as_fatal ("More than one ALU FULL instruction in bundle");
1142 	  break;
1143 	case Bundling_kv3_v1_LITE:
1144 	case Bundling_kv3_v1_LITE_X:
1145 	case Bundling_kv3_v1_LITE_Y:
1146 	  if (!issued[EXU_ALU0])
1147 	    {
1148 	      issued[EXU_ALU0] = kvxinsn;
1149 	      tag = Modifier_kv3_v1_exunum_ALU0;
1150 	      exu = EXU_ALU0;
1151 	    }
1152 	  else if (!issued[EXU_ALU1])
1153 	    {
1154 	      issued[EXU_ALU1] = kvxinsn;
1155 	      tag = Modifier_kv3_v1_exunum_ALU1;
1156 	      exu = EXU_ALU1;
1157 	    }
1158 	  else
1159 	    as_fatal ("Too many ALU FULL or LITE instructions in bundle");
1160 	  break;
1161 	case Bundling_kv3_v1_MAU:
1162 	case Bundling_kv3_v1_MAU_X:
1163 	case Bundling_kv3_v1_MAU_Y:
1164 	  if (!issued[EXU_MAU])
1165 	    {
1166 	      issued[EXU_MAU] = kvxinsn;
1167 	      tag = Modifier_kv3_v1_exunum_MAU;
1168 	      exu = EXU_MAU;
1169 	    }
1170 	  else
1171 	    as_fatal ("More than one MAU instruction in bundle");
1172 	  break;
1173 	case Bundling_kv3_v1_LSU:
1174 	case Bundling_kv3_v1_LSU_X:
1175 	case Bundling_kv3_v1_LSU_Y:
1176 	  if (!issued[EXU_LSU])
1177 	    {
1178 	      issued[EXU_LSU] = kvxinsn;
1179 	      tag = Modifier_kv3_v1_exunum_LSU;
1180 	      exu = EXU_LSU;
1181 	    }
1182 	  else
1183 	    as_fatal ("More than one LSU instruction in bundle");
1184 	  break;
1185 	case Bundling_kv3_v1_TINY:
1186 	case Bundling_kv3_v1_TINY_X:
1187 	case Bundling_kv3_v1_TINY_Y:
1188 	case Bundling_kv3_v1_NOP:
1189 	  if (!issued[EXU_ALU0])
1190 	    {
1191 	      issued[EXU_ALU0] = kvxinsn;
1192 	      tag = Modifier_kv3_v1_exunum_ALU0;
1193 	      exu = EXU_ALU0;
1194 	    }
1195 	  else if (!issued[EXU_ALU1])
1196 	    {
1197 	      issued[EXU_ALU1] = kvxinsn;
1198 	      tag = Modifier_kv3_v1_exunum_ALU1;
1199 	      exu = EXU_ALU1;
1200 	    }
1201 	  else if (!issued[EXU_MAU])
1202 	    {
1203 	      issued[EXU_MAU] = kvxinsn;
1204 	      tag = Modifier_kv3_v1_exunum_MAU;
1205 	      exu = EXU_MAU;
1206 	    }
1207 	  else if (!issued[EXU_LSU])
1208 	    {
1209 	      issued[EXU_LSU] = kvxinsn;
1210 	      tag = Modifier_kv3_v1_exunum_LSU;
1211 	      exu = EXU_LSU;
1212 	    }
1213 	  else
1214 	    as_fatal ("Too many ALU instructions in bundle");
1215 	  break;
1216 	default:
1217 	  as_fatal ("Unhandled Bundling class %d", find_bundling (kvxinsn));
1218 	}
1219       if (tag >= 0)
1220 	{
1221 	  if (issued[exu]->immx0 != NOIMMX)
1222 	    immxbuf[issued[exu]->immx0].words[0] |= (tag << 27);
1223 	  if (issued[exu]->immx1 != NOIMMX)
1224 	    immxbuf[issued[exu]->immx1].words[0] |= (tag << 27);
1225 	}
1226     }
1227 
1228   int i;
1229   for (i = 0, exu = 0; exu < EXU__; exu++)
1230     {
1231       if (issued[exu])
1232 	bundle_insn[i++] = issued[exu];
1233     }
1234   if (i != bundle_insncnt)
1235     as_fatal ("Mismatch between bundle and issued instructions");
1236 }
1237 
1238 static void
1239 kvx_check_resource_usage (struct kvxinsn **bundle_insn, int bundle_insncnt)
1240 {
1241   const int reservation_table_len =
1242     (kvx_core_info->reservation_table_lines * kvx_core_info->resource_max);
1243   const int *resources = kvx_core_info->resources;
1244   int *resources_used =
1245     malloc (reservation_table_len * sizeof (int));
1246   memset (resources_used, 0, reservation_table_len * sizeof (int));
1247 
1248   for (int i = 0; i < bundle_insncnt; i++)
1249   {
1250     int insn_reservation = find_reservation (bundle_insn[i]);
1251     int reservation = insn_reservation & 0xff;
1252     const int *reservation_table = kvx_core_info->reservation_table_table[reservation];
1253     for (int j = 0; j < reservation_table_len; j++)
1254       resources_used[j] += reservation_table[j];
1255   }
1256 
1257   for (int i = 0; i < kvx_core_info->reservation_table_lines; i++)
1258     {
1259       for (int j = 0; j < kvx_core_info->resource_max; j++)
1260 	if (resources_used[(i * kvx_core_info->resource_max) + j] > resources[j])
1261 	  {
1262 	    int v = resources_used[(i * kvx_core_info->resource_max) + j];
1263 	    free (resources_used);
1264 	    as_fatal ("Resource %s over-used in bundle: %d used, %d available",
1265 		kvx_core_info->resource_names[j], v, resources[j]);
1266 	  }
1267   }
1268   free (resources_used);
1269 }
1270 
1271 /*
1272  * Called by core to assemble a single line
1273  */
1274 void
1275 md_assemble (char *line)
1276 {
1277   char *line_cursor = line;
1278 
1279   if (get_byte_counter (now_seg) & 3)
1280     as_fatal ("code segment not word aligned in md_assemble");
1281 
1282   while (line_cursor && line_cursor[0] && (line_cursor[0] == ' '))
1283     line_cursor++;
1284 
1285   /* ;; was converted to "be" by line hook          */
1286   /* here we look for the bundle end                */
1287   /* and actually output any instructions in bundle */
1288   /* also we need to implement the stop bit         */
1289   /* check for bundle end */
1290   if (strncmp (line_cursor, "be", 2) == 0)
1291   {
1292     inside_bundle = 0;
1293     //int sec_align = bfd_get_section_alignment(stdoutput, now_seg);
1294     /* Was KVXMAXBUNDLEISSUE, changed because of NOPs */
1295     struct kvxinsn *bundle_insn[KVXMAXBUNDLEWORDS];
1296     int bundle_insncnt = 0;
1297     int syllables = 0;
1298     int entry;
1299 
1300 #ifdef OBJ_ELF
1301     /* Emit Dwarf debug line information */
1302     dwarf2_emit_insn (0);
1303 #endif
1304     for (int j = 0; j < insncnt; j++)
1305     {
1306       insbuf[j].order = j;
1307       bundle_insn[bundle_insncnt++] = &insbuf[j];
1308       syllables += insbuf[j].len;
1309     }
1310 
1311     if (syllables + immxcnt > KVXMAXBUNDLEWORDS)
1312       as_fatal ("Bundle has too many syllables : %d instead of %d",
1313 	  syllables + immxcnt, KVXMAXBUNDLEWORDS);
1314 
1315     if (env.opts.check_resource_usage)
1316       kvx_check_resource_usage (bundle_insn, bundle_insncnt);
1317 
1318     /* Reorder and check the bundle.  */
1319     if (!env.opts.generate_illegal_code)
1320     {
1321       /* Sort the bundle_insn in order of bundling. */
1322       qsort (bundle_insn, bundle_insncnt, sizeof (struct kvxinsn *), kvxinsn_compare);
1323 
1324       kvx_reorder_bundle (bundle_insn, bundle_insncnt);
1325     }
1326 
1327     /* The ordering of the insns has been set correctly in bundle_insn. */
1328     for (int i = 0; i < bundle_insncnt; i++)
1329     {
1330       emit_insn (bundle_insn[i], i, (i == bundle_insncnt + immxcnt - 1));
1331       bundle_insn[i]->written = 1;
1332     }
1333 
1334     // Emit immx, ordering them by EXU tags, 0 to 3
1335     entry = 0;
1336     for (int tag = 0; tag < 4; tag++)
1337     {
1338       for (int j = 0; j < immxcnt; j++)
1339       {
1340 #define kv3_exunum2_fld(x) (int)(((unsigned int)(x) >> 27) & 0x3)
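	/* Bits 27..28 of an extension word hold the EXU tag stored by
	   kvx_reorder_bundle; emit the immx words in increasing tag order.  */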
1341 	if (kv3_exunum2_fld (immxbuf[j].words[0]) == tag)
1342 	{
1343 	  assert (immxbuf[j].written == 0);
1344 	  int insn_pos = bundle_insncnt + entry;
1345 	  emit_insn (&(immxbuf[j]), insn_pos, entry == immxcnt - 1);
1346 	  immxbuf[j].written = 1;
1347 	  entry++;
1348 	}
1349 #undef kv3_exunum2_fld
1350       }
1351     }
1352     if (entry != immxcnt)
1353       as_fatal ("%d IMMX produced, only %d emitted.", immxcnt, entry);
1354 
1355     /* The debug labels that appear in the middle of bundles
1356        had better appear to be attached to the next
1357        bundle. This is because these labels usually point to
1358        the first instruction where some condition is met. If
1359        a label isn't handled this way it will be attached to
1360        the current bundle, which is wrong, as the corresponding
1361        instruction hasn't been executed yet. */
1362     while (label_fixes)
1363     {
1364       struct label_fix *fix = label_fixes;
1365 
1366       label_fixes = fix->next;
1367       symbol_set_value_now (fix->sym);
1368       free (fix);
1369     }
1370 
1371     insncnt = 0;
1372     immxcnt = 0;
1373     memset (immxbuf, 0, sizeof (immxbuf));
1374 
1375     return;
1376   }
1377 
1378     char *buf = NULL;
1379     sscanf (line_cursor, "%m[^\n]", &buf);
1380     struct token_s my_tok = { .insn = buf, .begin = 0, .end = 0, .class_id = -1 , .val = 0 };
1381     struct token_list *tok_lst = parse (my_tok);
1382     free (buf);
1383 
1384     if (!tok_lst)
1385       return;
1386 
1387     /* Skip opcode */
1388     line_cursor += strlen (tok_lst->tok);
1389 
1390   assembling_insn = true;
1391 
1392   inside_bundle = 1;
1393   assemble_tokens (tok_lst);
1394   free_token_list (tok_lst);
1395   assembling_insn = false;
1396 }
1397 
1398 static void
1399 kvx_set_cpu (void)
1400 {
1401   if (!kvx_core_info)
1402     kvx_core_info = &kvx_kv3_v1_core_info;
1403 
1404   if (!kvx_registers)
1405     kvx_registers = kvx_kv3_v1_registers;
1406 
1407   if (!kvx_regfiles)
1408     kvx_regfiles = kvx_kv3_v1_regfiles;
1409 
1410   if (!kvx_modifiers)
1411     kvx_modifiers = kvx_kv3_v1_modifiers;
1412 
1413   if (env.params.core == -1)
1414       env.params.core = kvx_core_info->elf_core;
1415 
1416   int kvx_bfd_mach;
1417   print_insn = kvx_print_insn;
1418 
1419   switch (kvx_core_info->elf_core)
1420     {
1421     case ELF_KVX_CORE_KV3_1:
1422       kvx_bfd_mach = env.params.arch_size == 32 ? bfd_mach_kv3_1 : bfd_mach_kv3_1_64;
1423       setup (ELF_KVX_CORE_KV3_1);
1424       break;
1425     case ELF_KVX_CORE_KV3_2:
1426       kvx_bfd_mach = env.params.arch_size == 32 ? bfd_mach_kv3_2 : bfd_mach_kv3_2_64;
1427       setup (ELF_KVX_CORE_KV3_2);
1428       break;
1429     case ELF_KVX_CORE_KV4_1:
1430       kvx_bfd_mach = env.params.arch_size == 32 ? bfd_mach_kv4_1 : bfd_mach_kv4_1_64;
1431       setup (ELF_KVX_CORE_KV4_1);
1432       break;
1433     default:
1434       as_fatal ("Unknown elf core: 0x%x", kvx_core_info->elf_core);
1435     }
1436 
1437   if (!bfd_set_arch_mach (stdoutput, TARGET_ARCH, kvx_bfd_mach))
1438     as_warn (_("could not set architecture and machine"));
1439 }
1440 
1441 static int
1442 kvxop_compar (const void *a, const void *b)
1443 {
1444   const struct kvxopc *opa = (const struct kvxopc *) a;
1445   const struct kvxopc *opb = (const struct kvxopc *) b;
1446   int res = strcmp (opa->as_op, opb->as_op);
1447 
1448   if (res)
1449     return res;
1450   else
1451     {
1452       for (int i = 0; opa->format[i] && opb->format[i]; ++i)
1453 	if (opa->format[i]->width != opb->format[i]->width)
1454 	  return opa->format[i]->width - opb->format[i]->width;
1455       return 0;
1456     }
1457 }
1458 
1459 /***************************************************/
1460 /*    INITIALIZE ASSEMBLER                         */
1461 /***************************************************/
1462 
1463 static int
1464 print_hash (void **slot, void *arg ATTRIBUTE_UNUSED)
1465 {
1466   string_tuple_t *tuple = *((string_tuple_t **) slot);
1467   printf ("%s\n", tuple->key);
1468   return 1;
1469 }
1470 
1471 static void
1472 declare_register (const char *name, int number)
1473 {
1474   symbolS *regS = symbol_create (name, reg_section,
1475 				 &zero_address_frag, number);
1476 
1477   if (str_hash_insert (env.reg_hash, S_GET_NAME (regS), regS, 0) != NULL)
1478     as_fatal (_("duplicate %s"), name);
1479 }
1480 
1481 void
1482 md_begin ()
1483 {
1484   kvx_set_cpu ();
1485 
1486   /*
1487    * Declare register names with symbols
1488    */
1489 
1490   env.reg_hash = str_htab_create ();
1491 
1492   for (int i = 0; i < kvx_regfiles[KVX_REGFILE_REGISTERS]; i++)
1493     declare_register (kvx_registers[i].name, kvx_registers[i].id);
1494 
1495   /* Sort optab, so that identical mnemonics appear consecutively */
1496   {
1497     int nel;
1498     for (nel = 0; !STREQ ("", kvx_core_info->optab[nel].as_op); nel++)
1499       ;
1500     qsort (kvx_core_info->optab, nel, sizeof (kvx_core_info->optab[0]),
1501 	   kvxop_compar);
1502   }
1503 
1504   /* The '?' is an operand separator */
1505   lex_type['?'] = 0;
1506 
1507   /* Create the opcode hash table      */
1508   /* Each name should appear only once */
1509 
1510   env.opcode_hash = str_htab_create ();
1511   env.reloc_hash = str_htab_create ();
1512 
1513   {
1514     struct kvxopc *op;
1515     const char *name = 0;
1516     for (op = kvx_core_info->optab; !(STREQ ("", op->as_op)); op++)
1517       {
1518 	/* enter in hash table if this is a new name */
1519 	if (!(STREQ (name, op->as_op)))
1520 	  {
1521 	    name = op->as_op;
1522 	    if (str_hash_insert (env.opcode_hash, name, op, 0))
1523 	      as_fatal ("internal error: can't hash opcode `%s'", name);
1524 	  }
1525 
1526 
1527 	for (int i = 0 ; op->format[i] ; ++i)
1528 	  {
1529 	    const char *reloc_name = TOKEN_NAME (op->format[i]->type);
1530 	    void *relocs = op->format[i]->relocs;
1531 	    if (op->format[i]->relocs[0] != 0
1532 		&& !str_hash_find (env.reloc_hash, reloc_name))
1533 	      if (str_hash_insert (env.reloc_hash, reloc_name, relocs, 0))
1534 		  as_fatal ("internal error: can't hash type `%s'", reloc_name);
1535 	  }
1536       }
1537   }
1538 
1539   if (env.opts.dump_table)
1540     {
1541       htab_traverse (env.opcode_hash, print_hash, NULL);
1542       exit (0);
1543     }
1544 
1545   if (env.opts.dump_insn)
1546     {
1547       for (struct kvxopc *op = kvx_core_info->optab; !(STREQ ("", op->as_op)); op++)
1548 	print_insn (op);
1549       exit (0);
1550     }
1551 
1552   /* Here we enforce the minimum section alignment.  Remember, in
1553    * the linker we can place the boundaries between the linked sections
1554    * on larger boundaries.  The text segment is aligned to 8 bytes (long
1555    * words) because of the odd/even constraint on immediate extensions.
1556    */
1557 
1558   bfd_set_section_alignment (text_section, 3);	/* -- 8 bytes */
1559   bfd_set_section_alignment (data_section, 2);	/* -- 4 bytes */
1560   bfd_set_section_alignment (bss_section, 2);	/* -- 4 bytes */
1561   subseg_set (text_section, 0);
1562 
1563   symbolS *gotoff_sym   = symbol_create (".<gotoff>",   undefined_section, &zero_address_frag, 0);
1564   symbolS *got_sym      = symbol_create (".<got>",      undefined_section, &zero_address_frag, 0);
1565   symbolS *plt_sym      = symbol_create (".<plt>",      undefined_section, &zero_address_frag, 0);
1566   symbolS *tlsgd_sym    = symbol_create (".<tlsgd>",    undefined_section, &zero_address_frag, 0);
1567   symbolS *tlsie_sym    = symbol_create (".<tlsie>",    undefined_section, &zero_address_frag, 0);
1568   symbolS *tlsle_sym    = symbol_create (".<tlsle>",    undefined_section, &zero_address_frag, 0);
1569   symbolS *tlsld_sym    = symbol_create (".<tlsld>",    undefined_section, &zero_address_frag, 0);
1570   symbolS *dtpoff_sym   = symbol_create (".<dtpoff>",   undefined_section, &zero_address_frag, 0);
1571   symbolS *plt64_sym    = symbol_create (".<plt64>",    undefined_section, &zero_address_frag, 0);
1572   symbolS *gotaddr_sym  = symbol_create (".<gotaddr>",  undefined_section, &zero_address_frag, 0);
1573   symbolS *pcrel16_sym  = symbol_create (".<pcrel16>",  undefined_section, &zero_address_frag, 0);
1574   symbolS *pcrel_sym    = symbol_create (".<pcrel>",    undefined_section, &zero_address_frag, 0);
1575   symbolS *signed32_sym = symbol_create (".<signed32>", undefined_section, &zero_address_frag, 0);
1576 
1577   for (int i = 0; i < kvx_core_info->nb_pseudo_funcs; ++i)
1578     {
1579       symbolS *sym;
1580       if (!strcmp (kvx_core_info->pseudo_funcs[i].name, "gotoff"))
1581 	sym = gotoff_sym;
1582       else if (!strcmp (kvx_core_info->pseudo_funcs[i].name, "got"))
1583 	sym = got_sym;
1584       else if (!strcmp (kvx_core_info->pseudo_funcs[i].name, "plt"))
1585 	sym = plt_sym;
1586       else if (!strcmp (kvx_core_info->pseudo_funcs[i].name, "tlsgd"))
1587 	sym = tlsgd_sym;
1588       else if (!strcmp (kvx_core_info->pseudo_funcs[i].name, "tlsle"))
1589 	sym = tlsle_sym;
1590       else if (!strcmp (kvx_core_info->pseudo_funcs[i].name, "tlsld"))
1591 	sym = tlsld_sym;
1592       else if (!strcmp (kvx_core_info->pseudo_funcs[i].name, "dtpoff"))
1593 	sym = dtpoff_sym;
1594       else if (!strcmp (kvx_core_info->pseudo_funcs[i].name, "tlsie"))
1595 	sym = tlsie_sym;
1596       else if (!strcmp (kvx_core_info->pseudo_funcs[i].name, "plt64"))
1597 	sym = plt64_sym;
1598       else if (!strcmp (kvx_core_info->pseudo_funcs[i].name, "pcrel16"))
1599 	sym = pcrel16_sym;
1600       else if (!strcmp (kvx_core_info->pseudo_funcs[i].name, "pcrel"))
1601 	sym = pcrel_sym;
1602       else if (!strcmp (kvx_core_info->pseudo_funcs[i].name, "gotaddr"))
1603 	sym = gotaddr_sym;
1604       else if (!strcmp (kvx_core_info->pseudo_funcs[i].name, "signed32"))
1605 	sym = signed32_sym;
1606       else
1607 	as_fatal ("internal error: Unknown pseudo func `%s'",
1608 	    kvx_core_info->pseudo_funcs[i].name);
1609 
1610       kvx_core_info->pseudo_funcs[i].sym = sym;
1611     }
1612 }
1613 
1614 /***************************************************/
1615 /*          ASSEMBLER CLEANUP STUFF                */
1616 /***************************************************/
1617 
1618 /* Return non-zero if the indicated VALUE has overflowed the maximum
1619    range expressible by a signed number with the indicated number of
1620    BITS.
1621 
1622    This is from tc-aarch64.c
1623 */
1624 
1625 static bfd_boolean
1626 signed_overflow (offsetT value, unsigned bits)
1627 {
1628   offsetT lim;
1629   if (bits >= sizeof (offsetT) * 8)
1630     return FALSE;
1631   lim = (offsetT) 1 << (bits - 1);
1632   return (value < -lim || value >= lim);
1633 }
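
/* For example, the 17-bit PC-relative branch below is checked with
   signed_overflow (value, 17 + 2): the two extra bits reflect the 4-byte
   scaling of the encoded offset, i.e. an accepted byte range of
   [-2^18, 2^18).  */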
1634 
1635 /***************************************************/
1636 /*          ASSEMBLER FIXUP STUFF                  */
1637 /***************************************************/
1638 
1639 void
1640 md_apply_fix (fixS * fixP, valueT * valueP, segT segmentP ATTRIBUTE_UNUSED)
1641 {
1642   char *const fixpos = fixP->fx_frag->fr_literal + fixP->fx_where;
1643   valueT value = *valueP;
1644   valueT image;
1645   arelent *rel;
1646 
1647   rel = (arelent *) xmalloc (sizeof (arelent));
1648 
1649   rel->howto = bfd_reloc_type_lookup (stdoutput, fixP->fx_r_type);
1650   if (rel->howto == NULL)
1651     {
1652       as_fatal
1653 	("[md_apply_fix] unsupported relocation type (can't find howto)");
1654     }
1655 
1656   /* Note whether this will delete the relocation.  */
1657   if (fixP->fx_addsy == NULL && fixP->fx_pcrel == 0)
1658     fixP->fx_done = 1;
1659 
1660   if (fixP->fx_size > 0)
1661     image = md_chars_to_number (fixpos, fixP->fx_size);
1662   else
1663     image = 0;
1664   if (fixP->fx_addsy != NULL)
1665     {
1666       switch (fixP->fx_r_type)
1667 	{
1668 	case BFD_RELOC_KVX_S37_TLS_LE_UP27:
1669 	case BFD_RELOC_KVX_S37_TLS_LE_LO10:
1670 
1671 	case BFD_RELOC_KVX_S43_TLS_LE_EX6:
1672 	case BFD_RELOC_KVX_S43_TLS_LE_UP27:
1673 	case BFD_RELOC_KVX_S43_TLS_LE_LO10:
1674 
1675 	case BFD_RELOC_KVX_S37_TLS_GD_LO10:
1676 	case BFD_RELOC_KVX_S37_TLS_GD_UP27:
1677 
1678 	case BFD_RELOC_KVX_S43_TLS_GD_LO10:
1679 	case BFD_RELOC_KVX_S43_TLS_GD_UP27:
1680 	case BFD_RELOC_KVX_S43_TLS_GD_EX6:
1681 
1682 	case BFD_RELOC_KVX_S37_TLS_IE_LO10:
1683 	case BFD_RELOC_KVX_S37_TLS_IE_UP27:
1684 
1685 	case BFD_RELOC_KVX_S43_TLS_IE_LO10:
1686 	case BFD_RELOC_KVX_S43_TLS_IE_UP27:
1687 	case BFD_RELOC_KVX_S43_TLS_IE_EX6:
1688 
1689 	case BFD_RELOC_KVX_S37_TLS_LD_LO10:
1690 	case BFD_RELOC_KVX_S37_TLS_LD_UP27:
1691 
1692 	case BFD_RELOC_KVX_S43_TLS_LD_LO10:
1693 	case BFD_RELOC_KVX_S43_TLS_LD_UP27:
1694 	case BFD_RELOC_KVX_S43_TLS_LD_EX6:
1695 
1696 	  S_SET_THREAD_LOCAL (fixP->fx_addsy);
1697 	  break;
1698 	default:
1699 	  break;
1700 	}
1701     }
1702 
1703   /* If relocation has been marked for deletion, apply remaining changes */
1704   if (fixP->fx_done)
1705     {
1706       switch (fixP->fx_r_type)
1707 	{
1708 	case BFD_RELOC_8:
1709 	case BFD_RELOC_16:
1710 	case BFD_RELOC_32:
1711 	case BFD_RELOC_64:
1712 
1713 	case BFD_RELOC_KVX_GLOB_DAT:
1714 	case BFD_RELOC_KVX_32_GOT:
1715 	case BFD_RELOC_KVX_64_GOT:
1716 	case BFD_RELOC_KVX_64_GOTOFF:
1717 	case BFD_RELOC_KVX_32_GOTOFF:
1718 	  image = value;
1719 	  md_number_to_chars (fixpos, image, fixP->fx_size);
1720 	  break;
1721 
1722 	case BFD_RELOC_KVX_PCREL17:
1723 	  if (signed_overflow (value, 17 + 2))
1724 	    as_bad_where (fixP->fx_file, fixP->fx_line,
1725 			  _("branch out of range"));
1726 	  goto pcrel_common;
1727 
1728 	case BFD_RELOC_KVX_PCREL27:
1729 	  if (signed_overflow (value, 27 + 2))
1730 	    as_bad_where (fixP->fx_file, fixP->fx_line,
1731 			  _("branch out of range"));
1732 	  goto pcrel_common;
1733 
1734 	case BFD_RELOC_KVX_S16_PCREL:
1735 	  if (signed_overflow (value, 16))
1736 	    as_bad_where (fixP->fx_file, fixP->fx_line,
1737 			  _("signed16 PCREL value out of range"));
1738 	  goto pcrel_common;
1739 
1740 	case BFD_RELOC_KVX_S43_PCREL_LO10:
1741 	case BFD_RELOC_KVX_S43_PCREL_UP27:
1742 	case BFD_RELOC_KVX_S43_PCREL_EX6:
1743 	  if (signed_overflow (value, 10 + 27 + 6))
1744 	    as_bad_where (fixP->fx_file, fixP->fx_line,
1745 			  _("signed43 PCREL value out of range"));
1746 	  goto pcrel_common;
1747 
1748 	case BFD_RELOC_KVX_S37_PCREL_LO10:
1749 	case BFD_RELOC_KVX_S37_PCREL_UP27:
1750 	  if (signed_overflow (value, 10 + 27))
1751 	    as_bad_where (fixP->fx_file, fixP->fx_line,
1752 			  _("signed37 PCREL value out of range"));
1753 	  goto pcrel_common;
1754 
1755 	case BFD_RELOC_KVX_S64_PCREL_LO10:
1756 	case BFD_RELOC_KVX_S64_PCREL_UP27:
1757 	case BFD_RELOC_KVX_S64_PCREL_EX27:
1758 
1759 	pcrel_common:
1760 	  if (fixP->fx_pcrel || fixP->fx_addsy)
1761 	    return;
1762 	  value =
1763 	    (((value >> rel->howto->rightshift) << rel->howto->bitpos) & rel->
1764 	     howto->dst_mask);
1765 	  image = (image & ~(rel->howto->dst_mask)) | value;
1766 	  md_number_to_chars (fixpos, image, fixP->fx_size);
1767 	  break;
1768 
1769 	case BFD_RELOC_KVX_S64_GOTADDR_LO10:
1770 	case BFD_RELOC_KVX_S64_GOTADDR_UP27:
1771 	case BFD_RELOC_KVX_S64_GOTADDR_EX27:
1772 
1773 	case BFD_RELOC_KVX_S43_GOTADDR_LO10:
1774 	case BFD_RELOC_KVX_S43_GOTADDR_UP27:
1775 	case BFD_RELOC_KVX_S43_GOTADDR_EX6:
1776 
1777 	case BFD_RELOC_KVX_S37_GOTADDR_LO10:
1778 	case BFD_RELOC_KVX_S37_GOTADDR_UP27:
1779 	  value = 0;
1780 	  /* Fallthrough */
1781 
1782 	case BFD_RELOC_KVX_S32_UP27:
1783 
1784 	case BFD_RELOC_KVX_S37_UP27:
1785 
1786 	case BFD_RELOC_KVX_S43_UP27:
1787 
1788 	case BFD_RELOC_KVX_S64_UP27:
1789 	case BFD_RELOC_KVX_S64_EX27:
1790 	case BFD_RELOC_KVX_S64_LO10:
1791 
1792 	case BFD_RELOC_KVX_S43_TLS_LE_UP27:
1793 	case BFD_RELOC_KVX_S43_TLS_LE_EX6:
1794 
1795 	case BFD_RELOC_KVX_S37_TLS_LE_UP27:
1796 
1797 	case BFD_RELOC_KVX_S37_GOTOFF_UP27:
1798 
1799 	case BFD_RELOC_KVX_S43_GOTOFF_UP27:
1800 	case BFD_RELOC_KVX_S43_GOTOFF_EX6:
1801 
1802 	case BFD_RELOC_KVX_S43_GOT_UP27:
1803 	case BFD_RELOC_KVX_S43_GOT_EX6:
1804 
1805 	case BFD_RELOC_KVX_S37_GOT_UP27:
1806 
1807 	case BFD_RELOC_KVX_S32_LO5:
1808 	case BFD_RELOC_KVX_S37_LO10:
1809 
1810 	case BFD_RELOC_KVX_S43_LO10:
1811 	case BFD_RELOC_KVX_S43_EX6:
1812 
1813 	case BFD_RELOC_KVX_S43_TLS_LE_LO10:
1814 	case BFD_RELOC_KVX_S37_TLS_LE_LO10:
1815 
1816 	case BFD_RELOC_KVX_S37_GOTOFF_LO10:
1817 	case BFD_RELOC_KVX_S43_GOTOFF_LO10:
1818 
1819 	case BFD_RELOC_KVX_S43_GOT_LO10:
1820 	case BFD_RELOC_KVX_S37_GOT_LO10:
1821 
1822 	default:
1823 	  as_fatal ("[md_apply_fix]: "
1824 		    "unsupported relocation type (type not handled: %d)",
1825 		    fixP->fx_r_type);
1826 	}
1827     }
1828 }
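
/* A note on the split relocations handled above (summarizing what the
   overflow checks already imply): an S37 immediate is spread over two
   syllables as LO10 + UP27 (10 + 27 = 37 bits), an S43 immediate over
   three as LO10 + UP27 + EX6 (10 + 27 + 6 = 43 bits), and an S64
   immediate as LO10 + UP27 + EX27 (10 + 27 + 27 = 64 bits).  Each
   relocation type patches only its own slice of the value, through the
   rightshift, bitpos and dst_mask fields of its howto.  */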
1829 
1830 /*
1831  * Warning: Can be called only in fixup_segment() after fx_addsy field
1832  * has been updated by calling symbol_get_value_expression(...->X_add_symbol)
1833  */
1834 int
1835 kvx_validate_sub_fix (fixS * fixP)
1836 {
1837   segT add_symbol_segment, sub_symbol_segment;
1838 
1839   switch (fixP->fx_r_type)
1840     {
1841     case BFD_RELOC_8:
1842     case BFD_RELOC_16:
1843     case BFD_RELOC_32:
1844       if (fixP->fx_addsy != NULL)
1845 	add_symbol_segment = S_GET_SEGMENT (fixP->fx_addsy);
1846       else
1847 	return 0;
1848       if (fixP->fx_subsy != NULL)
1849 	sub_symbol_segment = S_GET_SEGMENT (fixP->fx_subsy);
1850       else
1851 	return 0;
1852 
1853       if ((strcmp (S_GET_NAME (fixP->fx_addsy),
1854 		   S_GET_NAME (fixP->fx_subsy)) == 0) &&
1855 	  (add_symbol_segment == sub_symbol_segment))
1856 	return 1;
1857       break;
1858     default:
1859       break;
1860     }
1861 
1862   return 0;
1863 }
1864 
1865 /* This is called whenever some data item (not an instruction) needs a
1866  * fixup.  */
1867 void
1868 kvx_cons_fix_new (fragS * f, int where, int nbytes, expressionS * exp,
1869 		  bfd_reloc_code_real_type code)
1870 {
1871   if (exp->X_op == O_pseudo_fixup)
1872     {
1873       exp->X_op = O_symbol;
1874       struct pseudo_func *pf =
1875 	kvx_get_pseudo_func_data_scn (exp->X_op_symbol);
1876       assert (pf != NULL);
1877       code = pf->pseudo_relocs.single;
1878 
1879       if (code == BFD_RELOC_UNUSED)
1880 	as_fatal ("Unsupported relocation");
1881     }
1882   else
1883     {
1884       switch (nbytes)
1885 	{
1886 	case 1:
1887 	  code = BFD_RELOC_8;
1888 	  break;
1889 	case 2:
1890 	  code = BFD_RELOC_16;
1891 	  break;
1892 	case 4:
1893 	  code = BFD_RELOC_32;
1894 	  break;
1895 	case 8:
1896 	  code = BFD_RELOC_64;
1897 	  break;
1898 	default:
1899 	  as_fatal ("unsupported BFD relocation size %u", nbytes);
1900 	  break;
1901 	}
1902     }
1903   fix_new_exp (f, where, nbytes, exp, 0, code);
1904 }
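
/* A minimal sketch of the mapping performed above: for plain data the
   relocation is chosen from the item size alone,

     1 byte  -> BFD_RELOC_8
     2 bytes -> BFD_RELOC_16
     4 bytes -> BFD_RELOC_32
     8 bytes -> BFD_RELOC_64

   while an expression built from one of the pseudo-functions registered
   earlier (gotoff, got, plt, ...) arrives as O_pseudo_fixup and uses the
   relocation recorded in pf->pseudo_relocs.single instead.  */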
1905 
1906 /*
1907  * generate a relocation record
1908  */
1909 
1910 arelent *
1911 tc_gen_reloc (asection * sec ATTRIBUTE_UNUSED, fixS * fixp)
1912 {
1913   arelent *reloc;
1914   bfd_reloc_code_real_type code;
1915 
1916   reloc = (arelent *) xmalloc (sizeof (arelent));
1917 
1918   reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
1919   *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
1920   reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
1921 
1922   code = fixp->fx_r_type;
1923   if (code == BFD_RELOC_32 && fixp->fx_pcrel)
1924     code = BFD_RELOC_32_PCREL;
1925   reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
1926 
1927   if (reloc->howto == NULL)
1928     {
1929       as_bad_where (fixp->fx_file, fixp->fx_line,
1930 		    "cannot represent `%s' relocation in object file",
1931 		    bfd_get_reloc_code_name (code));
1932       return NULL;
1933     }
1934 
1935 //  if (!fixp->fx_pcrel != !reloc->howto->pc_relative)
1936 //    {
1937 //      as_fatal ("internal error? cannot generate `%s' relocation",
1938 //		bfd_get_reloc_code_name (code));
1939 //    }
1940 //  assert (!fixp->fx_pcrel == !reloc->howto->pc_relative);
1941 
1942   reloc->addend = fixp->fx_offset;
1943 
1944   /*
1945    * Ohhh, this is ugly.  The problem is that if this is a locally-defined
1946    * global symbol, the relocation will entirely be performed at link time, not
1947    * at assembly time.  bfd_perform_reloc doesn't know about this sort
1948    * of thing, and as a result we need to fake it out here.
1949    */
1950 
1951   /* GD I'm not sure what this is used for in the kvx case but it sure  */
1952   /* messes up the relocs when emit_all_relocs is used as they are not */
1953   /* resolved with respect to a global symbol (e.g. .text), and hence  */
1954   /* they are ALWAYS resolved at link time                             */
1955   /* FIXME FIXME                                                       */
1956 
1957   /* clarkes: 030827:  This code (and the other half of the fix in write.c)
1958    * have caused problems with the PIC relocations.
1959    * The root problem is that bfd_install_relocation adds in to the reloc
1960    * addend the section offset of a symbol defined in the current object.
1961    * This causes problems on numerous other targets too, and there are
1962    * several different methods used to get around it:
1963    *   1.  In tc_gen_reloc, subtract off the value that bfd_install_relocation
1964    *       added.  That is what we do here, and it is also done the
1965    *       same way for alpha.
1966    *   2.  In md_apply_fix, subtract off the value that bfd_install_relocation
1967    *       will add.  This is done on SH (non-ELF) and sparc targets.
1968    *   3.  In the howto structure for the relocations, specify a
1969    *       special function that does not return bfd_reloc_continue.
1970    *       This causes bfd_install_relocation to terminate before it
1971    *       adds in the symbol offset.  This is done on SH ELF targets.
1972    *       Note that on ST200 we specify bfd_elf_generic_reloc as
1973    *       the special function.  This will return bfd_reloc_continue
1974    *       only in some circumstances, but in particular if the reloc
1975    *       is marked as partial_inplace in the bfd howto structure, then
1976    *       bfd_elf_generic_reloc will return bfd_reloc_continue.
1977    *       Some ST200 relocations are marked as partial_inplace
1978    *       (this is an error in my opinion because ST200 always uses
1979    *       a separate addend), but some are not.  The PIC relocations
1980    *       are not marked as partial_inplace, so for them,
1981    *       bfd_elf_generic_reloc returns bfd_reloc_ok, and the addend
1982    *       is not modified by bfd_install_relocation.   The relocations
1983    *       R_KVX_16 and R_KVX_32 are marked partial_inplace, and so for
1984    *       these we need to correct the addend.
1985    * In the code below, the condition in the emit_all_relocs branch
1986    * (now moved to write.c) is the inverse of the condition that
1987    * bfd_elf_generic_reloc uses to short-circuit the code in
1988    * bfd_install_relocation that modifies the addend.  The condition
1989    * in the else branch matches the condition used in the alpha version
1990    * of tc_gen_reloc (see tc-alpha.c).
1991    * I do not know why we need to use different conditions in these
1992    * two branches, it seems to me that the condition should be the same
1993    * whether or not emit_all_relocs is true.
1994    * I also do not understand why it was necessary to move the emit_all_relocs
1995    * condition to write.c.
1996    */
1997 
1998   if (S_IS_EXTERNAL (fixp->fx_addsy) &&
1999       !S_IS_COMMON (fixp->fx_addsy) && reloc->howto->partial_inplace)
2000     reloc->addend -= symbol_get_bfdsym (fixp->fx_addsy)->value;
2001 
2002   return reloc;
2003 }
2004 
2005 /* Round up segment to appropriate boundary */
2006 
2007 valueT
2008 md_section_align (asection * seg ATTRIBUTE_UNUSED, valueT size)
2009 {
2010 #ifndef OBJ_ELF
2011   /* This is not right for ELF; a.out wants it, and COFF will force
2012    * the alignment anyways.  */
2013   int align = bfd_get_section_alignment (stdoutput, seg);
2014   valueT mask = ((valueT) 1 << align) - 1;
2015   return (size + mask) & ~mask;
2016 #else
2017   return size;
2018 #endif
2019 }
2020 
2021 int
2022 md_estimate_size_before_relax (register fragS * fragP ATTRIBUTE_UNUSED,
2023 			       segT segtype ATTRIBUTE_UNUSED)
2024 {
2025   as_fatal ("estimate_size_before_relax called");
2026 }
2027 
2028 void
2029 md_convert_frag (bfd * abfd ATTRIBUTE_UNUSED,
2030 		 asection * sec ATTRIBUTE_UNUSED,
2031 		 fragS * fragp ATTRIBUTE_UNUSED)
2032 {
2033   as_fatal ("kvx convert_frag");
2034 }
2035 
2036 symbolS *
2037 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
2038 {
2039   return 0;
2040 }
2041 
2042 const char *
2043 md_atof (int type ATTRIBUTE_UNUSED,
2044 	 char *litp ATTRIBUTE_UNUSED, int *sizep ATTRIBUTE_UNUSED)
2045 {
2046   return ieee_md_atof (type, litp, sizep, TARGET_BYTES_BIG_ENDIAN);
2047 }
2048 
2049 /*
2050  * Calculate the base address for a pcrel fixup.
2051  * For a relocation, we might also need to add the addend.
2052  */
2053 
2054 long
2055 md_pcrel_from (fixS * fixP)
2056 {
2057   return (fixP->fx_where + fixP->fx_frag->fr_address);
2058 }
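
/* For instance (illustrative values): a fixup placed at offset 8 into a
   frag whose address is 0x1000 gets a PC-relative base of 0x1008, and the
   fixup value is computed relative to that address.  */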
2059 
2060 /************************************************************/
2061 /*   Hooks into standard processing -- we hook into label   */
2062 /*   handling code to detect double ':' and we hook before  */
2063 /*   a line of code is processed to do some simple sed style */
2064 /*   edits.                                                 */
2065 /************************************************************/
2066 
2067 static symbolS *last_proc_sym = NULL;
2068 static int update_last_proc_sym = 0;
2069 
2070 void
2071 kvx_frob_label (symbolS *sym)
2072 {
2073   if (update_last_proc_sym)
2074     {
2075       last_proc_sym = sym;
2076       update_last_proc_sym = 0;
2077     }
2078 
2079   if (inside_bundle)
2080     {
2081       struct label_fix *fix;
2082       fix = malloc (sizeof (*fix));
2083       fix->next = label_fixes;
2084       fix->sym = sym;
2085       label_fixes = fix;
2086     }
2087 
2088   dwarf2_emit_label (sym);
2089 }
2090 
2091 void
2092 kvx_check_label (symbolS *sym)
2093 {
2094   /* Labels followed by a second colon are considered external symbols.  */
2095   if (*input_line_pointer == ':')
2096     {
2097       S_SET_EXTERNAL (sym);
2098       input_line_pointer++;
2099     }
2100 }
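
/* Example of the double-colon form (hypothetical label, not from the
   original source):

     foo::
       nop
     ;;

   defines "foo" and also marks it external, as if ".globl foo" had been
   written alongside a plain "foo:" label.  */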
2101 
2102 /* Emit a single-bundle nop.  This is needed by the .nop asm directive.
2103  * The end of bundle, normally handled by start_line_hook, has to be
2104  * emitted here explicitly using the BE pseudo-op.
2105  */
2106 void
2107 kvx_emit_single_noop (void)
2108 {
2109   char *nop;
2110   char *end_of_bundle;
2111 
2112   if (asprintf (&nop, "nop") < 0)
2113     as_fatal ("%s", xstrerror (errno));
2114 
2115   if (asprintf (&end_of_bundle, "be") < 0)
2116     as_fatal ("%s", xstrerror (errno));
2117 
2118   char *saved_ilp = input_line_pointer;
2119   md_assemble (nop);
2120   md_assemble (end_of_bundle);
2121   input_line_pointer = saved_ilp;
2122   free (nop);
2123   free (end_of_bundle);
2124 }
2125 
2126 /*  edit out some syntactic sugar that confuses GAS       */
2127 /*  input_line_pointer is guaranteed to point to the      */
2128 /*  current line but may include text from following      */
2129 /*  lines.  Thus, '\n' must be scanned for as well as '\0' */
2130 
2131 void
2132 kvx_md_start_line_hook (void)
2133 {
2134   char *t;
2135 
2136   for (t = input_line_pointer; t && t[0] == ' '; t++);
2137 
2138   /* Detect illegal syntax patterns:
2139    * - two bundle ends on the same line: ;; ;;
2140    * - illegal token: ;;;
2141    */
2142   if (t && (t[0] == ';') && (t[1] == ';'))
2143     {
2144       char *tmp_t;
2145       bool newline_seen = false;
2146 
2147       if (t[2] == ';')
2148 	as_fatal ("Syntax error: Illegal ;;; token");
2149 
2150       tmp_t = t + 2;
2151 
2152       while (tmp_t && tmp_t[0])
2153 	{
2154 	  while (tmp_t && tmp_t[0] &&
2155 		 ((tmp_t[0] == ' ') || (tmp_t[0] == '\n')))
2156 	    {
2157 	      if (tmp_t[0] == '\n')
2158 		newline_seen = true;
2159 	      tmp_t++;
2160 	    }
2161 	  if (tmp_t[0] == ';' && tmp_t[1] == ';')
2162 	    {
2163 	      /* if there's no newline between the two bundle stops
2164 	       * then raise a syntax error now, otherwise a strange error
2165 	       * message from read.c will be raised: "junk at end of line..."
2166 	       */
2167 	      if (tmp_t[2] == ';')
2168 		as_fatal ("Syntax error: Illegal ;;; token");
2169 
2170 	      if (!newline_seen)
2171 		  as_fatal ("Syntax error: More than one bundle stop on a line");
2172 	      newline_seen = false;	/* reset */
2173 
2174 	      /* this is an empty bundle, transform it into an
2175 	       * empty statement */
2176 	      tmp_t[0] = ';';
2177 	      tmp_t[1] = ' ';
2178 
2179 	      tmp_t += 2;
2180 	    }
2181 	  else
2182 	    break;
2183 	}
2184     }
2185 
2186   /* check for bundle end                             */
2187   /* we transform these into a special opcode BE      */
2188   /* because gas has ';' hardwired as a statement end */
2189   if (t && (t[0] == ';') && (t[1] == ';'))
2190     {
2191       t[0] = 'B';
2192       t[1] = 'E';
2193       return;
2194     }
2195 }
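
/* In practice (illustrative input, not from the original source) a
   bundle written as

     addd $r0 = $r1, $r2
     ;;

   reaches the parser with the ";;" line rewritten to the pseudo opcode
   "BE", which the assembler treats as the bundle end; a ";;;" token or
   two bundle stops on the same line are rejected above with a syntax
   error.  */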
2196 
2197 static void
2198 kvx_check_resources (int f)
2199 {
2200   env.opts.check_resource_usage = f;
2201 }
2202 
2203 /** called before write_object_file */
2204 void
2205 kvx_end (void)
2206 {
2207   int newflags;
2208 
2209   if (!env.params.core_set)
2210     env.params.core = kvx_core_info->elf_core;
2211 
2212   /* (pp) the flags must be set at once */
2213   newflags = env.params.core | env.params.abi | env.params.pic_flags;
2214 
2215   if (env.params.arch_size == 64)
2216     newflags |= ELF_KVX_ABI_64B_ADDR_BIT;
2217 
2218   bfd_set_private_flags (stdoutput, newflags);
2219 
2220   cleanup ();
2221 
2222   if (inside_bundle && insncnt != 0)
2223     as_bad ("unexpected end-of-file while processing a bundle."
2224 	    "  Please check that ;; is on its own line.");
2225 }
2226 
2227 static void
2228 kvx_type (int start ATTRIBUTE_UNUSED)
2229 {
2230   char *name;
2231   char c;
2232   int type;
2233   char *typename = NULL;
2234   symbolS *sym;
2235   elf_symbol_type *elfsym;
2236 
2237   c = get_symbol_name (&name);
2238   sym = symbol_find_or_make (name);
2239   elfsym = (elf_symbol_type *) symbol_get_bfdsym (sym);
2240   *input_line_pointer = c;
2241 
2242   if (!*S_GET_NAME (sym))
2243     as_bad (_("Missing symbol name in directive"));
2244 
2245   SKIP_WHITESPACE ();
2246   if (*input_line_pointer == ',')
2247     ++input_line_pointer;
2248 
2249 
2250   SKIP_WHITESPACE ();
2251   if (*input_line_pointer == '#'
2252       || *input_line_pointer == '@'
2253       || *input_line_pointer == '"' || *input_line_pointer == '%')
2254     ++input_line_pointer;
2255 
2256   /* typename = input_line_pointer; */
2257   /* c = get_symbol_end(); */
2258   c = get_symbol_name (&typename);
2259 
2260   type = 0;
2261   if (strcmp (typename, "function") == 0
2262       || strcmp (typename, "STT_FUNC") == 0)
2263     type = BSF_FUNCTION;
2264   else if (strcmp (typename, "object") == 0
2265 	   || strcmp (typename, "STT_OBJECT") == 0)
2266     type = BSF_OBJECT;
2267   else if (strcmp (typename, "tls_object") == 0
2268 	   || strcmp (typename, "STT_TLS") == 0)
2269     type = BSF_OBJECT | BSF_THREAD_LOCAL;
2270   else if (strcmp (typename, "common") == 0
2271 	   || strcmp (typename, "STT_COMMON") == 0)
2272     type = BSF_ELF_COMMON;
2273   else if (strcmp (typename, "gnu_unique_object") == 0
2274 	   || strcmp (typename, "STB_GNU_UNIQUE") == 0)
2275     {
2276       elf_tdata (stdoutput)->has_gnu_osabi |= elf_gnu_osabi_unique;
2277       type = BSF_OBJECT | BSF_GNU_UNIQUE;
2278     }
2279   else if (strcmp (typename, "notype") == 0
2280 	   || strcmp (typename, "STT_NOTYPE") == 0)
2281     ;
2282 #ifdef md_elf_symbol_type
2283   else if ((type = md_elf_symbol_type (typename, sym, elfsym)) != -1)
2284     ;
2285 #endif
2286   else
2287     as_bad (_("unrecognized symbol type \"%s\""), typename);
2288 
2289   *input_line_pointer = c;
2290 
2291   if (*input_line_pointer == '"')
2292     ++input_line_pointer;
2293 
2294   elfsym->symbol.flags |= type;
2295   symbol_get_bfdsym (sym)->flags |= type;
2296 
2297   demand_empty_rest_of_line ();
2298 }
2299 
2300 #define ENDPROCEXTENSION	"$endproc"
2301 #define MINUSEXPR		".-"
2302 
2303 static int proc_endp_status = 0;
2304 
2305 static void
2306 kvx_endp (int start ATTRIBUTE_UNUSED)
2307 {
2308   char c;
2309   char *name;
2310 
2311   if (inside_bundle)
2312     as_warn (".endp directive inside a bundle.");
2313   /* The function name is optional and is ignored.  */
2314   /* There may be several names separated by commas...  */
2315   while (1)
2316     {
2317       SKIP_WHITESPACE ();
2318       c = get_symbol_name (&name);
2319       (void) restore_line_pointer (c);
2320       SKIP_WHITESPACE ();
2321       if (*input_line_pointer != ',')
2322 	break;
2323       ++input_line_pointer;
2324     }
2325   demand_empty_rest_of_line ();
2326 
2327   if (!proc_endp_status)
2328     {
2329       as_warn (".endp directive doesn't follow .proc -- ignoring");
2330       return;
2331     }
2332 
2333   proc_endp_status = 0;
2334 
2335   /* TB begin : add BSF_FUNCTION attribute to last_proc_sym symbol */
2336   if (size_type_function)
2337     {
2338       if (!last_proc_sym)
2339 	{
2340 	  as_bad ("Cannot set function attributes (bad symbol)");
2341 	  return;
2342 	}
2343 
2344       /*    last_proc_sym->symbol.flags |= BSF_FUNCTION; */
2345       symbol_get_bfdsym (last_proc_sym)->flags |= BSF_FUNCTION;
2346       /* Add .size funcname,.-funcname in order to add size
2347        * attribute to the current function */
2348       {
2349 	const int newdirective_sz =
2350 	  strlen (S_GET_NAME (last_proc_sym)) + strlen (MINUSEXPR) + 1;
2351 	char *newdirective = malloc (newdirective_sz);
2352 	char *savep = input_line_pointer;
2353 	expressionS exp;
2354 
2355 	memset (newdirective, 0, newdirective_sz);
2356 
2357 	/* Build the ".-funcname" expression.  */
2358 	strcat (newdirective, MINUSEXPR);
2359 	strcat (newdirective, S_GET_NAME (last_proc_sym));
2360 	input_line_pointer = newdirective;
2361 	expression (&exp);
2362 
2363 	if (exp.X_op == O_constant)
2364 	  {
2365 	    S_SET_SIZE (last_proc_sym, exp.X_add_number);
2366 	    if (symbol_get_obj (last_proc_sym)->size)
2367 	      {
2368 		xfree (symbol_get_obj (last_proc_sym)->size);
2369 		symbol_get_obj (last_proc_sym)->size = NULL;
2370 	      }
2371 	  }
2372 	else
2373 	  {
2374 	    symbol_get_obj (last_proc_sym)->size =
2375 	      (expressionS *) xmalloc (sizeof (expressionS));
2376 	    *symbol_get_obj (last_proc_sym)->size = exp;
2377 	  }
2378 
2379 	/* just restore the real input pointer */
2380 	input_line_pointer = savep;
2381 	free (newdirective);
2382       }
2383     }
2384   /* TB end */
2385 
2386   last_proc_sym = NULL;
2387 }
2388 
2389 static void
2390 kvx_proc (int start ATTRIBUTE_UNUSED)
2391 {
2392   char c;
2393   char *name;
2394   /* there may be several names separated by commas... */
2395   while (1)
2396     {
2397       SKIP_WHITESPACE ();
2398       c = get_symbol_name (&name);
2399       (void) restore_line_pointer (c);
2400 
2401       SKIP_WHITESPACE ();
2402       if (*input_line_pointer != ',')
2403 	break;
2404       ++input_line_pointer;
2405     }
2406   demand_empty_rest_of_line ();
2407 
2408   if (proc_endp_status)
2409     {
2410       as_warn (".proc follows .proc -- ignoring");
2411       return;
2412     }
2413 
2414   proc_endp_status = 1;
2415 
2416   /* This code emits a global symbol to mark the end of each function.    */
2417   /* The symbol emitted has a name formed by the original function name   */
2418   /* concatenated with $endproc, so if _foo is a function name the symbol */
2419   /* marking the end of it is _foo$endproc.                               */
2420   /* It is also required for generation of the .size directive in kvx_endp(). */
2421 
2422   if (size_type_function)
2423     update_last_proc_sym = 1;
2424 }
2425 
2426 int
2427 kvx_force_reloc (fixS * fixP)
2428 {
2429   symbolS *sym;
2430   asection *symsec;
2431 
2432   if (generic_force_reloc (fixP))
2433     return 1;
2434 
2435   switch (fixP->fx_r_type)
2436     {
2437     case BFD_RELOC_KVX_32_GOTOFF:
2438     case BFD_RELOC_KVX_S37_GOTOFF_UP27:
2439     case BFD_RELOC_KVX_S37_GOTOFF_LO10:
2440 
2441     case BFD_RELOC_KVX_64_GOTOFF:
2442     case BFD_RELOC_KVX_S43_GOTOFF_UP27:
2443     case BFD_RELOC_KVX_S43_GOTOFF_LO10:
2444     case BFD_RELOC_KVX_S43_GOTOFF_EX6:
2445 
2446     case BFD_RELOC_KVX_32_GOT:
2447     case BFD_RELOC_KVX_64_GOT:
2448     case BFD_RELOC_KVX_S37_GOT_UP27:
2449     case BFD_RELOC_KVX_S37_GOT_LO10:
2450 
2451     case BFD_RELOC_KVX_GLOB_DAT:
2452       return 1;
2453     default:
2454       return 0;
2455     }
2456 
2457   sym = fixP->fx_addsy;
2458   if (sym)
2459     {
2460       symsec = S_GET_SEGMENT (sym);
2461       /* if (bfd_is_abs_section (symsec)) return 0; */
2462       if (!SEG_NORMAL (symsec))
2463 	return 0;
2464     }
2465   return 1;
2466 }
2467 
2468 int
2469 kvx_force_reloc_sub_same (fixS * fixP, segT sec)
2470 {
2471   symbolS *sym;
2472   asection *symsec;
2473   const char *sec_name = NULL;
2474 
2475   if (generic_force_reloc (fixP))
2476     return 1;
2477 
2478   switch (fixP->fx_r_type)
2479     {
2480     case BFD_RELOC_KVX_32_GOTOFF:
2481     case BFD_RELOC_KVX_S37_GOTOFF_UP27:
2482     case BFD_RELOC_KVX_S37_GOTOFF_LO10:
2483 
2484     case BFD_RELOC_KVX_64_GOTOFF:
2485     case BFD_RELOC_KVX_S43_GOTOFF_UP27:
2486     case BFD_RELOC_KVX_S43_GOTOFF_LO10:
2487     case BFD_RELOC_KVX_S43_GOTOFF_EX6:
2488 
2489     case BFD_RELOC_KVX_32_GOT:
2490     case BFD_RELOC_KVX_64_GOT:
2491     case BFD_RELOC_KVX_S37_GOT_UP27:
2492     case BFD_RELOC_KVX_S37_GOT_LO10:
2493 
2494     case BFD_RELOC_KVX_S37_LO10:
2495     case BFD_RELOC_KVX_S37_UP27:
2496 
2497     case BFD_RELOC_KVX_GLOB_DAT:
2498       return 1;
2499 
2500     default:
2501       return 0;
2502     }
2503 
2504   sym = fixP->fx_addsy;
2505   if (sym)
2506     {
2507       symsec = S_GET_SEGMENT (sym);
2508       /* if (bfd_is_abs_section (symsec)) return 0; */
2509       if (!SEG_NORMAL (symsec))
2510 	return 0;
2511 
2512       /*
2513        * for .debug_aranges, .debug_frame, .eh_frame sections, containing
2514        * expressions of the form "sym2 - sym1 + addend", solve them even when
2515        * --emit-all-relocs is set. Otherwise, a relocation on two symbols
2516        * is necessary and fails at elf level. Binopt should not be impacted by
2517        * the resolution of this relocatable expression on symbols inside a
2518        * function.
2519        */
2520       sec_name = segment_name (sec);
2521       if ((strcmp (sec_name, ".eh_frame") == 0) ||
2522 	  (strcmp (sec_name, ".except_table") == 0) ||
2523 	  (strncmp (sec_name, ".debug_", sizeof (".debug_") - 1) == 0))
2524 	return 0;
2525     }
2526   return 1;
2527 }
2528 
2529 /* Implement HANDLE_ALIGN.  */
2530 
2531 static void
2532 kvx_make_nops (char *buf, bfd_vma bytes)
2533 {
2534   bfd_vma i = 0;
2535   unsigned int j;
2536 
2537   static unsigned int nop_single = 0;
2538 
2539   if (!nop_single)
2540     {
2541       const struct kvxopc *opcode =
2542 	(struct kvxopc *) str_hash_find (env.opcode_hash, "nop");
2543 
2544       if (opcode == NULL)
2545 	as_fatal
2546 	  ("internal error: could not find opcode for 'nop' during padding");
2547 
2548       nop_single = opcode->codewords[0].opcode;
2549     }
2550 
2551   /* KVX instructions are always 4-byte aligned.  If we are at a position */
2552   /* that is not 4-byte aligned, it means this is not part of an instruction, */
2553   /* so it is safe to use zero bytes for padding. */
2554 
2555   for (j = bytes % 4; j > 0; j--)
2556     buf[i++] = 0;
2557 
2558   for (j = 0; j < (bytes - i); j += 4)
2559     {
2560       unsigned nop = nop_single;
2561 
2562       // A nop ends its bundle only if it is the 4th nop of a bundle or the
2563       // last padding nop.  Set the parallel bit when neither condition holds.
2564       // 4*4 = biggest nop bundle we can get
2565       // 12 = offset when writing the last nop possible in a 4-nop bundle
2566       // bytes-i-4 = offset of the last 4-byte word in the padding
2567       if (j % (4 * 4) != 12 && j != (bytes - i - 4))
2568 	nop |= PARALLEL_BIT;
2569 
2570       memcpy (buf + i + j, &nop, sizeof (nop));
2571     }
2572 }
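
/* Worked example (illustrative size): for a padding request of 10 bytes,
   the code first emits 10 % 4 == 2 zero bytes to reach 4-byte alignment,
   then two nops; the first nop keeps PARALLEL_BIT set and the last one
   clears it, ending the padding bundle.  Longer runs are split so that
   every fourth nop also ends its bundle.  */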
2573 
2574 /* Pad the code section with bundles of nops when possible, zero bytes if not. */
2575 void
2576 kvx_handle_align (fragS *fragP)
2577 {
2578   switch (fragP->fr_type)
2579     {
2580     case rs_align_code:
2581       {
2582 	bfd_signed_vma bytes = (fragP->fr_next->fr_address
2583 				- fragP->fr_address - fragP->fr_fix);
2584 	char *p = fragP->fr_literal + fragP->fr_fix;
2585 
2586 	if (bytes <= 0)
2587 	  break;
2588 
2589 	/* Insert zeros or nops to get 4 byte alignment.  */
2590 	kvx_make_nops (p, bytes);
2591 	fragP->fr_fix += bytes;
2592       }
2593       break;
2594 
2595     default:
2596       break;
2597     }
2598 }
2599 /*
2600  * This is just used for debugging
2601  */
2602 
2603 ATTRIBUTE_UNUSED
2604 static void
2605 print_operand (expressionS * e, FILE * out)
2606 {
2607   if (e)
2608     {
2609       switch (e->X_op)
2610 	{
2611 	case O_register:
2612 	  fprintf (out, "%s", kvx_registers[e->X_add_number].name);
2613 	  break;
2614 
2615 	case O_constant:
2616 	  if (e->X_add_symbol)
2617 	    {
2618 	      if (e->X_add_number)
2619 		fprintf (out, "(%s + %d)", S_GET_NAME (e->X_add_symbol),
2620 			 (int) e->X_add_number);
2621 	      else
2622 		fprintf (out, "%s", S_GET_NAME (e->X_add_symbol));
2623 	    }
2624 	  else
2625 	    fprintf (out, "%d", (int) e->X_add_number);
2626 	  break;
2627 
2628 	case O_symbol:
2629 	  if (e->X_add_symbol)
2630 	    {
2631 	      if (e->X_add_number)
2632 		fprintf (out, "(%s + %d)", S_GET_NAME (e->X_add_symbol),
2633 			 (int) e->X_add_number);
2634 	      else
2635 		fprintf (out, "%s", S_GET_NAME (e->X_add_symbol));
2636 	    }
2637 	  else
2638 	    fprintf (out, "%d", (int) e->X_add_number);
2639 	  break;
2640 
2641 	default:
2642 	  fprintf (out, "o,ptype-%d", e->X_op);
2643 	}
2644     }
2645 }
2646 
2647 void
2648 kvx_cfi_frame_initial_instructions (void)
2649 {
2650   cfi_add_CFA_def_cfa (KVX_SP_REGNO, 0);
2651 }
2652 
2653 int
2654 kvx_regname_to_dw2regnum (const char *regname)
2655 {
2656   unsigned int regnum = -1;
2657   const char *p;
2658   char *q;
2659 
2660   if (regname[0] == 'r')
2661     {
2662       p = regname + 1;
2663       regnum = strtoul (p, &q, 10);
2664       if (p == q || *q || regnum >= 64)
2665 	return -1;
2666     }
2667   return regnum;
2668 }
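
/* For instance (illustrative calls): kvx_regname_to_dw2regnum ("r12")
   returns 12, while "r64" or a name that does not start with 'r' yields
   -1, meaning no DWARF register number could be derived.  */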
2669