1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture. 2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 3 Free Software Foundation, Inc. 4 Contributed by David Mosberger-Tang <davidm@hpl.hp.com> 5 6 This file is part of GAS, the GNU Assembler. 7 8 GAS is free software; you can redistribute it and/or modify 9 it under the terms of the GNU General Public License as published by 10 the Free Software Foundation; either version 3, or (at your option) 11 any later version. 12 13 GAS is distributed in the hope that it will be useful, 14 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 GNU General Public License for more details. 17 18 You should have received a copy of the GNU General Public License 19 along with GAS; see the file COPYING. If not, write to 20 the Free Software Foundation, 51 Franklin Street - Fifth Floor, 21 Boston, MA 02110-1301, USA. */ 22 23 /* 24 TODO: 25 26 - optional operands 27 - directives: 28 .eb 29 .estate 30 .lb 31 .popsection 32 .previous 33 .psr 34 .pushsection 35 - labels are wrong if automatic alignment is introduced 36 (e.g., checkout the second real10 definition in test-data.s) 37 - DV-related stuff: 38 <reg>.safe_across_calls and any other DV-related directives I don't 39 have documentation for. 40 verify mod-sched-brs reads/writes are checked/marked (and other 41 notes) 42 43 */ 44 45 #include "as.h" 46 #include "safe-ctype.h" 47 #include "dwarf2dbg.h" 48 #include "subsegs.h" 49 50 #include "opcode/ia64.h" 51 52 #include "elf/ia64.h" 53 54 #ifdef HAVE_LIMITS_H 55 #include <limits.h> 56 #endif 57 58 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0]))) 59 60 /* Some systems define MIN in, e.g., param.h. */ 61 #undef MIN 62 #define MIN(a,b) ((a) < (b) ? (a) : (b)) 63 64 #define NUM_SLOTS 4 65 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS] 66 #define CURR_SLOT md.slot[md.curr_slot] 67 68 #define O_pseudo_fixup (O_max + 1) 69 70 enum special_section 71 { 72 /* IA-64 ABI section pseudo-ops. */ 73 SPECIAL_SECTION_BSS = 0, 74 SPECIAL_SECTION_SBSS, 75 SPECIAL_SECTION_SDATA, 76 SPECIAL_SECTION_RODATA, 77 SPECIAL_SECTION_COMMENT, 78 SPECIAL_SECTION_UNWIND, 79 SPECIAL_SECTION_UNWIND_INFO, 80 /* HPUX specific section pseudo-ops. */ 81 SPECIAL_SECTION_INIT_ARRAY, 82 SPECIAL_SECTION_FINI_ARRAY, 83 }; 84 85 enum reloc_func 86 { 87 FUNC_DTP_MODULE, 88 FUNC_DTP_RELATIVE, 89 FUNC_FPTR_RELATIVE, 90 FUNC_GP_RELATIVE, 91 FUNC_LT_RELATIVE, 92 FUNC_LT_RELATIVE_X, 93 FUNC_PC_RELATIVE, 94 FUNC_PLT_RELATIVE, 95 FUNC_SEC_RELATIVE, 96 FUNC_SEG_RELATIVE, 97 FUNC_TP_RELATIVE, 98 FUNC_LTV_RELATIVE, 99 FUNC_LT_FPTR_RELATIVE, 100 FUNC_LT_DTP_MODULE, 101 FUNC_LT_DTP_RELATIVE, 102 FUNC_LT_TP_RELATIVE, 103 FUNC_IPLT_RELOC, 104 }; 105 106 enum reg_symbol 107 { 108 REG_GR = 0, 109 REG_FR = (REG_GR + 128), 110 REG_AR = (REG_FR + 128), 111 REG_CR = (REG_AR + 128), 112 REG_P = (REG_CR + 128), 113 REG_BR = (REG_P + 64), 114 REG_IP = (REG_BR + 8), 115 REG_CFM, 116 REG_PR, 117 REG_PR_ROT, 118 REG_PSR, 119 REG_PSR_L, 120 REG_PSR_UM, 121 /* The following are pseudo-registers for use by gas only. 
*/ 122 IND_CPUID, 123 IND_DBR, 124 IND_DTR, 125 IND_ITR, 126 IND_IBR, 127 IND_MSR, 128 IND_PKR, 129 IND_PMC, 130 IND_PMD, 131 IND_RR, 132 /* The following pseudo-registers are used for unwind directives only: */ 133 REG_PSP, 134 REG_PRIUNAT, 135 REG_NUM 136 }; 137 138 enum dynreg_type 139 { 140 DYNREG_GR = 0, /* dynamic general purpose register */ 141 DYNREG_FR, /* dynamic floating point register */ 142 DYNREG_PR, /* dynamic predicate register */ 143 DYNREG_NUM_TYPES 144 }; 145 146 enum operand_match_result 147 { 148 OPERAND_MATCH, 149 OPERAND_OUT_OF_RANGE, 150 OPERAND_MISMATCH 151 }; 152 153 /* On the ia64, we can't know the address of a text label until the 154 instructions are packed into a bundle. To handle this, we keep 155 track of the list of labels that appear in front of each 156 instruction. */ 157 struct label_fix 158 { 159 struct label_fix *next; 160 struct symbol *sym; 161 bfd_boolean dw2_mark_labels; 162 }; 163 164 /* This is the endianness of the current section. */ 165 extern int target_big_endian; 166 167 /* This is the default endianness. */ 168 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN; 169 170 void (*ia64_number_to_chars) (char *, valueT, int); 171 172 static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int); 173 static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int); 174 175 static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int); 176 177 static struct hash_control *alias_hash; 178 static struct hash_control *alias_name_hash; 179 static struct hash_control *secalias_hash; 180 static struct hash_control *secalias_name_hash; 181 182 /* List of chars besides those in app.c:symbol_chars that can start an 183 operand. Used to prevent the scrubber eating vital white-space. */ 184 const char ia64_symbol_chars[] = "@?"; 185 186 /* Characters which always start a comment. */ 187 const char comment_chars[] = ""; 188 189 /* Characters which start a comment at the beginning of a line. */ 190 const char line_comment_chars[] = "#"; 191 192 /* Characters which may be used to separate multiple commands on a 193 single line. */ 194 const char line_separator_chars[] = ";{}"; 195 196 /* Characters which are used to indicate an exponent in a floating 197 point number. */ 198 const char EXP_CHARS[] = "eE"; 199 200 /* Characters which mean that a number is a floating point constant, 201 as in 0d1.0. */ 202 const char FLT_CHARS[] = "rRsSfFdDxXpP"; 203 204 /* ia64-specific option processing: */ 205 206 const char *md_shortopts = "m:N:x::"; 207 208 struct option md_longopts[] = 209 { 210 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1) 211 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP}, 212 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2) 213 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC} 214 }; 215 216 size_t md_longopts_size = sizeof (md_longopts); 217 218 static struct 219 { 220 struct hash_control *pseudo_hash; /* pseudo opcode hash table */ 221 struct hash_control *reg_hash; /* register name hash table */ 222 struct hash_control *dynreg_hash; /* dynamic register hash table */ 223 struct hash_control *const_hash; /* constant hash table */ 224 struct hash_control *entry_hash; /* code entry hint hash table */ 225 226 /* If X_op is != O_absent, the registername for the instruction's 227 qualifying predicate. If NULL, p0 is assumed for instructions 228 that are predictable. */ 229 expressionS qp; 230 231 /* Optimize for which CPU. 
*/ 232 enum 233 { 234 itanium1, 235 itanium2 236 } tune; 237 238 /* What to do when hint.b is used. */ 239 enum 240 { 241 hint_b_error, 242 hint_b_warning, 243 hint_b_ok 244 } hint_b; 245 246 unsigned int 247 manual_bundling : 1, 248 debug_dv: 1, 249 detect_dv: 1, 250 explicit_mode : 1, /* which mode we're in */ 251 default_explicit_mode : 1, /* which mode is the default */ 252 mode_explicitly_set : 1, /* was the current mode explicitly set? */ 253 auto_align : 1, 254 keep_pending_output : 1; 255 256 /* What to do when something is wrong with unwind directives. */ 257 enum 258 { 259 unwind_check_warning, 260 unwind_check_error 261 } unwind_check; 262 263 /* Each bundle consists of up to three instructions. We keep 264 track of four most recent instructions so we can correctly set 265 the end_of_insn_group for the last instruction in a bundle. */ 266 int curr_slot; 267 int num_slots_in_use; 268 struct slot 269 { 270 unsigned int 271 end_of_insn_group : 1, 272 manual_bundling_on : 1, 273 manual_bundling_off : 1, 274 loc_directive_seen : 1; 275 signed char user_template; /* user-selected template, if any */ 276 unsigned char qp_regno; /* qualifying predicate */ 277 /* This duplicates a good fraction of "struct fix" but we 278 can't use a "struct fix" instead since we can't call 279 fix_new_exp() until we know the address of the instruction. */ 280 int num_fixups; 281 struct insn_fix 282 { 283 bfd_reloc_code_real_type code; 284 enum ia64_opnd opnd; /* type of operand in need of fix */ 285 unsigned int is_pcrel : 1; /* is operand pc-relative? */ 286 expressionS expr; /* the value to be inserted */ 287 } 288 fixup[2]; /* at most two fixups per insn */ 289 struct ia64_opcode *idesc; 290 struct label_fix *label_fixups; 291 struct label_fix *tag_fixups; 292 struct unw_rec_list *unwind_record; /* Unwind directive. */ 293 expressionS opnd[6]; 294 char *src_file; 295 unsigned int src_line; 296 struct dwarf2_line_info debug_line; 297 } 298 slot[NUM_SLOTS]; 299 300 segT last_text_seg; 301 302 struct dynreg 303 { 304 struct dynreg *next; /* next dynamic register */ 305 const char *name; 306 unsigned short base; /* the base register number */ 307 unsigned short num_regs; /* # of registers in this set */ 308 } 309 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot; 310 311 flagword flags; /* ELF-header flags */ 312 313 struct mem_offset { 314 unsigned hint:1; /* is this hint currently valid? */ 315 bfd_vma offset; /* mem.offset offset */ 316 bfd_vma base; /* mem.offset base */ 317 } mem_offset; 318 319 int path; /* number of alt. entry points seen */ 320 const char **entry_labels; /* labels of all alternate paths in 321 the current DV-checking block. */ 322 int maxpaths; /* size currently allocated for 323 entry_labels */ 324 325 int pointer_size; /* size in bytes of a pointer */ 326 int pointer_size_shift; /* shift size of a pointer for alignment */ 327 328 symbolS *indregsym[IND_RR - IND_CPUID + 1]; 329 } 330 md; 331 332 /* These are not const, because they are modified to MMI for non-itanium1 333 targets below. */ 334 /* MFI bundle of nops. */ 335 static unsigned char le_nop[16] = 336 { 337 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 338 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00 339 }; 340 /* MFI bundle of nops with stop-bit. 
*/ 341 static unsigned char le_nop_stop[16] = 342 { 343 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 344 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00 345 }; 346 347 /* application registers: */ 348 349 #define AR_K0 0 350 #define AR_K7 7 351 #define AR_RSC 16 352 #define AR_BSP 17 353 #define AR_BSPSTORE 18 354 #define AR_RNAT 19 355 #define AR_FCR 21 356 #define AR_EFLAG 24 357 #define AR_CSD 25 358 #define AR_SSD 26 359 #define AR_CFLG 27 360 #define AR_FSR 28 361 #define AR_FIR 29 362 #define AR_FDR 30 363 #define AR_CCV 32 364 #define AR_UNAT 36 365 #define AR_FPSR 40 366 #define AR_ITC 44 367 #define AR_RUC 45 368 #define AR_PFS 64 369 #define AR_LC 65 370 #define AR_EC 66 371 372 static const struct 373 { 374 const char *name; 375 unsigned int regnum; 376 } 377 ar[] = 378 { 379 {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1}, 380 {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3}, 381 {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5}, 382 {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7}, 383 {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP}, 384 {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT}, 385 {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG}, 386 {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD}, 387 {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR}, 388 {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR}, 389 {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT}, 390 {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC}, 391 {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS}, 392 {"ar.lc", AR_LC}, {"ar.ec", AR_EC}, 393 }; 394 395 /* control registers: */ 396 397 #define CR_DCR 0 398 #define CR_ITM 1 399 #define CR_IVA 2 400 #define CR_PTA 8 401 #define CR_GPTA 9 402 #define CR_IPSR 16 403 #define CR_ISR 17 404 #define CR_IIP 19 405 #define CR_IFA 20 406 #define CR_ITIR 21 407 #define CR_IIPA 22 408 #define CR_IFS 23 409 #define CR_IIM 24 410 #define CR_IHA 25 411 #define CR_IIB0 26 412 #define CR_IIB1 27 413 #define CR_LID 64 414 #define CR_IVR 65 415 #define CR_TPR 66 416 #define CR_EOI 67 417 #define CR_IRR0 68 418 #define CR_IRR3 71 419 #define CR_ITV 72 420 #define CR_PMV 73 421 #define CR_CMCV 74 422 #define CR_LRR0 80 423 #define CR_LRR1 81 424 425 static const struct 426 { 427 const char *name; 428 unsigned int regnum; 429 } 430 cr[] = 431 { 432 {"cr.dcr", CR_DCR}, 433 {"cr.itm", CR_ITM}, 434 {"cr.iva", CR_IVA}, 435 {"cr.pta", CR_PTA}, 436 {"cr.gpta", CR_GPTA}, 437 {"cr.ipsr", CR_IPSR}, 438 {"cr.isr", CR_ISR}, 439 {"cr.iip", CR_IIP}, 440 {"cr.ifa", CR_IFA}, 441 {"cr.itir", CR_ITIR}, 442 {"cr.iipa", CR_IIPA}, 443 {"cr.ifs", CR_IFS}, 444 {"cr.iim", CR_IIM}, 445 {"cr.iha", CR_IHA}, 446 {"cr.iib0", CR_IIB0}, 447 {"cr.iib1", CR_IIB1}, 448 {"cr.lid", CR_LID}, 449 {"cr.ivr", CR_IVR}, 450 {"cr.tpr", CR_TPR}, 451 {"cr.eoi", CR_EOI}, 452 {"cr.irr0", CR_IRR0}, 453 {"cr.irr1", CR_IRR0 + 1}, 454 {"cr.irr2", CR_IRR0 + 2}, 455 {"cr.irr3", CR_IRR3}, 456 {"cr.itv", CR_ITV}, 457 {"cr.pmv", CR_PMV}, 458 {"cr.cmcv", CR_CMCV}, 459 {"cr.lrr0", CR_LRR0}, 460 {"cr.lrr1", CR_LRR1} 461 }; 462 463 #define PSR_MFL 4 464 #define PSR_IC 13 465 #define PSR_DFL 18 466 #define PSR_CPL 32 467 468 static const struct const_desc 469 { 470 const char *name; 471 valueT value; 472 } 473 const_bits[] = 474 { 475 /* PSR constant masks: */ 476 477 /* 0: reserved */ 478 {"psr.be", ((valueT) 1) << 1}, 479 {"psr.up", ((valueT) 1) << 2}, 480 {"psr.ac", ((valueT) 1) << 3}, 481 {"psr.mfl", ((valueT) 1) << 4}, 482 {"psr.mfh", ((valueT) 1) << 5}, 483 /* 6-12: reserved */ 484 {"psr.ic", ((valueT) 1) << 13}, 485 {"psr.i", ((valueT) 1) << 14}, 486 {"psr.pk", ((valueT) 1) << 15}, 487 /* 16: reserved */ 488 {"psr.dt", ((valueT) 1) << 
17}, 489 {"psr.dfl", ((valueT) 1) << 18}, 490 {"psr.dfh", ((valueT) 1) << 19}, 491 {"psr.sp", ((valueT) 1) << 20}, 492 {"psr.pp", ((valueT) 1) << 21}, 493 {"psr.di", ((valueT) 1) << 22}, 494 {"psr.si", ((valueT) 1) << 23}, 495 {"psr.db", ((valueT) 1) << 24}, 496 {"psr.lp", ((valueT) 1) << 25}, 497 {"psr.tb", ((valueT) 1) << 26}, 498 {"psr.rt", ((valueT) 1) << 27}, 499 /* 28-31: reserved */ 500 /* 32-33: cpl (current privilege level) */ 501 {"psr.is", ((valueT) 1) << 34}, 502 {"psr.mc", ((valueT) 1) << 35}, 503 {"psr.it", ((valueT) 1) << 36}, 504 {"psr.id", ((valueT) 1) << 37}, 505 {"psr.da", ((valueT) 1) << 38}, 506 {"psr.dd", ((valueT) 1) << 39}, 507 {"psr.ss", ((valueT) 1) << 40}, 508 /* 41-42: ri (restart instruction) */ 509 {"psr.ed", ((valueT) 1) << 43}, 510 {"psr.bn", ((valueT) 1) << 44}, 511 }; 512 513 /* indirect register-sets/memory: */ 514 515 static const struct 516 { 517 const char *name; 518 unsigned int regnum; 519 } 520 indirect_reg[] = 521 { 522 { "CPUID", IND_CPUID }, 523 { "cpuid", IND_CPUID }, 524 { "dbr", IND_DBR }, 525 { "dtr", IND_DTR }, 526 { "itr", IND_ITR }, 527 { "ibr", IND_IBR }, 528 { "msr", IND_MSR }, 529 { "pkr", IND_PKR }, 530 { "pmc", IND_PMC }, 531 { "pmd", IND_PMD }, 532 { "rr", IND_RR }, 533 }; 534 535 /* Pseudo functions used to indicate relocation types (these functions 536 start with an at sign (@). */ 537 static struct 538 { 539 const char *name; 540 enum pseudo_type 541 { 542 PSEUDO_FUNC_NONE, 543 PSEUDO_FUNC_RELOC, 544 PSEUDO_FUNC_CONST, 545 PSEUDO_FUNC_REG, 546 PSEUDO_FUNC_FLOAT 547 } 548 type; 549 union 550 { 551 unsigned long ival; 552 symbolS *sym; 553 } 554 u; 555 } 556 pseudo_func[] = 557 { 558 /* reloc pseudo functions (these must come first!): */ 559 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } }, 560 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } }, 561 { "fptr", PSEUDO_FUNC_RELOC, { 0 } }, 562 { "gprel", PSEUDO_FUNC_RELOC, { 0 } }, 563 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } }, 564 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } }, 565 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } }, 566 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } }, 567 { "secrel", PSEUDO_FUNC_RELOC, { 0 } }, 568 { "segrel", PSEUDO_FUNC_RELOC, { 0 } }, 569 { "tprel", PSEUDO_FUNC_RELOC, { 0 } }, 570 { "ltv", PSEUDO_FUNC_RELOC, { 0 } }, 571 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */ 572 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */ 573 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */ 574 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */ 575 { "iplt", PSEUDO_FUNC_RELOC, { 0 } }, 576 577 /* mbtype4 constants: */ 578 { "alt", PSEUDO_FUNC_CONST, { 0xa } }, 579 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } }, 580 { "mix", PSEUDO_FUNC_CONST, { 0x8 } }, 581 { "rev", PSEUDO_FUNC_CONST, { 0xb } }, 582 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } }, 583 584 /* fclass constants: */ 585 { "nat", PSEUDO_FUNC_CONST, { 0x100 } }, 586 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } }, 587 { "snan", PSEUDO_FUNC_CONST, { 0x040 } }, 588 { "pos", PSEUDO_FUNC_CONST, { 0x001 } }, 589 { "neg", PSEUDO_FUNC_CONST, { 0x002 } }, 590 { "zero", PSEUDO_FUNC_CONST, { 0x004 } }, 591 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } }, 592 { "norm", PSEUDO_FUNC_CONST, { 0x010 } }, 593 { "inf", PSEUDO_FUNC_CONST, { 0x020 } }, 594 595 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */ 596 597 /* hint constants: */ 598 { "pause", PSEUDO_FUNC_CONST, { 0x0 } }, 599 600 /* unwind-related constants: */ 601 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } }, 602 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } }, 603 { "nt", 
PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */ 604 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_LINUX } }, 605 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } }, 606 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } }, 607 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } }, 608 609 /* unwind-related registers: */ 610 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } } 611 }; 612 613 /* 41-bit nop opcodes (one per unit): */ 614 static const bfd_vma nop[IA64_NUM_UNITS] = 615 { 616 0x0000000000LL, /* NIL => break 0 */ 617 0x0008000000LL, /* I-unit nop */ 618 0x0008000000LL, /* M-unit nop */ 619 0x4000000000LL, /* B-unit nop */ 620 0x0008000000LL, /* F-unit nop */ 621 0x0000000000LL, /* L-"unit" nop immediate */ 622 0x0008000000LL, /* X-unit nop */ 623 }; 624 625 /* Can't be `const' as it's passed to input routines (which have the 626 habit of setting temporary sentinels. */ 627 static char special_section_name[][20] = 628 { 629 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"}, 630 {".IA_64.unwind"}, {".IA_64.unwind_info"}, 631 {".init_array"}, {".fini_array"} 632 }; 633 634 /* The best template for a particular sequence of up to three 635 instructions: */ 636 #define N IA64_NUM_TYPES 637 static unsigned char best_template[N][N][N]; 638 #undef N 639 640 /* Resource dependencies currently in effect */ 641 static struct rsrc { 642 int depind; /* dependency index */ 643 const struct ia64_dependency *dependency; /* actual dependency */ 644 unsigned specific:1, /* is this a specific bit/regno? */ 645 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/ 646 int index; /* specific regno/bit within dependency */ 647 int note; /* optional qualifying note (0 if none) */ 648 #define STATE_NONE 0 649 #define STATE_STOP 1 650 #define STATE_SRLZ 2 651 int insn_srlz; /* current insn serialization state */ 652 int data_srlz; /* current data serialization state */ 653 int qp_regno; /* qualifying predicate for this usage */ 654 char *file; /* what file marked this dependency */ 655 unsigned int line; /* what line marked this dependency */ 656 struct mem_offset mem_offset; /* optional memory offset hint */ 657 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */ 658 int path; /* corresponding code entry index */ 659 } *regdeps = NULL; 660 static int regdepslen = 0; 661 static int regdepstotlen = 0; 662 static const char *dv_mode[] = { "RAW", "WAW", "WAR" }; 663 static const char *dv_sem[] = { "none", "implied", "impliedf", 664 "data", "instr", "specific", "stop", "other" }; 665 static const char *dv_cmp_type[] = { "none", "OR", "AND" }; 666 667 /* Current state of PR mutexation */ 668 static struct qpmutex { 669 valueT prmask; 670 int path; 671 } *qp_mutexes = NULL; /* QP mutex bitmasks */ 672 static int qp_mutexeslen = 0; 673 static int qp_mutexestotlen = 0; 674 static valueT qp_safe_across_calls = 0; 675 676 /* Current state of PR implications */ 677 static struct qp_imply { 678 unsigned p1:6; 679 unsigned p2:6; 680 unsigned p2_branched:1; 681 int path; 682 } *qp_implies = NULL; 683 static int qp_implieslen = 0; 684 static int qp_impliestotlen = 0; 685 686 /* Keep track of static GR values so that indirect register usage can 687 sometimes be tracked. */ 688 static struct gr { 689 unsigned known:1; 690 int path; 691 valueT value; 692 } gr_values[128] = { 693 { 694 1, 695 #ifdef INT_MAX 696 INT_MAX, 697 #else 698 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1, 699 #endif 700 0 701 } 702 }; 703 704 /* Remember the alignment frag. 
*/ 705 static fragS *align_frag; 706 707 /* These are the routines required to output the various types of 708 unwind records. */ 709 710 /* A slot_number is a frag address plus the slot index (0-2). We use the 711 frag address here so that if there is a section switch in the middle of 712 a function, then instructions emitted to a different section are not 713 counted. Since there may be more than one frag for a function, this 714 means we also need to keep track of which frag this address belongs to 715 so we can compute inter-frag distances. This also nicely solves the 716 problem with nops emitted for align directives, which can't easily be 717 counted, but can easily be derived from frag sizes. */ 718 719 typedef struct unw_rec_list { 720 unwind_record r; 721 unsigned long slot_number; 722 fragS *slot_frag; 723 struct unw_rec_list *next; 724 } unw_rec_list; 725 726 #define SLOT_NUM_NOT_SET (unsigned)-1 727 728 /* Linked list of saved prologue counts. A very poor 729 implementation of a map from label numbers to prologue counts. */ 730 typedef struct label_prologue_count 731 { 732 struct label_prologue_count *next; 733 unsigned long label_number; 734 unsigned int prologue_count; 735 } label_prologue_count; 736 737 typedef struct proc_pending 738 { 739 symbolS *sym; 740 struct proc_pending *next; 741 } proc_pending; 742 743 static struct 744 { 745 /* Maintain a list of unwind entries for the current function. */ 746 unw_rec_list *list; 747 unw_rec_list *tail; 748 749 /* Any unwind entries that should be attached to the current slot 750 that an insn is being constructed for. */ 751 unw_rec_list *current_entry; 752 753 /* These are used to create the unwind table entry for this function. */ 754 proc_pending proc_pending; 755 symbolS *info; /* pointer to unwind info */ 756 symbolS *personality_routine; 757 segT saved_text_seg; 758 subsegT saved_text_subseg; 759 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */ 760 761 /* TRUE if processing unwind directives in a prologue region. */ 762 unsigned int prologue : 1; 763 unsigned int prologue_mask : 4; 764 unsigned int prologue_gr : 7; 765 unsigned int body : 1; 766 unsigned int insn : 1; 767 unsigned int prologue_count; /* number of .prologues seen so far */ 768 /* Prologue counts at previous .label_state directives. */ 769 struct label_prologue_count * saved_prologue_counts; 770 771 /* List of split up .save-s. */ 772 unw_p_record *pending_saves; 773 } unwind; 774 775 /* The input value is a negated offset from psp, and specifies an address 776 psp - offset. The encoded value is psp + 16 - (4 * offset). Thus we 777 must add 16 and divide by 4 to get the encoded value. 
*/ 778 779 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4) 780 781 typedef void (*vbyte_func) (int, char *, char *); 782 783 /* Forward declarations: */ 784 static void dot_alias (int); 785 static int parse_operand (expressionS *, int); 786 static void emit_one_bundle (void); 787 static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *, 788 bfd_reloc_code_real_type); 789 static void insn_group_break (int, int, int); 790 static void add_qp_mutex (valueT); 791 static void add_qp_imply (int, int); 792 static void clear_qp_mutex (valueT); 793 static void clear_qp_implies (valueT, valueT); 794 static void print_dependency (const char *, int); 795 static void instruction_serialization (void); 796 static void data_serialization (void); 797 static void output_R3_format (vbyte_func, unw_record_type, unsigned long); 798 static void output_B3_format (vbyte_func, unsigned long, unsigned long); 799 static void output_B4_format (vbyte_func, unw_record_type, unsigned long); 800 static void free_saved_prologue_counts (void); 801 802 /* Determine if application register REGNUM resides only in the integer 803 unit (as opposed to the memory unit). */ 804 static int 805 ar_is_only_in_integer_unit (int reg) 806 { 807 reg -= REG_AR; 808 return reg >= 64 && reg <= 111; 809 } 810 811 /* Determine if application register REGNUM resides only in the memory 812 unit (as opposed to the integer unit). */ 813 static int 814 ar_is_only_in_memory_unit (int reg) 815 { 816 reg -= REG_AR; 817 return reg >= 0 && reg <= 47; 818 } 819 820 /* Switch to section NAME and create section if necessary. It's 821 rather ugly that we have to manipulate input_line_pointer but I 822 don't see any other way to accomplish the same thing without 823 changing obj-elf.c (which may be the Right Thing, in the end). */ 824 static void 825 set_section (char *name) 826 { 827 char *saved_input_line_pointer; 828 829 saved_input_line_pointer = input_line_pointer; 830 input_line_pointer = name; 831 obj_elf_section (0); 832 input_line_pointer = saved_input_line_pointer; 833 } 834 835 /* Map 's' to SHF_IA_64_SHORT. */ 836 837 int 838 ia64_elf_section_letter (int letter, char **ptr_msg) 839 { 840 if (letter == 's') 841 return SHF_IA_64_SHORT; 842 else if (letter == 'o') 843 return SHF_LINK_ORDER; 844 845 *ptr_msg = _("Bad .section directive: want a,o,s,w,x,M,S,G,T in string"); 846 return -1; 847 } 848 849 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */ 850 851 flagword 852 ia64_elf_section_flags (flagword flags, 853 int attr, 854 int type ATTRIBUTE_UNUSED) 855 { 856 if (attr & SHF_IA_64_SHORT) 857 flags |= SEC_SMALL_DATA; 858 return flags; 859 } 860 861 int 862 ia64_elf_section_type (const char *str, size_t len) 863 { 864 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0)) 865 866 if (STREQ (ELF_STRING_ia64_unwind_info)) 867 return SHT_PROGBITS; 868 869 if (STREQ (ELF_STRING_ia64_unwind_info_once)) 870 return SHT_PROGBITS; 871 872 if (STREQ (ELF_STRING_ia64_unwind)) 873 return SHT_IA_64_UNWIND; 874 875 if (STREQ (ELF_STRING_ia64_unwind_once)) 876 return SHT_IA_64_UNWIND; 877 878 if (STREQ ("unwind")) 879 return SHT_IA_64_UNWIND; 880 881 return -1; 882 #undef STREQ 883 } 884 885 static unsigned int 886 set_regstack (unsigned int ins, 887 unsigned int locs, 888 unsigned int outs, 889 unsigned int rots) 890 { 891 /* Size of frame. 
*/ 892 unsigned int sof; 893 894 sof = ins + locs + outs; 895 if (sof > 96) 896 { 897 as_bad (_("Size of frame exceeds maximum of 96 registers")); 898 return 0; 899 } 900 if (rots > sof) 901 { 902 as_warn (_("Size of rotating registers exceeds frame size")); 903 return 0; 904 } 905 md.in.base = REG_GR + 32; 906 md.loc.base = md.in.base + ins; 907 md.out.base = md.loc.base + locs; 908 909 md.in.num_regs = ins; 910 md.loc.num_regs = locs; 911 md.out.num_regs = outs; 912 md.rot.num_regs = rots; 913 return sof; 914 } 915 916 void 917 ia64_flush_insns (void) 918 { 919 struct label_fix *lfix; 920 segT saved_seg; 921 subsegT saved_subseg; 922 unw_rec_list *ptr; 923 bfd_boolean mark; 924 925 if (!md.last_text_seg) 926 return; 927 928 saved_seg = now_seg; 929 saved_subseg = now_subseg; 930 931 subseg_set (md.last_text_seg, 0); 932 933 while (md.num_slots_in_use > 0) 934 emit_one_bundle (); /* force out queued instructions */ 935 936 /* In case there are labels following the last instruction, resolve 937 those now. */ 938 mark = FALSE; 939 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next) 940 { 941 symbol_set_value_now (lfix->sym); 942 mark |= lfix->dw2_mark_labels; 943 } 944 if (mark) 945 { 946 dwarf2_where (&CURR_SLOT.debug_line); 947 CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK; 948 dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line); 949 dwarf2_consume_line_info (); 950 } 951 CURR_SLOT.label_fixups = 0; 952 953 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next) 954 symbol_set_value_now (lfix->sym); 955 CURR_SLOT.tag_fixups = 0; 956 957 /* In case there are unwind directives following the last instruction, 958 resolve those now. We only handle prologue, body, and endp directives 959 here. Give an error for others. */ 960 for (ptr = unwind.current_entry; ptr; ptr = ptr->next) 961 { 962 switch (ptr->r.type) 963 { 964 case prologue: 965 case prologue_gr: 966 case body: 967 case endp: 968 ptr->slot_number = (unsigned long) frag_more (0); 969 ptr->slot_frag = frag_now; 970 break; 971 972 /* Allow any record which doesn't have a "t" field (i.e., 973 doesn't relate to a particular instruction). */ 974 case unwabi: 975 case br_gr: 976 case copy_state: 977 case fr_mem: 978 case frgr_mem: 979 case gr_gr: 980 case gr_mem: 981 case label_state: 982 case rp_br: 983 case spill_base: 984 case spill_mask: 985 /* nothing */ 986 break; 987 988 default: 989 as_bad (_("Unwind directive not followed by an instruction.")); 990 break; 991 } 992 } 993 unwind.current_entry = NULL; 994 995 subseg_set (saved_seg, saved_subseg); 996 997 if (md.qp.X_op == O_register) 998 as_bad (_("qualifying predicate not followed by instruction")); 999 } 1000 1001 static void 1002 ia64_do_align (int nbytes) 1003 { 1004 char *saved_input_line_pointer = input_line_pointer; 1005 1006 input_line_pointer = ""; 1007 s_align_bytes (nbytes); 1008 input_line_pointer = saved_input_line_pointer; 1009 } 1010 1011 void 1012 ia64_cons_align (int nbytes) 1013 { 1014 if (md.auto_align) 1015 { 1016 char *saved_input_line_pointer = input_line_pointer; 1017 input_line_pointer = ""; 1018 s_align_bytes (nbytes); 1019 input_line_pointer = saved_input_line_pointer; 1020 } 1021 } 1022 1023 /* Output COUNT bytes to a memory location. 
*/ 1024 static char *vbyte_mem_ptr = NULL; 1025 1026 static void 1027 output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED) 1028 { 1029 int x; 1030 if (vbyte_mem_ptr == NULL) 1031 abort (); 1032 1033 if (count == 0) 1034 return; 1035 for (x = 0; x < count; x++) 1036 *(vbyte_mem_ptr++) = ptr[x]; 1037 } 1038 1039 /* Count the number of bytes required for records. */ 1040 static int vbyte_count = 0; 1041 static void 1042 count_output (int count, 1043 char *ptr ATTRIBUTE_UNUSED, 1044 char *comment ATTRIBUTE_UNUSED) 1045 { 1046 vbyte_count += count; 1047 } 1048 1049 static void 1050 output_R1_format (vbyte_func f, unw_record_type rtype, int rlen) 1051 { 1052 int r = 0; 1053 char byte; 1054 if (rlen > 0x1f) 1055 { 1056 output_R3_format (f, rtype, rlen); 1057 return; 1058 } 1059 1060 if (rtype == body) 1061 r = 1; 1062 else if (rtype != prologue) 1063 as_bad (_("record type is not valid")); 1064 1065 byte = UNW_R1 | (r << 5) | (rlen & 0x1f); 1066 (*f) (1, &byte, NULL); 1067 } 1068 1069 static void 1070 output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen) 1071 { 1072 char bytes[20]; 1073 int count = 2; 1074 mask = (mask & 0x0f); 1075 grsave = (grsave & 0x7f); 1076 1077 bytes[0] = (UNW_R2 | (mask >> 1)); 1078 bytes[1] = (((mask & 0x01) << 7) | grsave); 1079 count += output_leb128 (bytes + 2, rlen, 0); 1080 (*f) (count, bytes, NULL); 1081 } 1082 1083 static void 1084 output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen) 1085 { 1086 int r = 0, count; 1087 char bytes[20]; 1088 if (rlen <= 0x1f) 1089 { 1090 output_R1_format (f, rtype, rlen); 1091 return; 1092 } 1093 1094 if (rtype == body) 1095 r = 1; 1096 else if (rtype != prologue) 1097 as_bad (_("record type is not valid")); 1098 bytes[0] = (UNW_R3 | r); 1099 count = output_leb128 (bytes + 1, rlen, 0); 1100 (*f) (count + 1, bytes, NULL); 1101 } 1102 1103 static void 1104 output_P1_format (vbyte_func f, int brmask) 1105 { 1106 char byte; 1107 byte = UNW_P1 | (brmask & 0x1f); 1108 (*f) (1, &byte, NULL); 1109 } 1110 1111 static void 1112 output_P2_format (vbyte_func f, int brmask, int gr) 1113 { 1114 char bytes[2]; 1115 brmask = (brmask & 0x1f); 1116 bytes[0] = UNW_P2 | (brmask >> 1); 1117 bytes[1] = (((brmask & 1) << 7) | gr); 1118 (*f) (2, bytes, NULL); 1119 } 1120 1121 static void 1122 output_P3_format (vbyte_func f, unw_record_type rtype, int reg) 1123 { 1124 char bytes[2]; 1125 int r = 0; 1126 reg = (reg & 0x7f); 1127 switch (rtype) 1128 { 1129 case psp_gr: 1130 r = 0; 1131 break; 1132 case rp_gr: 1133 r = 1; 1134 break; 1135 case pfs_gr: 1136 r = 2; 1137 break; 1138 case preds_gr: 1139 r = 3; 1140 break; 1141 case unat_gr: 1142 r = 4; 1143 break; 1144 case lc_gr: 1145 r = 5; 1146 break; 1147 case rp_br: 1148 r = 6; 1149 break; 1150 case rnat_gr: 1151 r = 7; 1152 break; 1153 case bsp_gr: 1154 r = 8; 1155 break; 1156 case bspstore_gr: 1157 r = 9; 1158 break; 1159 case fpsr_gr: 1160 r = 10; 1161 break; 1162 case priunat_gr: 1163 r = 11; 1164 break; 1165 default: 1166 as_bad (_("Invalid record type for P3 format.")); 1167 } 1168 bytes[0] = (UNW_P3 | (r >> 1)); 1169 bytes[1] = (((r & 1) << 7) | reg); 1170 (*f) (2, bytes, NULL); 1171 } 1172 1173 static void 1174 output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size) 1175 { 1176 imask[0] = UNW_P4; 1177 (*f) (imask_size, (char *) imask, NULL); 1178 } 1179 1180 static void 1181 output_P5_format (vbyte_func f, int grmask, unsigned long frmask) 1182 { 1183 char bytes[4]; 1184 grmask = (grmask & 0x0f); 1185 1186 bytes[0] = 
UNW_P5; 1187 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16)); 1188 bytes[2] = ((frmask & 0x0000ff00) >> 8); 1189 bytes[3] = (frmask & 0x000000ff); 1190 (*f) (4, bytes, NULL); 1191 } 1192 1193 static void 1194 output_P6_format (vbyte_func f, unw_record_type rtype, int rmask) 1195 { 1196 char byte; 1197 int r = 0; 1198 1199 if (rtype == gr_mem) 1200 r = 1; 1201 else if (rtype != fr_mem) 1202 as_bad (_("Invalid record type for format P6")); 1203 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f)); 1204 (*f) (1, &byte, NULL); 1205 } 1206 1207 static void 1208 output_P7_format (vbyte_func f, 1209 unw_record_type rtype, 1210 unsigned long w1, 1211 unsigned long w2) 1212 { 1213 char bytes[20]; 1214 int count = 1; 1215 int r = 0; 1216 count += output_leb128 (bytes + 1, w1, 0); 1217 switch (rtype) 1218 { 1219 case mem_stack_f: 1220 r = 0; 1221 count += output_leb128 (bytes + count, w2 >> 4, 0); 1222 break; 1223 case mem_stack_v: 1224 r = 1; 1225 break; 1226 case spill_base: 1227 r = 2; 1228 break; 1229 case psp_sprel: 1230 r = 3; 1231 break; 1232 case rp_when: 1233 r = 4; 1234 break; 1235 case rp_psprel: 1236 r = 5; 1237 break; 1238 case pfs_when: 1239 r = 6; 1240 break; 1241 case pfs_psprel: 1242 r = 7; 1243 break; 1244 case preds_when: 1245 r = 8; 1246 break; 1247 case preds_psprel: 1248 r = 9; 1249 break; 1250 case lc_when: 1251 r = 10; 1252 break; 1253 case lc_psprel: 1254 r = 11; 1255 break; 1256 case unat_when: 1257 r = 12; 1258 break; 1259 case unat_psprel: 1260 r = 13; 1261 break; 1262 case fpsr_when: 1263 r = 14; 1264 break; 1265 case fpsr_psprel: 1266 r = 15; 1267 break; 1268 default: 1269 break; 1270 } 1271 bytes[0] = (UNW_P7 | r); 1272 (*f) (count, bytes, NULL); 1273 } 1274 1275 static void 1276 output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t) 1277 { 1278 char bytes[20]; 1279 int r = 0; 1280 int count = 2; 1281 bytes[0] = UNW_P8; 1282 switch (rtype) 1283 { 1284 case rp_sprel: 1285 r = 1; 1286 break; 1287 case pfs_sprel: 1288 r = 2; 1289 break; 1290 case preds_sprel: 1291 r = 3; 1292 break; 1293 case lc_sprel: 1294 r = 4; 1295 break; 1296 case unat_sprel: 1297 r = 5; 1298 break; 1299 case fpsr_sprel: 1300 r = 6; 1301 break; 1302 case bsp_when: 1303 r = 7; 1304 break; 1305 case bsp_psprel: 1306 r = 8; 1307 break; 1308 case bsp_sprel: 1309 r = 9; 1310 break; 1311 case bspstore_when: 1312 r = 10; 1313 break; 1314 case bspstore_psprel: 1315 r = 11; 1316 break; 1317 case bspstore_sprel: 1318 r = 12; 1319 break; 1320 case rnat_when: 1321 r = 13; 1322 break; 1323 case rnat_psprel: 1324 r = 14; 1325 break; 1326 case rnat_sprel: 1327 r = 15; 1328 break; 1329 case priunat_when_gr: 1330 r = 16; 1331 break; 1332 case priunat_psprel: 1333 r = 17; 1334 break; 1335 case priunat_sprel: 1336 r = 18; 1337 break; 1338 case priunat_when_mem: 1339 r = 19; 1340 break; 1341 default: 1342 break; 1343 } 1344 bytes[1] = r; 1345 count += output_leb128 (bytes + 2, t, 0); 1346 (*f) (count, bytes, NULL); 1347 } 1348 1349 static void 1350 output_P9_format (vbyte_func f, int grmask, int gr) 1351 { 1352 char bytes[3]; 1353 bytes[0] = UNW_P9; 1354 bytes[1] = (grmask & 0x0f); 1355 bytes[2] = (gr & 0x7f); 1356 (*f) (3, bytes, NULL); 1357 } 1358 1359 static void 1360 output_P10_format (vbyte_func f, int abi, int context) 1361 { 1362 char bytes[3]; 1363 bytes[0] = UNW_P10; 1364 bytes[1] = (abi & 0xff); 1365 bytes[2] = (context & 0xff); 1366 (*f) (3, bytes, NULL); 1367 } 1368 1369 static void 1370 output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label) 1371 { 1372 char byte; 1373 
int r = 0; 1374 if (label > 0x1f) 1375 { 1376 output_B4_format (f, rtype, label); 1377 return; 1378 } 1379 if (rtype == copy_state) 1380 r = 1; 1381 else if (rtype != label_state) 1382 as_bad (_("Invalid record type for format B1")); 1383 1384 byte = (UNW_B1 | (r << 5) | (label & 0x1f)); 1385 (*f) (1, &byte, NULL); 1386 } 1387 1388 static void 1389 output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t) 1390 { 1391 char bytes[20]; 1392 int count = 1; 1393 if (ecount > 0x1f) 1394 { 1395 output_B3_format (f, ecount, t); 1396 return; 1397 } 1398 bytes[0] = (UNW_B2 | (ecount & 0x1f)); 1399 count += output_leb128 (bytes + 1, t, 0); 1400 (*f) (count, bytes, NULL); 1401 } 1402 1403 static void 1404 output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t) 1405 { 1406 char bytes[20]; 1407 int count = 1; 1408 if (ecount <= 0x1f) 1409 { 1410 output_B2_format (f, ecount, t); 1411 return; 1412 } 1413 bytes[0] = UNW_B3; 1414 count += output_leb128 (bytes + 1, t, 0); 1415 count += output_leb128 (bytes + count, ecount, 0); 1416 (*f) (count, bytes, NULL); 1417 } 1418 1419 static void 1420 output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label) 1421 { 1422 char bytes[20]; 1423 int r = 0; 1424 int count = 1; 1425 if (label <= 0x1f) 1426 { 1427 output_B1_format (f, rtype, label); 1428 return; 1429 } 1430 1431 if (rtype == copy_state) 1432 r = 1; 1433 else if (rtype != label_state) 1434 as_bad (_("Invalid record type for format B1")); 1435 1436 bytes[0] = (UNW_B4 | (r << 3)); 1437 count += output_leb128 (bytes + 1, label, 0); 1438 (*f) (count, bytes, NULL); 1439 } 1440 1441 static char 1442 format_ab_reg (int ab, int reg) 1443 { 1444 int ret; 1445 ab = (ab & 3); 1446 reg = (reg & 0x1f); 1447 ret = (ab << 5) | reg; 1448 return ret; 1449 } 1450 1451 static void 1452 output_X1_format (vbyte_func f, 1453 unw_record_type rtype, 1454 int ab, 1455 int reg, 1456 unsigned long t, 1457 unsigned long w1) 1458 { 1459 char bytes[20]; 1460 int r = 0; 1461 int count = 2; 1462 bytes[0] = UNW_X1; 1463 1464 if (rtype == spill_sprel) 1465 r = 1; 1466 else if (rtype != spill_psprel) 1467 as_bad (_("Invalid record type for format X1")); 1468 bytes[1] = ((r << 7) | format_ab_reg (ab, reg)); 1469 count += output_leb128 (bytes + 2, t, 0); 1470 count += output_leb128 (bytes + count, w1, 0); 1471 (*f) (count, bytes, NULL); 1472 } 1473 1474 static void 1475 output_X2_format (vbyte_func f, 1476 int ab, 1477 int reg, 1478 int x, 1479 int y, 1480 int treg, 1481 unsigned long t) 1482 { 1483 char bytes[20]; 1484 int count = 3; 1485 bytes[0] = UNW_X2; 1486 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg)); 1487 bytes[2] = (((y & 1) << 7) | (treg & 0x7f)); 1488 count += output_leb128 (bytes + 3, t, 0); 1489 (*f) (count, bytes, NULL); 1490 } 1491 1492 static void 1493 output_X3_format (vbyte_func f, 1494 unw_record_type rtype, 1495 int qp, 1496 int ab, 1497 int reg, 1498 unsigned long t, 1499 unsigned long w1) 1500 { 1501 char bytes[20]; 1502 int r = 0; 1503 int count = 3; 1504 bytes[0] = UNW_X3; 1505 1506 if (rtype == spill_sprel_p) 1507 r = 1; 1508 else if (rtype != spill_psprel_p) 1509 as_bad (_("Invalid record type for format X3")); 1510 bytes[1] = ((r << 7) | (qp & 0x3f)); 1511 bytes[2] = format_ab_reg (ab, reg); 1512 count += output_leb128 (bytes + 3, t, 0); 1513 count += output_leb128 (bytes + count, w1, 0); 1514 (*f) (count, bytes, NULL); 1515 } 1516 1517 static void 1518 output_X4_format (vbyte_func f, 1519 int qp, 1520 int ab, 1521 int reg, 1522 int x, 1523 int y, 1524 int treg, 
1525 unsigned long t) 1526 { 1527 char bytes[20]; 1528 int count = 4; 1529 bytes[0] = UNW_X4; 1530 bytes[1] = (qp & 0x3f); 1531 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg)); 1532 bytes[3] = (((y & 1) << 7) | (treg & 0x7f)); 1533 count += output_leb128 (bytes + 4, t, 0); 1534 (*f) (count, bytes, NULL); 1535 } 1536 1537 /* This function checks whether there are any outstanding .save-s and 1538 discards them if so. */ 1539 1540 static void 1541 check_pending_save (void) 1542 { 1543 if (unwind.pending_saves) 1544 { 1545 unw_rec_list *cur, *prev; 1546 1547 as_warn (_("Previous .save incomplete")); 1548 for (cur = unwind.list, prev = NULL; cur; ) 1549 if (&cur->r.record.p == unwind.pending_saves) 1550 { 1551 if (prev) 1552 prev->next = cur->next; 1553 else 1554 unwind.list = cur->next; 1555 if (cur == unwind.tail) 1556 unwind.tail = prev; 1557 if (cur == unwind.current_entry) 1558 unwind.current_entry = cur->next; 1559 /* Don't free the first discarded record, it's being used as 1560 terminator for (currently) br_gr and gr_gr processing, and 1561 also prevents leaving a dangling pointer to it in its 1562 predecessor. */ 1563 cur->r.record.p.grmask = 0; 1564 cur->r.record.p.brmask = 0; 1565 cur->r.record.p.frmask = 0; 1566 prev = cur->r.record.p.next; 1567 cur->r.record.p.next = NULL; 1568 cur = prev; 1569 break; 1570 } 1571 else 1572 { 1573 prev = cur; 1574 cur = cur->next; 1575 } 1576 while (cur) 1577 { 1578 prev = cur; 1579 cur = cur->r.record.p.next; 1580 free (prev); 1581 } 1582 unwind.pending_saves = NULL; 1583 } 1584 } 1585 1586 /* This function allocates a record list structure, and initializes fields. */ 1587 1588 static unw_rec_list * 1589 alloc_record (unw_record_type t) 1590 { 1591 unw_rec_list *ptr; 1592 ptr = xmalloc (sizeof (*ptr)); 1593 memset (ptr, 0, sizeof (*ptr)); 1594 ptr->slot_number = SLOT_NUM_NOT_SET; 1595 ptr->r.type = t; 1596 return ptr; 1597 } 1598 1599 /* Dummy unwind record used for calculating the length of the last prologue or 1600 body region. 
*/ 1601 1602 static unw_rec_list * 1603 output_endp (void) 1604 { 1605 unw_rec_list *ptr = alloc_record (endp); 1606 return ptr; 1607 } 1608 1609 static unw_rec_list * 1610 output_prologue (void) 1611 { 1612 unw_rec_list *ptr = alloc_record (prologue); 1613 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask)); 1614 return ptr; 1615 } 1616 1617 static unw_rec_list * 1618 output_prologue_gr (unsigned int saved_mask, unsigned int reg) 1619 { 1620 unw_rec_list *ptr = alloc_record (prologue_gr); 1621 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask)); 1622 ptr->r.record.r.grmask = saved_mask; 1623 ptr->r.record.r.grsave = reg; 1624 return ptr; 1625 } 1626 1627 static unw_rec_list * 1628 output_body (void) 1629 { 1630 unw_rec_list *ptr = alloc_record (body); 1631 return ptr; 1632 } 1633 1634 static unw_rec_list * 1635 output_mem_stack_f (unsigned int size) 1636 { 1637 unw_rec_list *ptr = alloc_record (mem_stack_f); 1638 ptr->r.record.p.size = size; 1639 return ptr; 1640 } 1641 1642 static unw_rec_list * 1643 output_mem_stack_v (void) 1644 { 1645 unw_rec_list *ptr = alloc_record (mem_stack_v); 1646 return ptr; 1647 } 1648 1649 static unw_rec_list * 1650 output_psp_gr (unsigned int gr) 1651 { 1652 unw_rec_list *ptr = alloc_record (psp_gr); 1653 ptr->r.record.p.r.gr = gr; 1654 return ptr; 1655 } 1656 1657 static unw_rec_list * 1658 output_psp_sprel (unsigned int offset) 1659 { 1660 unw_rec_list *ptr = alloc_record (psp_sprel); 1661 ptr->r.record.p.off.sp = offset / 4; 1662 return ptr; 1663 } 1664 1665 static unw_rec_list * 1666 output_rp_when (void) 1667 { 1668 unw_rec_list *ptr = alloc_record (rp_when); 1669 return ptr; 1670 } 1671 1672 static unw_rec_list * 1673 output_rp_gr (unsigned int gr) 1674 { 1675 unw_rec_list *ptr = alloc_record (rp_gr); 1676 ptr->r.record.p.r.gr = gr; 1677 return ptr; 1678 } 1679 1680 static unw_rec_list * 1681 output_rp_br (unsigned int br) 1682 { 1683 unw_rec_list *ptr = alloc_record (rp_br); 1684 ptr->r.record.p.r.br = br; 1685 return ptr; 1686 } 1687 1688 static unw_rec_list * 1689 output_rp_psprel (unsigned int offset) 1690 { 1691 unw_rec_list *ptr = alloc_record (rp_psprel); 1692 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset); 1693 return ptr; 1694 } 1695 1696 static unw_rec_list * 1697 output_rp_sprel (unsigned int offset) 1698 { 1699 unw_rec_list *ptr = alloc_record (rp_sprel); 1700 ptr->r.record.p.off.sp = offset / 4; 1701 return ptr; 1702 } 1703 1704 static unw_rec_list * 1705 output_pfs_when (void) 1706 { 1707 unw_rec_list *ptr = alloc_record (pfs_when); 1708 return ptr; 1709 } 1710 1711 static unw_rec_list * 1712 output_pfs_gr (unsigned int gr) 1713 { 1714 unw_rec_list *ptr = alloc_record (pfs_gr); 1715 ptr->r.record.p.r.gr = gr; 1716 return ptr; 1717 } 1718 1719 static unw_rec_list * 1720 output_pfs_psprel (unsigned int offset) 1721 { 1722 unw_rec_list *ptr = alloc_record (pfs_psprel); 1723 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset); 1724 return ptr; 1725 } 1726 1727 static unw_rec_list * 1728 output_pfs_sprel (unsigned int offset) 1729 { 1730 unw_rec_list *ptr = alloc_record (pfs_sprel); 1731 ptr->r.record.p.off.sp = offset / 4; 1732 return ptr; 1733 } 1734 1735 static unw_rec_list * 1736 output_preds_when (void) 1737 { 1738 unw_rec_list *ptr = alloc_record (preds_when); 1739 return ptr; 1740 } 1741 1742 static unw_rec_list * 1743 output_preds_gr (unsigned int gr) 1744 { 1745 unw_rec_list *ptr = alloc_record (preds_gr); 1746 ptr->r.record.p.r.gr = gr; 1747 return ptr; 1748 } 1749 1750 static unw_rec_list * 1751 
output_preds_psprel (unsigned int offset) 1752 { 1753 unw_rec_list *ptr = alloc_record (preds_psprel); 1754 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset); 1755 return ptr; 1756 } 1757 1758 static unw_rec_list * 1759 output_preds_sprel (unsigned int offset) 1760 { 1761 unw_rec_list *ptr = alloc_record (preds_sprel); 1762 ptr->r.record.p.off.sp = offset / 4; 1763 return ptr; 1764 } 1765 1766 static unw_rec_list * 1767 output_fr_mem (unsigned int mask) 1768 { 1769 unw_rec_list *ptr = alloc_record (fr_mem); 1770 unw_rec_list *cur = ptr; 1771 1772 ptr->r.record.p.frmask = mask; 1773 unwind.pending_saves = &ptr->r.record.p; 1774 for (;;) 1775 { 1776 unw_rec_list *prev = cur; 1777 1778 /* Clear least significant set bit. */ 1779 mask &= ~(mask & (~mask + 1)); 1780 if (!mask) 1781 return ptr; 1782 cur = alloc_record (fr_mem); 1783 cur->r.record.p.frmask = mask; 1784 /* Retain only least significant bit. */ 1785 prev->r.record.p.frmask ^= mask; 1786 prev->r.record.p.next = cur; 1787 } 1788 } 1789 1790 static unw_rec_list * 1791 output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask) 1792 { 1793 unw_rec_list *ptr = alloc_record (frgr_mem); 1794 unw_rec_list *cur = ptr; 1795 1796 unwind.pending_saves = &cur->r.record.p; 1797 cur->r.record.p.frmask = fr_mask; 1798 while (fr_mask) 1799 { 1800 unw_rec_list *prev = cur; 1801 1802 /* Clear least significant set bit. */ 1803 fr_mask &= ~(fr_mask & (~fr_mask + 1)); 1804 if (!gr_mask && !fr_mask) 1805 return ptr; 1806 cur = alloc_record (frgr_mem); 1807 cur->r.record.p.frmask = fr_mask; 1808 /* Retain only least significant bit. */ 1809 prev->r.record.p.frmask ^= fr_mask; 1810 prev->r.record.p.next = cur; 1811 } 1812 cur->r.record.p.grmask = gr_mask; 1813 for (;;) 1814 { 1815 unw_rec_list *prev = cur; 1816 1817 /* Clear least significant set bit. */ 1818 gr_mask &= ~(gr_mask & (~gr_mask + 1)); 1819 if (!gr_mask) 1820 return ptr; 1821 cur = alloc_record (frgr_mem); 1822 cur->r.record.p.grmask = gr_mask; 1823 /* Retain only least significant bit. */ 1824 prev->r.record.p.grmask ^= gr_mask; 1825 prev->r.record.p.next = cur; 1826 } 1827 } 1828 1829 static unw_rec_list * 1830 output_gr_gr (unsigned int mask, unsigned int reg) 1831 { 1832 unw_rec_list *ptr = alloc_record (gr_gr); 1833 unw_rec_list *cur = ptr; 1834 1835 ptr->r.record.p.grmask = mask; 1836 ptr->r.record.p.r.gr = reg; 1837 unwind.pending_saves = &ptr->r.record.p; 1838 for (;;) 1839 { 1840 unw_rec_list *prev = cur; 1841 1842 /* Clear least significant set bit. */ 1843 mask &= ~(mask & (~mask + 1)); 1844 if (!mask) 1845 return ptr; 1846 cur = alloc_record (gr_gr); 1847 cur->r.record.p.grmask = mask; 1848 /* Indicate this record shouldn't be output. */ 1849 cur->r.record.p.r.gr = REG_NUM; 1850 /* Retain only least significant bit. */ 1851 prev->r.record.p.grmask ^= mask; 1852 prev->r.record.p.next = cur; 1853 } 1854 } 1855 1856 static unw_rec_list * 1857 output_gr_mem (unsigned int mask) 1858 { 1859 unw_rec_list *ptr = alloc_record (gr_mem); 1860 unw_rec_list *cur = ptr; 1861 1862 ptr->r.record.p.grmask = mask; 1863 unwind.pending_saves = &ptr->r.record.p; 1864 for (;;) 1865 { 1866 unw_rec_list *prev = cur; 1867 1868 /* Clear least significant set bit. */ 1869 mask &= ~(mask & (~mask + 1)); 1870 if (!mask) 1871 return ptr; 1872 cur = alloc_record (gr_mem); 1873 cur->r.record.p.grmask = mask; 1874 /* Retain only least significant bit. 
*/ 1875 prev->r.record.p.grmask ^= mask; 1876 prev->r.record.p.next = cur; 1877 } 1878 } 1879 1880 static unw_rec_list * 1881 output_br_mem (unsigned int mask) 1882 { 1883 unw_rec_list *ptr = alloc_record (br_mem); 1884 unw_rec_list *cur = ptr; 1885 1886 ptr->r.record.p.brmask = mask; 1887 unwind.pending_saves = &ptr->r.record.p; 1888 for (;;) 1889 { 1890 unw_rec_list *prev = cur; 1891 1892 /* Clear least significant set bit. */ 1893 mask &= ~(mask & (~mask + 1)); 1894 if (!mask) 1895 return ptr; 1896 cur = alloc_record (br_mem); 1897 cur->r.record.p.brmask = mask; 1898 /* Retain only least significant bit. */ 1899 prev->r.record.p.brmask ^= mask; 1900 prev->r.record.p.next = cur; 1901 } 1902 } 1903 1904 static unw_rec_list * 1905 output_br_gr (unsigned int mask, unsigned int reg) 1906 { 1907 unw_rec_list *ptr = alloc_record (br_gr); 1908 unw_rec_list *cur = ptr; 1909 1910 ptr->r.record.p.brmask = mask; 1911 ptr->r.record.p.r.gr = reg; 1912 unwind.pending_saves = &ptr->r.record.p; 1913 for (;;) 1914 { 1915 unw_rec_list *prev = cur; 1916 1917 /* Clear least significant set bit. */ 1918 mask &= ~(mask & (~mask + 1)); 1919 if (!mask) 1920 return ptr; 1921 cur = alloc_record (br_gr); 1922 cur->r.record.p.brmask = mask; 1923 /* Indicate this record shouldn't be output. */ 1924 cur->r.record.p.r.gr = REG_NUM; 1925 /* Retain only least significant bit. */ 1926 prev->r.record.p.brmask ^= mask; 1927 prev->r.record.p.next = cur; 1928 } 1929 } 1930 1931 static unw_rec_list * 1932 output_spill_base (unsigned int offset) 1933 { 1934 unw_rec_list *ptr = alloc_record (spill_base); 1935 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset); 1936 return ptr; 1937 } 1938 1939 static unw_rec_list * 1940 output_unat_when (void) 1941 { 1942 unw_rec_list *ptr = alloc_record (unat_when); 1943 return ptr; 1944 } 1945 1946 static unw_rec_list * 1947 output_unat_gr (unsigned int gr) 1948 { 1949 unw_rec_list *ptr = alloc_record (unat_gr); 1950 ptr->r.record.p.r.gr = gr; 1951 return ptr; 1952 } 1953 1954 static unw_rec_list * 1955 output_unat_psprel (unsigned int offset) 1956 { 1957 unw_rec_list *ptr = alloc_record (unat_psprel); 1958 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset); 1959 return ptr; 1960 } 1961 1962 static unw_rec_list * 1963 output_unat_sprel (unsigned int offset) 1964 { 1965 unw_rec_list *ptr = alloc_record (unat_sprel); 1966 ptr->r.record.p.off.sp = offset / 4; 1967 return ptr; 1968 } 1969 1970 static unw_rec_list * 1971 output_lc_when (void) 1972 { 1973 unw_rec_list *ptr = alloc_record (lc_when); 1974 return ptr; 1975 } 1976 1977 static unw_rec_list * 1978 output_lc_gr (unsigned int gr) 1979 { 1980 unw_rec_list *ptr = alloc_record (lc_gr); 1981 ptr->r.record.p.r.gr = gr; 1982 return ptr; 1983 } 1984 1985 static unw_rec_list * 1986 output_lc_psprel (unsigned int offset) 1987 { 1988 unw_rec_list *ptr = alloc_record (lc_psprel); 1989 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset); 1990 return ptr; 1991 } 1992 1993 static unw_rec_list * 1994 output_lc_sprel (unsigned int offset) 1995 { 1996 unw_rec_list *ptr = alloc_record (lc_sprel); 1997 ptr->r.record.p.off.sp = offset / 4; 1998 return ptr; 1999 } 2000 2001 static unw_rec_list * 2002 output_fpsr_when (void) 2003 { 2004 unw_rec_list *ptr = alloc_record (fpsr_when); 2005 return ptr; 2006 } 2007 2008 static unw_rec_list * 2009 output_fpsr_gr (unsigned int gr) 2010 { 2011 unw_rec_list *ptr = alloc_record (fpsr_gr); 2012 ptr->r.record.p.r.gr = gr; 2013 return ptr; 2014 } 2015 2016 static unw_rec_list * 2017 output_fpsr_psprel (unsigned int 
offset) 2018 { 2019 unw_rec_list *ptr = alloc_record (fpsr_psprel); 2020 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset); 2021 return ptr; 2022 } 2023 2024 static unw_rec_list * 2025 output_fpsr_sprel (unsigned int offset) 2026 { 2027 unw_rec_list *ptr = alloc_record (fpsr_sprel); 2028 ptr->r.record.p.off.sp = offset / 4; 2029 return ptr; 2030 } 2031 2032 static unw_rec_list * 2033 output_priunat_when_gr (void) 2034 { 2035 unw_rec_list *ptr = alloc_record (priunat_when_gr); 2036 return ptr; 2037 } 2038 2039 static unw_rec_list * 2040 output_priunat_when_mem (void) 2041 { 2042 unw_rec_list *ptr = alloc_record (priunat_when_mem); 2043 return ptr; 2044 } 2045 2046 static unw_rec_list * 2047 output_priunat_gr (unsigned int gr) 2048 { 2049 unw_rec_list *ptr = alloc_record (priunat_gr); 2050 ptr->r.record.p.r.gr = gr; 2051 return ptr; 2052 } 2053 2054 static unw_rec_list * 2055 output_priunat_psprel (unsigned int offset) 2056 { 2057 unw_rec_list *ptr = alloc_record (priunat_psprel); 2058 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset); 2059 return ptr; 2060 } 2061 2062 static unw_rec_list * 2063 output_priunat_sprel (unsigned int offset) 2064 { 2065 unw_rec_list *ptr = alloc_record (priunat_sprel); 2066 ptr->r.record.p.off.sp = offset / 4; 2067 return ptr; 2068 } 2069 2070 static unw_rec_list * 2071 output_bsp_when (void) 2072 { 2073 unw_rec_list *ptr = alloc_record (bsp_when); 2074 return ptr; 2075 } 2076 2077 static unw_rec_list * 2078 output_bsp_gr (unsigned int gr) 2079 { 2080 unw_rec_list *ptr = alloc_record (bsp_gr); 2081 ptr->r.record.p.r.gr = gr; 2082 return ptr; 2083 } 2084 2085 static unw_rec_list * 2086 output_bsp_psprel (unsigned int offset) 2087 { 2088 unw_rec_list *ptr = alloc_record (bsp_psprel); 2089 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset); 2090 return ptr; 2091 } 2092 2093 static unw_rec_list * 2094 output_bsp_sprel (unsigned int offset) 2095 { 2096 unw_rec_list *ptr = alloc_record (bsp_sprel); 2097 ptr->r.record.p.off.sp = offset / 4; 2098 return ptr; 2099 } 2100 2101 static unw_rec_list * 2102 output_bspstore_when (void) 2103 { 2104 unw_rec_list *ptr = alloc_record (bspstore_when); 2105 return ptr; 2106 } 2107 2108 static unw_rec_list * 2109 output_bspstore_gr (unsigned int gr) 2110 { 2111 unw_rec_list *ptr = alloc_record (bspstore_gr); 2112 ptr->r.record.p.r.gr = gr; 2113 return ptr; 2114 } 2115 2116 static unw_rec_list * 2117 output_bspstore_psprel (unsigned int offset) 2118 { 2119 unw_rec_list *ptr = alloc_record (bspstore_psprel); 2120 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset); 2121 return ptr; 2122 } 2123 2124 static unw_rec_list * 2125 output_bspstore_sprel (unsigned int offset) 2126 { 2127 unw_rec_list *ptr = alloc_record (bspstore_sprel); 2128 ptr->r.record.p.off.sp = offset / 4; 2129 return ptr; 2130 } 2131 2132 static unw_rec_list * 2133 output_rnat_when (void) 2134 { 2135 unw_rec_list *ptr = alloc_record (rnat_when); 2136 return ptr; 2137 } 2138 2139 static unw_rec_list * 2140 output_rnat_gr (unsigned int gr) 2141 { 2142 unw_rec_list *ptr = alloc_record (rnat_gr); 2143 ptr->r.record.p.r.gr = gr; 2144 return ptr; 2145 } 2146 2147 static unw_rec_list * 2148 output_rnat_psprel (unsigned int offset) 2149 { 2150 unw_rec_list *ptr = alloc_record (rnat_psprel); 2151 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset); 2152 return ptr; 2153 } 2154 2155 static unw_rec_list * 2156 output_rnat_sprel (unsigned int offset) 2157 { 2158 unw_rec_list *ptr = alloc_record (rnat_sprel); 2159 ptr->r.record.p.off.sp = offset / 4; 2160 return ptr; 
2161 } 2162 2163 static unw_rec_list * 2164 output_unwabi (unsigned long abi, unsigned long context) 2165 { 2166 unw_rec_list *ptr = alloc_record (unwabi); 2167 ptr->r.record.p.abi = abi; 2168 ptr->r.record.p.context = context; 2169 return ptr; 2170 } 2171 2172 static unw_rec_list * 2173 output_epilogue (unsigned long ecount) 2174 { 2175 unw_rec_list *ptr = alloc_record (epilogue); 2176 ptr->r.record.b.ecount = ecount; 2177 return ptr; 2178 } 2179 2180 static unw_rec_list * 2181 output_label_state (unsigned long label) 2182 { 2183 unw_rec_list *ptr = alloc_record (label_state); 2184 ptr->r.record.b.label = label; 2185 return ptr; 2186 } 2187 2188 static unw_rec_list * 2189 output_copy_state (unsigned long label) 2190 { 2191 unw_rec_list *ptr = alloc_record (copy_state); 2192 ptr->r.record.b.label = label; 2193 return ptr; 2194 } 2195 2196 static unw_rec_list * 2197 output_spill_psprel (unsigned int ab, 2198 unsigned int reg, 2199 unsigned int offset, 2200 unsigned int predicate) 2201 { 2202 unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel); 2203 ptr->r.record.x.ab = ab; 2204 ptr->r.record.x.reg = reg; 2205 ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset); 2206 ptr->r.record.x.qp = predicate; 2207 return ptr; 2208 } 2209 2210 static unw_rec_list * 2211 output_spill_sprel (unsigned int ab, 2212 unsigned int reg, 2213 unsigned int offset, 2214 unsigned int predicate) 2215 { 2216 unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel); 2217 ptr->r.record.x.ab = ab; 2218 ptr->r.record.x.reg = reg; 2219 ptr->r.record.x.where.spoff = offset / 4; 2220 ptr->r.record.x.qp = predicate; 2221 return ptr; 2222 } 2223 2224 static unw_rec_list * 2225 output_spill_reg (unsigned int ab, 2226 unsigned int reg, 2227 unsigned int targ_reg, 2228 unsigned int xy, 2229 unsigned int predicate) 2230 { 2231 unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg); 2232 ptr->r.record.x.ab = ab; 2233 ptr->r.record.x.reg = reg; 2234 ptr->r.record.x.where.reg = targ_reg; 2235 ptr->r.record.x.xy = xy; 2236 ptr->r.record.x.qp = predicate; 2237 return ptr; 2238 } 2239 2240 /* Given a unw_rec_list process the correct format with the 2241 specified function. */ 2242 2243 static void 2244 process_one_record (unw_rec_list *ptr, vbyte_func f) 2245 { 2246 unsigned int fr_mask, gr_mask; 2247 2248 switch (ptr->r.type) 2249 { 2250 /* This is a dummy record that takes up no space in the output. */ 2251 case endp: 2252 break; 2253 2254 case gr_mem: 2255 case fr_mem: 2256 case br_mem: 2257 case frgr_mem: 2258 /* These are taken care of by prologue/prologue_gr. */ 2259 break; 2260 2261 case prologue_gr: 2262 case prologue: 2263 if (ptr->r.type == prologue_gr) 2264 output_R2_format (f, ptr->r.record.r.grmask, 2265 ptr->r.record.r.grsave, ptr->r.record.r.rlen); 2266 else 2267 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen); 2268 2269 /* Output descriptor(s) for union of register spills (if any). 
*/ 2270 gr_mask = ptr->r.record.r.mask.gr_mem; 2271 fr_mask = ptr->r.record.r.mask.fr_mem; 2272 if (fr_mask) 2273 { 2274 if ((fr_mask & ~0xfUL) == 0) 2275 output_P6_format (f, fr_mem, fr_mask); 2276 else 2277 { 2278 output_P5_format (f, gr_mask, fr_mask); 2279 gr_mask = 0; 2280 } 2281 } 2282 if (gr_mask) 2283 output_P6_format (f, gr_mem, gr_mask); 2284 if (ptr->r.record.r.mask.br_mem) 2285 output_P1_format (f, ptr->r.record.r.mask.br_mem); 2286 2287 /* output imask descriptor if necessary: */ 2288 if (ptr->r.record.r.mask.i) 2289 output_P4_format (f, ptr->r.record.r.mask.i, 2290 ptr->r.record.r.imask_size); 2291 break; 2292 2293 case body: 2294 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen); 2295 break; 2296 case mem_stack_f: 2297 case mem_stack_v: 2298 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 2299 ptr->r.record.p.size); 2300 break; 2301 case psp_gr: 2302 case rp_gr: 2303 case pfs_gr: 2304 case preds_gr: 2305 case unat_gr: 2306 case lc_gr: 2307 case fpsr_gr: 2308 case priunat_gr: 2309 case bsp_gr: 2310 case bspstore_gr: 2311 case rnat_gr: 2312 output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr); 2313 break; 2314 case rp_br: 2315 output_P3_format (f, rp_br, ptr->r.record.p.r.br); 2316 break; 2317 case psp_sprel: 2318 output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0); 2319 break; 2320 case rp_when: 2321 case pfs_when: 2322 case preds_when: 2323 case unat_when: 2324 case lc_when: 2325 case fpsr_when: 2326 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0); 2327 break; 2328 case rp_psprel: 2329 case pfs_psprel: 2330 case preds_psprel: 2331 case unat_psprel: 2332 case lc_psprel: 2333 case fpsr_psprel: 2334 case spill_base: 2335 output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0); 2336 break; 2337 case rp_sprel: 2338 case pfs_sprel: 2339 case preds_sprel: 2340 case unat_sprel: 2341 case lc_sprel: 2342 case fpsr_sprel: 2343 case priunat_sprel: 2344 case bsp_sprel: 2345 case bspstore_sprel: 2346 case rnat_sprel: 2347 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp); 2348 break; 2349 case gr_gr: 2350 if (ptr->r.record.p.r.gr < REG_NUM) 2351 { 2352 const unw_rec_list *cur = ptr; 2353 2354 gr_mask = cur->r.record.p.grmask; 2355 while ((cur = cur->r.record.p.next) != NULL) 2356 gr_mask |= cur->r.record.p.grmask; 2357 output_P9_format (f, gr_mask, ptr->r.record.p.r.gr); 2358 } 2359 break; 2360 case br_gr: 2361 if (ptr->r.record.p.r.gr < REG_NUM) 2362 { 2363 const unw_rec_list *cur = ptr; 2364 2365 gr_mask = cur->r.record.p.brmask; 2366 while ((cur = cur->r.record.p.next) != NULL) 2367 gr_mask |= cur->r.record.p.brmask; 2368 output_P2_format (f, gr_mask, ptr->r.record.p.r.gr); 2369 } 2370 break; 2371 case spill_mask: 2372 as_bad (_("spill_mask record unimplemented.")); 2373 break; 2374 case priunat_when_gr: 2375 case priunat_when_mem: 2376 case bsp_when: 2377 case bspstore_when: 2378 case rnat_when: 2379 output_P8_format (f, ptr->r.type, ptr->r.record.p.t); 2380 break; 2381 case priunat_psprel: 2382 case bsp_psprel: 2383 case bspstore_psprel: 2384 case rnat_psprel: 2385 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp); 2386 break; 2387 case unwabi: 2388 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context); 2389 break; 2390 case epilogue: 2391 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t); 2392 break; 2393 case label_state: 2394 case copy_state: 2395 output_B4_format (f, ptr->r.type, ptr->r.record.b.label); 2396 break; 2397 case spill_psprel: 2398 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab, 2399 
ptr->r.record.x.reg, ptr->r.record.x.t, 2400 ptr->r.record.x.where.pspoff); 2401 break; 2402 case spill_sprel: 2403 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab, 2404 ptr->r.record.x.reg, ptr->r.record.x.t, 2405 ptr->r.record.x.where.spoff); 2406 break; 2407 case spill_reg: 2408 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg, 2409 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy, 2410 ptr->r.record.x.where.reg, ptr->r.record.x.t); 2411 break; 2412 case spill_psprel_p: 2413 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp, 2414 ptr->r.record.x.ab, ptr->r.record.x.reg, 2415 ptr->r.record.x.t, ptr->r.record.x.where.pspoff); 2416 break; 2417 case spill_sprel_p: 2418 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp, 2419 ptr->r.record.x.ab, ptr->r.record.x.reg, 2420 ptr->r.record.x.t, ptr->r.record.x.where.spoff); 2421 break; 2422 case spill_reg_p: 2423 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab, 2424 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1, 2425 ptr->r.record.x.xy, ptr->r.record.x.where.reg, 2426 ptr->r.record.x.t); 2427 break; 2428 default: 2429 as_bad (_("record_type_not_valid")); 2430 break; 2431 } 2432 } 2433 2434 /* Given a unw_rec_list list, process all the records with 2435 the specified function. */ 2436 static void 2437 process_unw_records (unw_rec_list *list, vbyte_func f) 2438 { 2439 unw_rec_list *ptr; 2440 for (ptr = list; ptr; ptr = ptr->next) 2441 process_one_record (ptr, f); 2442 } 2443 2444 /* Determine the size of a record list in bytes. */ 2445 static int 2446 calc_record_size (unw_rec_list *list) 2447 { 2448 vbyte_count = 0; 2449 process_unw_records (list, count_output); 2450 return vbyte_count; 2451 } 2452 2453 /* Return the number of bits set in the input value. 2454 Perhaps this has a better place... */ 2455 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) 2456 # define popcount __builtin_popcount 2457 #else 2458 static int 2459 popcount (unsigned x) 2460 { 2461 static const unsigned char popcnt[16] = 2462 { 2463 0, 1, 1, 2, 2464 1, 2, 2, 3, 2465 1, 2, 2, 3, 2466 2, 3, 3, 4 2467 }; 2468 2469 if (x < NELEMS (popcnt)) 2470 return popcnt[x]; 2471 return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt)); 2472 } 2473 #endif 2474 2475 /* Update IMASK bitmask to reflect the fact that one or more registers 2476 of type TYPE are saved starting at instruction with index T. If N 2477 bits are set in REGMASK, it is assumed that instructions T through 2478 T+N-1 save these registers. 
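Each affected instruction slot gets a two-bit entry in IMASK; four entries are packed per byte, with the entry for the first slot of each group of four in the two most-significant bits (see the i/pos computation below).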
2479 2480 TYPE values: 2481 0: no save 2482 1: instruction saves next fp reg 2483 2: instruction saves next general reg 2484 3: instruction saves next branch reg */ 2485 static void 2486 set_imask (unw_rec_list *region, 2487 unsigned long regmask, 2488 unsigned long t, 2489 unsigned int type) 2490 { 2491 unsigned char *imask; 2492 unsigned long imask_size; 2493 unsigned int i; 2494 int pos; 2495 2496 imask = region->r.record.r.mask.i; 2497 imask_size = region->r.record.r.imask_size; 2498 if (!imask) 2499 { 2500 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1; 2501 imask = xmalloc (imask_size); 2502 memset (imask, 0, imask_size); 2503 2504 region->r.record.r.imask_size = imask_size; 2505 region->r.record.r.mask.i = imask; 2506 } 2507 2508 i = (t / 4) + 1; 2509 pos = 2 * (3 - t % 4); 2510 while (regmask) 2511 { 2512 if (i >= imask_size) 2513 { 2514 as_bad (_("Ignoring attempt to spill beyond end of region")); 2515 return; 2516 } 2517 2518 imask[i] |= (type & 0x3) << pos; 2519 2520 regmask &= (regmask - 1); 2521 pos -= 2; 2522 if (pos < 0) 2523 { 2524 pos = 0; 2525 ++i; 2526 } 2527 } 2528 } 2529 2530 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR. 2531 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag 2532 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates 2533 for frag sizes. */ 2534 2535 static unsigned long 2536 slot_index (unsigned long slot_addr, 2537 fragS *slot_frag, 2538 unsigned long first_addr, 2539 fragS *first_frag, 2540 int before_relax) 2541 { 2542 unsigned long index = 0; 2543 2544 /* First time we are called, the initial address and frag are invalid. */ 2545 if (first_addr == 0) 2546 return 0; 2547 2548 /* If the two addresses are in different frags, then we need to add in 2549 the remaining size of this frag, and then the entire size of intermediate 2550 frags. */ 2551 while (slot_frag != first_frag) 2552 { 2553 unsigned long start_addr = (unsigned long) &first_frag->fr_literal; 2554 2555 if (! before_relax) 2556 { 2557 /* We can get the final addresses only during and after 2558 relaxation. */ 2559 if (first_frag->fr_next && first_frag->fr_next->fr_address) 2560 index += 3 * ((first_frag->fr_next->fr_address 2561 - first_frag->fr_address 2562 - first_frag->fr_fix) >> 4); 2563 } 2564 else 2565 /* We don't know what the final addresses will be. We try our 2566 best to estimate. */ 2567 switch (first_frag->fr_type) 2568 { 2569 default: 2570 break; 2571 2572 case rs_space: 2573 as_fatal (_("Only constant space allocation is supported")); 2574 break; 2575 2576 case rs_align: 2577 case rs_align_code: 2578 case rs_align_test: 2579 /* Take alignment into account. Assume the worst case 2580 before relaxation. */ 2581 index += 3 * ((1 << first_frag->fr_offset) >> 4); 2582 break; 2583 2584 case rs_org: 2585 if (first_frag->fr_symbol) 2586 { 2587 as_fatal (_("Only constant offsets are supported")); 2588 break; 2589 } 2590 case rs_fill: 2591 index += 3 * (first_frag->fr_offset >> 4); 2592 break; 2593 } 2594 2595 /* Add in the full size of the frag converted to instruction slots. */ 2596 index += 3 * (first_frag->fr_fix >> 4); 2597 /* Subtract away the initial part before first_addr. */ 2598 index -= (3 * ((first_addr >> 4) - (start_addr >> 4)) 2599 + ((first_addr & 0x3) - (start_addr & 0x3))); 2600 2601 /* Move to the beginning of the next frag. 
*/ 2602 first_frag = first_frag->fr_next; 2603 first_addr = (unsigned long) &first_frag->fr_literal; 2604 2605 /* This can happen if there is section switching in the middle of a 2606 function, causing the frag chain for the function to be broken. 2607 It is too difficult to recover safely from this problem, so we just 2608 exit with an error. */ 2609 if (first_frag == NULL) 2610 as_fatal (_("Section switching in code is not supported.")); 2611 } 2612 2613 /* Add in the used part of the last frag. */ 2614 index += (3 * ((slot_addr >> 4) - (first_addr >> 4)) 2615 + ((slot_addr & 0x3) - (first_addr & 0x3))); 2616 return index; 2617 } 2618 2619 /* Optimize unwind record directives. */ 2620 2621 static unw_rec_list * 2622 optimize_unw_records (unw_rec_list *list) 2623 { 2624 if (!list) 2625 return NULL; 2626 2627 /* If the only unwind record is ".prologue" or ".prologue" followed 2628 by ".body", then we can optimize the unwind directives away. */ 2629 if (list->r.type == prologue 2630 && (list->next->r.type == endp 2631 || (list->next->r.type == body && list->next->next->r.type == endp))) 2632 return NULL; 2633 2634 return list; 2635 } 2636 2637 /* Given a complete record list, process any records which have 2638 unresolved fields, (ie length counts for a prologue). After 2639 this has been run, all necessary information should be available 2640 within each record to generate an image. */ 2641 2642 static void 2643 fixup_unw_records (unw_rec_list *list, int before_relax) 2644 { 2645 unw_rec_list *ptr, *region = 0; 2646 unsigned long first_addr = 0, rlen = 0, t; 2647 fragS *first_frag = 0; 2648 2649 for (ptr = list; ptr; ptr = ptr->next) 2650 { 2651 if (ptr->slot_number == SLOT_NUM_NOT_SET) 2652 as_bad (_(" Insn slot not set in unwind record.")); 2653 t = slot_index (ptr->slot_number, ptr->slot_frag, 2654 first_addr, first_frag, before_relax); 2655 switch (ptr->r.type) 2656 { 2657 case prologue: 2658 case prologue_gr: 2659 case body: 2660 { 2661 unw_rec_list *last; 2662 int size; 2663 unsigned long last_addr = 0; 2664 fragS *last_frag = NULL; 2665 2666 first_addr = ptr->slot_number; 2667 first_frag = ptr->slot_frag; 2668 /* Find either the next body/prologue start, or the end of 2669 the function, and determine the size of the region. */ 2670 for (last = ptr->next; last != NULL; last = last->next) 2671 if (last->r.type == prologue || last->r.type == prologue_gr 2672 || last->r.type == body || last->r.type == endp) 2673 { 2674 last_addr = last->slot_number; 2675 last_frag = last->slot_frag; 2676 break; 2677 } 2678 size = slot_index (last_addr, last_frag, first_addr, first_frag, 2679 before_relax); 2680 rlen = ptr->r.record.r.rlen = size; 2681 if (ptr->r.type == body) 2682 /* End of region. */ 2683 region = 0; 2684 else 2685 region = ptr; 2686 break; 2687 } 2688 case epilogue: 2689 if (t < rlen) 2690 ptr->r.record.b.t = rlen - 1 - t; 2691 else 2692 /* This happens when a memory-stack-less procedure uses a 2693 ".restore sp" directive at the end of a region to pop 2694 the frame state. 
*/ 2695 ptr->r.record.b.t = 0; 2696 break; 2697 2698 case mem_stack_f: 2699 case mem_stack_v: 2700 case rp_when: 2701 case pfs_when: 2702 case preds_when: 2703 case unat_when: 2704 case lc_when: 2705 case fpsr_when: 2706 case priunat_when_gr: 2707 case priunat_when_mem: 2708 case bsp_when: 2709 case bspstore_when: 2710 case rnat_when: 2711 ptr->r.record.p.t = t; 2712 break; 2713 2714 case spill_reg: 2715 case spill_sprel: 2716 case spill_psprel: 2717 case spill_reg_p: 2718 case spill_sprel_p: 2719 case spill_psprel_p: 2720 ptr->r.record.x.t = t; 2721 break; 2722 2723 case frgr_mem: 2724 if (!region) 2725 { 2726 as_bad (_("frgr_mem record before region record!")); 2727 return; 2728 } 2729 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask; 2730 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask; 2731 set_imask (region, ptr->r.record.p.frmask, t, 1); 2732 set_imask (region, ptr->r.record.p.grmask, t, 2); 2733 break; 2734 case fr_mem: 2735 if (!region) 2736 { 2737 as_bad (_("fr_mem record before region record!")); 2738 return; 2739 } 2740 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask; 2741 set_imask (region, ptr->r.record.p.frmask, t, 1); 2742 break; 2743 case gr_mem: 2744 if (!region) 2745 { 2746 as_bad (_("gr_mem record before region record!")); 2747 return; 2748 } 2749 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask; 2750 set_imask (region, ptr->r.record.p.grmask, t, 2); 2751 break; 2752 case br_mem: 2753 if (!region) 2754 { 2755 as_bad (_("br_mem record before region record!")); 2756 return; 2757 } 2758 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask; 2759 set_imask (region, ptr->r.record.p.brmask, t, 3); 2760 break; 2761 2762 case gr_gr: 2763 if (!region) 2764 { 2765 as_bad (_("gr_gr record before region record!")); 2766 return; 2767 } 2768 set_imask (region, ptr->r.record.p.grmask, t, 2); 2769 break; 2770 case br_gr: 2771 if (!region) 2772 { 2773 as_bad (_("br_gr record before region record!")); 2774 return; 2775 } 2776 set_imask (region, ptr->r.record.p.brmask, t, 3); 2777 break; 2778 2779 default: 2780 break; 2781 } 2782 } 2783 } 2784 2785 /* Estimate the size of a frag before relaxing. We only have one type of frag 2786 to handle here, which is the unwind info frag. */ 2787 2788 int 2789 ia64_estimate_size_before_relax (fragS *frag, 2790 asection *segtype ATTRIBUTE_UNUSED) 2791 { 2792 unw_rec_list *list; 2793 int len, size, pad; 2794 2795 /* ??? This code is identical to the first part of ia64_convert_frag. */ 2796 list = (unw_rec_list *) frag->fr_opcode; 2797 fixup_unw_records (list, 0); 2798 2799 len = calc_record_size (list); 2800 /* pad to pointer-size boundary. */ 2801 pad = len % md.pointer_size; 2802 if (pad != 0) 2803 len += md.pointer_size - pad; 2804 /* Add 8 for the header. */ 2805 size = len + 8; 2806 /* Add a pointer for the personality offset. */ 2807 if (frag->fr_offset) 2808 size += md.pointer_size; 2809 2810 /* fr_var carries the max_chars that we created the fragment with. 2811 We must, of course, have allocated enough memory earlier. */ 2812 assert (frag->fr_var >= size); 2813 2814 return frag->fr_fix + size; 2815 } 2816 2817 /* This function converts a rs_machine_dependent variant frag into a 2818 normal fill frag with the unwind image from the record list. */ 2819 void 2820 ia64_convert_frag (fragS *frag) 2821 { 2822 unw_rec_list *list; 2823 int len, size, pad; 2824 valueT flag_value; 2825 2826 /* ??? This code is identical to ia64_estimate_size_before_relax.
*/ 2827 list = (unw_rec_list *) frag->fr_opcode; 2828 fixup_unw_records (list, 0); 2829 2830 len = calc_record_size (list); 2831 /* pad to pointer-size boundary. */ 2832 pad = len % md.pointer_size; 2833 if (pad != 0) 2834 len += md.pointer_size - pad; 2835 /* Add 8 for the header. */ 2836 size = len + 8; 2837 /* Add a pointer for the personality offset. */ 2838 if (frag->fr_offset) 2839 size += md.pointer_size; 2840 2841 /* fr_var carries the max_chars that we created the fragment with. 2842 We must, of course, have allocated enough memory earlier. */ 2843 assert (frag->fr_var >= size); 2844 2845 /* Initialize the header area. fr_offset is initialized with 2846 unwind.personality_routine. */ 2847 if (frag->fr_offset) 2848 { 2849 if (md.flags & EF_IA_64_ABI64) 2850 flag_value = (bfd_vma) 3 << 32; 2851 else 2852 /* 32-bit unwind info block. */ 2853 flag_value = (bfd_vma) 0x1003 << 32; 2854 } 2855 else 2856 flag_value = 0; 2857 2858 md_number_to_chars (frag->fr_literal, 2859 (((bfd_vma) 1 << 48) /* Version. */ 2860 | flag_value /* U & E handler flags. */ 2861 | (len / md.pointer_size)), /* Length. */ 2862 8); 2863 2864 /* Skip the header. */ 2865 vbyte_mem_ptr = frag->fr_literal + 8; 2866 process_unw_records (list, output_vbyte_mem); 2867 2868 /* Fill the padding bytes with zeros. */ 2869 if (pad != 0) 2870 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0, 2871 md.pointer_size - pad); 2872 /* Fill the unwind personality with zeros. */ 2873 if (frag->fr_offset) 2874 md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0, 2875 md.pointer_size); 2876 2877 frag->fr_fix += size; 2878 frag->fr_type = rs_fill; 2879 frag->fr_var = 0; 2880 frag->fr_offset = 0; 2881 } 2882 2883 static int 2884 parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po) 2885 { 2886 int sep = parse_operand (e, ','); 2887 2888 *qp = e->X_add_number - REG_P; 2889 if (e->X_op != O_register || *qp > 63) 2890 { 2891 as_bad (_("First operand to .%s must be a predicate"), po); 2892 *qp = 0; 2893 } 2894 else if (*qp == 0) 2895 as_warn (_("Pointless use of p0 as first operand to .%s"), po); 2896 if (sep == ',') 2897 sep = parse_operand (e, ','); 2898 else 2899 e->X_op = O_absent; 2900 return sep; 2901 } 2902 2903 static void 2904 convert_expr_to_ab_reg (const expressionS *e, 2905 unsigned int *ab, 2906 unsigned int *regp, 2907 const char *po, 2908 int n) 2909 { 2910 unsigned int reg = e->X_add_number; 2911 2912 *ab = *regp = 0; /* Anything valid is good here. */ 2913 2914 if (e->X_op != O_register) 2915 reg = REG_GR; /* Anything invalid is good here. 
*/ 2916 2917 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7)) 2918 { 2919 *ab = 0; 2920 *regp = reg - REG_GR; 2921 } 2922 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5)) 2923 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31))) 2924 { 2925 *ab = 1; 2926 *regp = reg - REG_FR; 2927 } 2928 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5)) 2929 { 2930 *ab = 2; 2931 *regp = reg - REG_BR; 2932 } 2933 else 2934 { 2935 *ab = 3; 2936 switch (reg) 2937 { 2938 case REG_PR: *regp = 0; break; 2939 case REG_PSP: *regp = 1; break; 2940 case REG_PRIUNAT: *regp = 2; break; 2941 case REG_BR + 0: *regp = 3; break; 2942 case REG_AR + AR_BSP: *regp = 4; break; 2943 case REG_AR + AR_BSPSTORE: *regp = 5; break; 2944 case REG_AR + AR_RNAT: *regp = 6; break; 2945 case REG_AR + AR_UNAT: *regp = 7; break; 2946 case REG_AR + AR_FPSR: *regp = 8; break; 2947 case REG_AR + AR_PFS: *regp = 9; break; 2948 case REG_AR + AR_LC: *regp = 10; break; 2949 2950 default: 2951 as_bad (_("Operand %d to .%s must be a preserved register"), n, po); 2952 break; 2953 } 2954 } 2955 } 2956 2957 static void 2958 convert_expr_to_xy_reg (const expressionS *e, 2959 unsigned int *xy, 2960 unsigned int *regp, 2961 const char *po, 2962 int n) 2963 { 2964 unsigned int reg = e->X_add_number; 2965 2966 *xy = *regp = 0; /* Anything valid is good here. */ 2967 2968 if (e->X_op != O_register) 2969 reg = REG_GR; /* Anything invalid is good here. */ 2970 2971 if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127)) 2972 { 2973 *xy = 0; 2974 *regp = reg - REG_GR; 2975 } 2976 else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127)) 2977 { 2978 *xy = 1; 2979 *regp = reg - REG_FR; 2980 } 2981 else if (reg >= REG_BR && reg <= (REG_BR + 7)) 2982 { 2983 *xy = 2; 2984 *regp = reg - REG_BR; 2985 } 2986 else 2987 as_bad (_("Operand %d to .%s must be a writable register"), n, po); 2988 } 2989 2990 static void 2991 dot_align (int arg) 2992 { 2993 /* The current frag is an alignment frag. */ 2994 align_frag = frag_now; 2995 s_align_bytes (arg); 2996 } 2997 2998 static void 2999 dot_radix (int dummy ATTRIBUTE_UNUSED) 3000 { 3001 char *radix; 3002 int ch; 3003 3004 SKIP_WHITESPACE (); 3005 3006 if (is_it_end_of_statement ()) 3007 return; 3008 radix = input_line_pointer; 3009 ch = get_symbol_end (); 3010 ia64_canonicalize_symbol_name (radix); 3011 if (strcasecmp (radix, "C")) 3012 as_bad (_("Radix `%s' unsupported or invalid"), radix); 3013 *input_line_pointer = ch; 3014 demand_empty_rest_of_line (); 3015 } 3016 3017 /* Helper function for .loc directives. If the assembler is not generating 3018 line number info, then we need to remember which instructions have a .loc 3019 directive, and only call dwarf2_gen_line_info for those instructions. */ 3020 3021 static void 3022 dot_loc (int x) 3023 { 3024 CURR_SLOT.loc_directive_seen = 1; 3025 dwarf2_directive_loc (x); 3026 } 3027 3028 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */ 3029 static void 3030 dot_special_section (int which) 3031 { 3032 set_section ((char *) special_section_name[which]); 3033 } 3034 3035 /* Return -1 for warning and 0 for error. 
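In the error case the rest of the input line has already been discarded via ignore_rest_of_line, so callers can simply return when they see 0.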
*/ 3036 3037 static int 3038 unwind_diagnostic (const char * region, const char *directive) 3039 { 3040 if (md.unwind_check == unwind_check_warning) 3041 { 3042 as_warn (_(".%s outside of %s"), directive, region); 3043 return -1; 3044 } 3045 else 3046 { 3047 as_bad (_(".%s outside of %s"), directive, region); 3048 ignore_rest_of_line (); 3049 return 0; 3050 } 3051 } 3052 3053 /* Return 1 if a directive is in a procedure, -1 if a directive isn't in 3054 a procedure but the unwind directive check is set to warning, 0 if 3055 a directive isn't in a procedure and the unwind directive check is set 3056 to error. */ 3057 3058 static int 3059 in_procedure (const char *directive) 3060 { 3061 if (unwind.proc_pending.sym 3062 && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0)) 3063 return 1; 3064 return unwind_diagnostic ("procedure", directive); 3065 } 3066 3067 /* Return 1 if a directive is in a prologue, -1 if a directive isn't in 3068 a prologue but the unwind directive check is set to warning, 0 if 3069 a directive isn't in a prologue and the unwind directive check is set 3070 to error. */ 3071 3072 static int 3073 in_prologue (const char *directive) 3074 { 3075 int in = in_procedure (directive); 3076 3077 if (in > 0 && !unwind.prologue) 3078 in = unwind_diagnostic ("prologue", directive); 3079 check_pending_save (); 3080 return in; 3081 } 3082 3083 /* Return 1 if a directive is in a body, -1 if a directive isn't in 3084 a body but the unwind directive check is set to warning, 0 if 3085 a directive isn't in a body and the unwind directive check is set 3086 to error. */ 3087 3088 static int 3089 in_body (const char *directive) 3090 { 3091 int in = in_procedure (directive); 3092 3093 if (in > 0 && !unwind.body) 3094 in = unwind_diagnostic ("body region", directive); 3095 return in; 3096 } 3097 3098 static void 3099 add_unwind_entry (unw_rec_list *ptr, int sep) 3100 { 3101 if (ptr) 3102 { 3103 if (unwind.tail) 3104 unwind.tail->next = ptr; 3105 else 3106 unwind.list = ptr; 3107 unwind.tail = ptr; 3108 3109 /* The current entry can in fact be a chain of unwind entries. */ 3110 if (unwind.current_entry == NULL) 3111 unwind.current_entry = ptr; 3112 } 3113 3114 /* The current entry can in fact be a chain of unwind entries. */ 3115 if (unwind.current_entry == NULL) 3116 unwind.current_entry = ptr; 3117 3118 if (sep == ',') 3119 { 3120 /* Parse a tag permitted for the current directive. */ 3121 int ch; 3122 3123 SKIP_WHITESPACE (); 3124 ch = get_symbol_end (); 3125 /* FIXME: For now, just issue a warning that this isn't implemented. 
*/ 3126 { 3127 static int warned; 3128 3129 if (!warned) 3130 { 3131 warned = 1; 3132 as_warn (_("Tags on unwind pseudo-ops aren't supported, yet")); 3133 } 3134 } 3135 *input_line_pointer = ch; 3136 } 3137 if (sep != NOT_A_CHAR) 3138 demand_empty_rest_of_line (); 3139 } 3140 3141 static void 3142 dot_fframe (int dummy ATTRIBUTE_UNUSED) 3143 { 3144 expressionS e; 3145 int sep; 3146 3147 if (!in_prologue ("fframe")) 3148 return; 3149 3150 sep = parse_operand (&e, ','); 3151 3152 if (e.X_op != O_constant) 3153 { 3154 as_bad (_("First operand to .fframe must be a constant")); 3155 e.X_add_number = 0; 3156 } 3157 add_unwind_entry (output_mem_stack_f (e.X_add_number), sep); 3158 } 3159 3160 static void 3161 dot_vframe (int dummy ATTRIBUTE_UNUSED) 3162 { 3163 expressionS e; 3164 unsigned reg; 3165 int sep; 3166 3167 if (!in_prologue ("vframe")) 3168 return; 3169 3170 sep = parse_operand (&e, ','); 3171 reg = e.X_add_number - REG_GR; 3172 if (e.X_op != O_register || reg > 127) 3173 { 3174 as_bad (_("First operand to .vframe must be a general register")); 3175 reg = 0; 3176 } 3177 add_unwind_entry (output_mem_stack_v (), sep); 3178 if (! (unwind.prologue_mask & 2)) 3179 add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR); 3180 else if (reg != unwind.prologue_gr 3181 + (unsigned) popcount (unwind.prologue_mask & (-2 << 1))) 3182 as_warn (_("Operand of .vframe contradicts .prologue")); 3183 } 3184 3185 static void 3186 dot_vframesp (int psp) 3187 { 3188 expressionS e; 3189 int sep; 3190 3191 if (psp) 3192 as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant")); 3193 3194 if (!in_prologue ("vframesp")) 3195 return; 3196 3197 sep = parse_operand (&e, ','); 3198 if (e.X_op != O_constant) 3199 { 3200 as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)")); 3201 e.X_add_number = 0; 3202 } 3203 add_unwind_entry (output_mem_stack_v (), sep); 3204 add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR); 3205 } 3206 3207 static void 3208 dot_save (int dummy ATTRIBUTE_UNUSED) 3209 { 3210 expressionS e1, e2; 3211 unsigned reg1, reg2; 3212 int sep; 3213 3214 if (!in_prologue ("save")) 3215 return; 3216 3217 sep = parse_operand (&e1, ','); 3218 if (sep == ',') 3219 sep = parse_operand (&e2, ','); 3220 else 3221 e2.X_op = O_absent; 3222 3223 reg1 = e1.X_add_number; 3224 /* Make sure its a valid ar.xxx reg, OR its br0, aka 'rp'. */ 3225 if (e1.X_op != O_register) 3226 { 3227 as_bad (_("First operand to .save not a register")); 3228 reg1 = REG_PR; /* Anything valid is good here. 
*/ 3229 } 3230 reg2 = e2.X_add_number - REG_GR; 3231 if (e2.X_op != O_register || reg2 > 127) 3232 { 3233 as_bad (_("Second operand to .save not a valid register")); 3234 reg2 = 0; 3235 } 3236 switch (reg1) 3237 { 3238 case REG_AR + AR_BSP: 3239 add_unwind_entry (output_bsp_when (), sep); 3240 add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR); 3241 break; 3242 case REG_AR + AR_BSPSTORE: 3243 add_unwind_entry (output_bspstore_when (), sep); 3244 add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR); 3245 break; 3246 case REG_AR + AR_RNAT: 3247 add_unwind_entry (output_rnat_when (), sep); 3248 add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR); 3249 break; 3250 case REG_AR + AR_UNAT: 3251 add_unwind_entry (output_unat_when (), sep); 3252 add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR); 3253 break; 3254 case REG_AR + AR_FPSR: 3255 add_unwind_entry (output_fpsr_when (), sep); 3256 add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR); 3257 break; 3258 case REG_AR + AR_PFS: 3259 add_unwind_entry (output_pfs_when (), sep); 3260 if (! (unwind.prologue_mask & 4)) 3261 add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR); 3262 else if (reg2 != unwind.prologue_gr 3263 + (unsigned) popcount (unwind.prologue_mask & (-4 << 1))) 3264 as_warn (_("Second operand of .save contradicts .prologue")); 3265 break; 3266 case REG_AR + AR_LC: 3267 add_unwind_entry (output_lc_when (), sep); 3268 add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR); 3269 break; 3270 case REG_BR: 3271 add_unwind_entry (output_rp_when (), sep); 3272 if (! (unwind.prologue_mask & 8)) 3273 add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR); 3274 else if (reg2 != unwind.prologue_gr) 3275 as_warn (_("Second operand of .save contradicts .prologue")); 3276 break; 3277 case REG_PR: 3278 add_unwind_entry (output_preds_when (), sep); 3279 if (! 
(unwind.prologue_mask & 1)) 3280 add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR); 3281 else if (reg2 != unwind.prologue_gr 3282 + (unsigned) popcount (unwind.prologue_mask & (-1 << 1))) 3283 as_warn (_("Second operand of .save contradicts .prologue")); 3284 break; 3285 case REG_PRIUNAT: 3286 add_unwind_entry (output_priunat_when_gr (), sep); 3287 add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR); 3288 break; 3289 default: 3290 as_bad (_("First operand to .save not a valid register")); 3291 add_unwind_entry (NULL, sep); 3292 break; 3293 } 3294 } 3295 3296 static void 3297 dot_restore (int dummy ATTRIBUTE_UNUSED) 3298 { 3299 expressionS e1; 3300 unsigned long ecount; /* # of _additional_ regions to pop */ 3301 int sep; 3302 3303 if (!in_body ("restore")) 3304 return; 3305 3306 sep = parse_operand (&e1, ','); 3307 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12) 3308 as_bad (_("First operand to .restore must be stack pointer (sp)")); 3309 3310 if (sep == ',') 3311 { 3312 expressionS e2; 3313 3314 sep = parse_operand (&e2, ','); 3315 if (e2.X_op != O_constant || e2.X_add_number < 0) 3316 { 3317 as_bad (_("Second operand to .restore must be a constant >= 0")); 3318 e2.X_add_number = 0; 3319 } 3320 ecount = e2.X_add_number; 3321 } 3322 else 3323 ecount = unwind.prologue_count - 1; 3324 3325 if (ecount >= unwind.prologue_count) 3326 { 3327 as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"), 3328 ecount + 1, unwind.prologue_count); 3329 ecount = 0; 3330 } 3331 3332 add_unwind_entry (output_epilogue (ecount), sep); 3333 3334 if (ecount < unwind.prologue_count) 3335 unwind.prologue_count -= ecount + 1; 3336 else 3337 unwind.prologue_count = 0; 3338 } 3339 3340 static void 3341 dot_restorereg (int pred) 3342 { 3343 unsigned int qp, ab, reg; 3344 expressionS e; 3345 int sep; 3346 const char * const po = pred ? "restorereg.p" : "restorereg"; 3347 3348 if (!in_procedure (po)) 3349 return; 3350 3351 if (pred) 3352 sep = parse_predicate_and_operand (&e, &qp, po); 3353 else 3354 { 3355 sep = parse_operand (&e, ','); 3356 qp = 0; 3357 } 3358 convert_expr_to_ab_reg (&e, &ab, ®, po, 1 + pred); 3359 3360 add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep); 3361 } 3362 3363 static char *special_linkonce_name[] = 3364 { 3365 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi." 3366 }; 3367 3368 static void 3369 start_unwind_section (const segT text_seg, int sec_index) 3370 { 3371 /* 3372 Use a slightly ugly scheme to derive the unwind section names from 3373 the text section name: 3374 3375 text sect. unwind table sect. 3376 name: name: comments: 3377 ---------- ----------------- -------------------------------- 3378 .text .IA_64.unwind 3379 .text.foo .IA_64.unwind.text.foo 3380 .foo .IA_64.unwind.foo 3381 .gnu.linkonce.t.foo 3382 .gnu.linkonce.ia64unw.foo 3383 _info .IA_64.unwind_info gas issues error message (ditto) 3384 _infoFOO .IA_64.unwind_infoFOO gas issues error message (ditto) 3385 3386 This mapping is done so that: 3387 3388 (a) An object file with unwind info only in .text will use 3389 unwind section names .IA_64.unwind and .IA_64.unwind_info. 3390 This follows the letter of the ABI and also ensures backwards 3391 compatibility with older toolchains. 3392 3393 (b) An object file with unwind info in multiple text sections 3394 will use separate unwind sections for each text section. 
3395 This allows us to properly set the "sh_info" and "sh_link" 3396 fields in SHT_IA_64_UNWIND as required by the ABI and also 3397 lets GNU ld support programs with multiple segments 3398 containing unwind info (as might be the case for certain 3399 embedded applications). 3400 3401 (c) An error is issued if there would be a name clash. 3402 */ 3403 3404 const char *text_name, *sec_text_name; 3405 char *sec_name; 3406 const char *prefix = special_section_name [sec_index]; 3407 const char *suffix; 3408 size_t prefix_len, suffix_len, sec_name_len; 3409 3410 sec_text_name = segment_name (text_seg); 3411 text_name = sec_text_name; 3412 if (strncmp (text_name, "_info", 5) == 0) 3413 { 3414 as_bad (_("Illegal section name `%s' (causes unwind section name clash)"), 3415 text_name); 3416 ignore_rest_of_line (); 3417 return; 3418 } 3419 if (strcmp (text_name, ".text") == 0) 3420 text_name = ""; 3421 3422 /* Build the unwind section name by appending the (possibly stripped) 3423 text section name to the unwind prefix. */ 3424 suffix = text_name; 3425 if (strncmp (text_name, ".gnu.linkonce.t.", 3426 sizeof (".gnu.linkonce.t.") - 1) == 0) 3427 { 3428 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND]; 3429 suffix += sizeof (".gnu.linkonce.t.") - 1; 3430 } 3431 3432 prefix_len = strlen (prefix); 3433 suffix_len = strlen (suffix); 3434 sec_name_len = prefix_len + suffix_len; 3435 sec_name = alloca (sec_name_len + 1); 3436 memcpy (sec_name, prefix, prefix_len); 3437 memcpy (sec_name + prefix_len, suffix, suffix_len); 3438 sec_name [sec_name_len] = '\0'; 3439 3440 /* Handle COMDAT group. */ 3441 if ((text_seg->flags & SEC_LINK_ONCE) != 0 3442 && (elf_section_flags (text_seg) & SHF_GROUP) != 0) 3443 { 3444 char *section; 3445 size_t len, group_name_len; 3446 const char *group_name = elf_group_name (text_seg); 3447 3448 if (group_name == NULL) 3449 { 3450 as_bad (_("Group section `%s' has no group signature"), 3451 sec_text_name); 3452 ignore_rest_of_line (); 3453 return; 3454 } 3455 /* We have to construct a fake section directive. */ 3456 group_name_len = strlen (group_name); 3457 len = (sec_name_len 3458 + 16 /* ,"aG",@progbits, */ 3459 + group_name_len /* ,group_name */ 3460 + 7); /* ,comdat */ 3461 3462 section = alloca (len + 1); 3463 memcpy (section, sec_name, sec_name_len); 3464 memcpy (section + sec_name_len, ",\"aG\",@progbits,", 16); 3465 memcpy (section + sec_name_len + 16, group_name, group_name_len); 3466 memcpy (section + len - 7, ",comdat", 7); 3467 section [len] = '\0'; 3468 set_section (section); 3469 } 3470 else 3471 { 3472 set_section (sec_name); 3473 bfd_set_section_flags (stdoutput, now_seg, 3474 SEC_LOAD | SEC_ALLOC | SEC_READONLY); 3475 } 3476 3477 elf_linked_to_section (now_seg) = text_seg; 3478 } 3479 3480 static void 3481 generate_unwind_image (const segT text_seg) 3482 { 3483 int size, pad; 3484 unw_rec_list *list; 3485 3486 /* Mark the end of the unwind info, so that we can compute the size of the 3487 last unwind region. */ 3488 add_unwind_entry (output_endp (), NOT_A_CHAR); 3489 3490 /* Force out pending instructions, to make sure all unwind records have 3491 a valid slot_number field. */ 3492 ia64_flush_insns (); 3493 3494 /* Generate the unwind record. */ 3495 list = optimize_unw_records (unwind.list); 3496 fixup_unw_records (list, 1); 3497 size = calc_record_size (list); 3498 3499 if (size > 0 || unwind.force_unwind_entry) 3500 { 3501 unwind.force_unwind_entry = 0; 3502 /* pad to pointer-size boundary. 
*/ 3503 pad = size % md.pointer_size; 3504 if (pad != 0) 3505 size += md.pointer_size - pad; 3506 /* Add 8 for the header. */ 3507 size += 8; 3508 /* Add a pointer for the personality offset. */ 3509 if (unwind.personality_routine) 3510 size += md.pointer_size; 3511 } 3512 3513 /* If there are unwind records, switch sections, and output the info. */ 3514 if (size != 0) 3515 { 3516 expressionS exp; 3517 bfd_reloc_code_real_type reloc; 3518 3519 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO); 3520 3521 /* Make sure the section has 4 byte alignment for ILP32 and 3522 8 byte alignment for LP64. */ 3523 frag_align (md.pointer_size_shift, 0, 0); 3524 record_alignment (now_seg, md.pointer_size_shift); 3525 3526 /* Set expression which points to start of unwind descriptor area. */ 3527 unwind.info = expr_build_dot (); 3528 3529 frag_var (rs_machine_dependent, size, size, 0, 0, 3530 (offsetT) (long) unwind.personality_routine, 3531 (char *) list); 3532 3533 /* Add the personality address to the image. */ 3534 if (unwind.personality_routine != 0) 3535 { 3536 exp.X_op = O_symbol; 3537 exp.X_add_symbol = unwind.personality_routine; 3538 exp.X_add_number = 0; 3539 3540 if (md.flags & EF_IA_64_BE) 3541 { 3542 if (md.flags & EF_IA_64_ABI64) 3543 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB; 3544 else 3545 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB; 3546 } 3547 else 3548 { 3549 if (md.flags & EF_IA_64_ABI64) 3550 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB; 3551 else 3552 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB; 3553 } 3554 3555 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size, 3556 md.pointer_size, &exp, 0, reloc); 3557 unwind.personality_routine = 0; 3558 } 3559 } 3560 3561 free_saved_prologue_counts (); 3562 unwind.list = unwind.tail = unwind.current_entry = NULL; 3563 } 3564 3565 static void 3566 dot_handlerdata (int dummy ATTRIBUTE_UNUSED) 3567 { 3568 if (!in_procedure ("handlerdata")) 3569 return; 3570 unwind.force_unwind_entry = 1; 3571 3572 /* Remember which segment we're in so we can switch back after .endp */ 3573 unwind.saved_text_seg = now_seg; 3574 unwind.saved_text_subseg = now_subseg; 3575 3576 /* Generate unwind info into unwind-info section and then leave that 3577 section as the currently active one so dataXX directives go into 3578 the language specific data area of the unwind info block. */ 3579 generate_unwind_image (now_seg); 3580 demand_empty_rest_of_line (); 3581 } 3582 3583 static void 3584 dot_unwentry (int dummy ATTRIBUTE_UNUSED) 3585 { 3586 if (!in_procedure ("unwentry")) 3587 return; 3588 unwind.force_unwind_entry = 1; 3589 demand_empty_rest_of_line (); 3590 } 3591 3592 static void 3593 dot_altrp (int dummy ATTRIBUTE_UNUSED) 3594 { 3595 expressionS e; 3596 unsigned reg; 3597 3598 if (!in_prologue ("altrp")) 3599 return; 3600 3601 parse_operand (&e, 0); 3602 reg = e.X_add_number - REG_BR; 3603 if (e.X_op != O_register || reg > 7) 3604 { 3605 as_bad (_("First operand to .altrp not a valid branch register")); 3606 reg = 0; 3607 } 3608 add_unwind_entry (output_rp_br (reg), 0); 3609 } 3610 3611 static void 3612 dot_savemem (int psprel) 3613 { 3614 expressionS e1, e2; 3615 int sep; 3616 int reg1, val; 3617 const char * const po = psprel ? "savepsp" : "savesp"; 3618 3619 if (!in_prologue (po)) 3620 return; 3621 3622 sep = parse_operand (&e1, ','); 3623 if (sep == ',') 3624 sep = parse_operand (&e2, ','); 3625 else 3626 e2.X_op = O_absent; 3627 3628 reg1 = e1.X_add_number; 3629 val = e2.X_add_number; 3630 3631 /* Make sure its a valid ar.xxx reg, OR its br0, aka 'rp'. 
*/ 3632 if (e1.X_op != O_register) 3633 { 3634 as_bad (_("First operand to .%s not a register"), po); 3635 reg1 = REG_PR; /* Anything valid is good here. */ 3636 } 3637 if (e2.X_op != O_constant) 3638 { 3639 as_bad (_("Second operand to .%s not a constant"), po); 3640 val = 0; 3641 } 3642 3643 switch (reg1) 3644 { 3645 case REG_AR + AR_BSP: 3646 add_unwind_entry (output_bsp_when (), sep); 3647 add_unwind_entry ((psprel 3648 ? output_bsp_psprel 3649 : output_bsp_sprel) (val), NOT_A_CHAR); 3650 break; 3651 case REG_AR + AR_BSPSTORE: 3652 add_unwind_entry (output_bspstore_when (), sep); 3653 add_unwind_entry ((psprel 3654 ? output_bspstore_psprel 3655 : output_bspstore_sprel) (val), NOT_A_CHAR); 3656 break; 3657 case REG_AR + AR_RNAT: 3658 add_unwind_entry (output_rnat_when (), sep); 3659 add_unwind_entry ((psprel 3660 ? output_rnat_psprel 3661 : output_rnat_sprel) (val), NOT_A_CHAR); 3662 break; 3663 case REG_AR + AR_UNAT: 3664 add_unwind_entry (output_unat_when (), sep); 3665 add_unwind_entry ((psprel 3666 ? output_unat_psprel 3667 : output_unat_sprel) (val), NOT_A_CHAR); 3668 break; 3669 case REG_AR + AR_FPSR: 3670 add_unwind_entry (output_fpsr_when (), sep); 3671 add_unwind_entry ((psprel 3672 ? output_fpsr_psprel 3673 : output_fpsr_sprel) (val), NOT_A_CHAR); 3674 break; 3675 case REG_AR + AR_PFS: 3676 add_unwind_entry (output_pfs_when (), sep); 3677 add_unwind_entry ((psprel 3678 ? output_pfs_psprel 3679 : output_pfs_sprel) (val), NOT_A_CHAR); 3680 break; 3681 case REG_AR + AR_LC: 3682 add_unwind_entry (output_lc_when (), sep); 3683 add_unwind_entry ((psprel 3684 ? output_lc_psprel 3685 : output_lc_sprel) (val), NOT_A_CHAR); 3686 break; 3687 case REG_BR: 3688 add_unwind_entry (output_rp_when (), sep); 3689 add_unwind_entry ((psprel 3690 ? output_rp_psprel 3691 : output_rp_sprel) (val), NOT_A_CHAR); 3692 break; 3693 case REG_PR: 3694 add_unwind_entry (output_preds_when (), sep); 3695 add_unwind_entry ((psprel 3696 ? output_preds_psprel 3697 : output_preds_sprel) (val), NOT_A_CHAR); 3698 break; 3699 case REG_PRIUNAT: 3700 add_unwind_entry (output_priunat_when_mem (), sep); 3701 add_unwind_entry ((psprel 3702 ? 
output_priunat_psprel 3703 : output_priunat_sprel) (val), NOT_A_CHAR); 3704 break; 3705 default: 3706 as_bad (_("First operand to .%s not a valid register"), po); 3707 add_unwind_entry (NULL, sep); 3708 break; 3709 } 3710 } 3711 3712 static void 3713 dot_saveg (int dummy ATTRIBUTE_UNUSED) 3714 { 3715 expressionS e; 3716 unsigned grmask; 3717 int sep; 3718 3719 if (!in_prologue ("save.g")) 3720 return; 3721 3722 sep = parse_operand (&e, ','); 3723 3724 grmask = e.X_add_number; 3725 if (e.X_op != O_constant 3726 || e.X_add_number <= 0 3727 || e.X_add_number > 0xf) 3728 { 3729 as_bad (_("First operand to .save.g must be a positive 4-bit constant")); 3730 grmask = 0; 3731 } 3732 3733 if (sep == ',') 3734 { 3735 unsigned reg; 3736 int n = popcount (grmask); 3737 3738 parse_operand (&e, 0); 3739 reg = e.X_add_number - REG_GR; 3740 if (e.X_op != O_register || reg > 127) 3741 { 3742 as_bad (_("Second operand to .save.g must be a general register")); 3743 reg = 0; 3744 } 3745 else if (reg > 128U - n) 3746 { 3747 as_bad (_("Second operand to .save.g must be the first of %d general registers"), n); 3748 reg = 0; 3749 } 3750 add_unwind_entry (output_gr_gr (grmask, reg), 0); 3751 } 3752 else 3753 add_unwind_entry (output_gr_mem (grmask), 0); 3754 } 3755 3756 static void 3757 dot_savef (int dummy ATTRIBUTE_UNUSED) 3758 { 3759 expressionS e; 3760 3761 if (!in_prologue ("save.f")) 3762 return; 3763 3764 parse_operand (&e, 0); 3765 3766 if (e.X_op != O_constant 3767 || e.X_add_number <= 0 3768 || e.X_add_number > 0xfffff) 3769 { 3770 as_bad (_("Operand to .save.f must be a positive 20-bit constant")); 3771 e.X_add_number = 0; 3772 } 3773 add_unwind_entry (output_fr_mem (e.X_add_number), 0); 3774 } 3775 3776 static void 3777 dot_saveb (int dummy ATTRIBUTE_UNUSED) 3778 { 3779 expressionS e; 3780 unsigned brmask; 3781 int sep; 3782 3783 if (!in_prologue ("save.b")) 3784 return; 3785 3786 sep = parse_operand (&e, ','); 3787 3788 brmask = e.X_add_number; 3789 if (e.X_op != O_constant 3790 || e.X_add_number <= 0 3791 || e.X_add_number > 0x1f) 3792 { 3793 as_bad (_("First operand to .save.b must be a positive 5-bit constant")); 3794 brmask = 0; 3795 } 3796 3797 if (sep == ',') 3798 { 3799 unsigned reg; 3800 int n = popcount (brmask); 3801 3802 parse_operand (&e, 0); 3803 reg = e.X_add_number - REG_GR; 3804 if (e.X_op != O_register || reg > 127) 3805 { 3806 as_bad (_("Second operand to .save.b must be a general register")); 3807 reg = 0; 3808 } 3809 else if (reg > 128U - n) 3810 { 3811 as_bad (_("Second operand to .save.b must be the first of %d general registers"), n); 3812 reg = 0; 3813 } 3814 add_unwind_entry (output_br_gr (brmask, reg), 0); 3815 } 3816 else 3817 add_unwind_entry (output_br_mem (brmask), 0); 3818 } 3819 3820 static void 3821 dot_savegf (int dummy ATTRIBUTE_UNUSED) 3822 { 3823 expressionS e1, e2; 3824 3825 if (!in_prologue ("save.gf")) 3826 return; 3827 3828 if (parse_operand (&e1, ',') == ',') 3829 parse_operand (&e2, 0); 3830 else 3831 e2.X_op = O_absent; 3832 3833 if (e1.X_op != O_constant 3834 || e1.X_add_number < 0 3835 || e1.X_add_number > 0xf) 3836 { 3837 as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant")); 3838 e1.X_op = O_absent; 3839 e1.X_add_number = 0; 3840 } 3841 if (e2.X_op != O_constant 3842 || e2.X_add_number < 0 3843 || e2.X_add_number > 0xfffff) 3844 { 3845 as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant")); 3846 e2.X_op = O_absent; 3847 e2.X_add_number = 0; 3848 } 3849 if (e1.X_op == O_constant 3850 && e2.X_op == 
O_constant 3851 && e1.X_add_number == 0 3852 && e2.X_add_number == 0) 3853 as_bad (_("Operands to .save.gf may not be both zero")); 3854 3855 add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0); 3856 } 3857 3858 static void 3859 dot_spill (int dummy ATTRIBUTE_UNUSED) 3860 { 3861 expressionS e; 3862 3863 if (!in_prologue ("spill")) 3864 return; 3865 3866 parse_operand (&e, 0); 3867 3868 if (e.X_op != O_constant) 3869 { 3870 as_bad (_("Operand to .spill must be a constant")); 3871 e.X_add_number = 0; 3872 } 3873 add_unwind_entry (output_spill_base (e.X_add_number), 0); 3874 } 3875 3876 static void 3877 dot_spillreg (int pred) 3878 { 3879 int sep; 3880 unsigned int qp, ab, xy, reg, treg; 3881 expressionS e; 3882 const char * const po = pred ? "spillreg.p" : "spillreg"; 3883 3884 if (!in_procedure (po)) 3885 return; 3886 3887 if (pred) 3888 sep = parse_predicate_and_operand (&e, &qp, po); 3889 else 3890 { 3891 sep = parse_operand (&e, ','); 3892 qp = 0; 3893 } 3894 convert_expr_to_ab_reg (&e, &ab, ®, po, 1 + pred); 3895 3896 if (sep == ',') 3897 sep = parse_operand (&e, ','); 3898 else 3899 e.X_op = O_absent; 3900 convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred); 3901 3902 add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep); 3903 } 3904 3905 static void 3906 dot_spillmem (int psprel) 3907 { 3908 expressionS e; 3909 int pred = (psprel < 0), sep; 3910 unsigned int qp, ab, reg; 3911 const char * po; 3912 3913 if (pred) 3914 { 3915 psprel = ~psprel; 3916 po = psprel ? "spillpsp.p" : "spillsp.p"; 3917 } 3918 else 3919 po = psprel ? "spillpsp" : "spillsp"; 3920 3921 if (!in_procedure (po)) 3922 return; 3923 3924 if (pred) 3925 sep = parse_predicate_and_operand (&e, &qp, po); 3926 else 3927 { 3928 sep = parse_operand (&e, ','); 3929 qp = 0; 3930 } 3931 convert_expr_to_ab_reg (&e, &ab, ®, po, 1 + pred); 3932 3933 if (sep == ',') 3934 sep = parse_operand (&e, ','); 3935 else 3936 e.X_op = O_absent; 3937 if (e.X_op != O_constant) 3938 { 3939 as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po); 3940 e.X_add_number = 0; 3941 } 3942 3943 if (psprel) 3944 add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep); 3945 else 3946 add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep); 3947 } 3948 3949 static unsigned int 3950 get_saved_prologue_count (unsigned long lbl) 3951 { 3952 label_prologue_count *lpc = unwind.saved_prologue_counts; 3953 3954 while (lpc != NULL && lpc->label_number != lbl) 3955 lpc = lpc->next; 3956 3957 if (lpc != NULL) 3958 return lpc->prologue_count; 3959 3960 as_bad (_("Missing .label_state %ld"), lbl); 3961 return 1; 3962 } 3963 3964 static void 3965 save_prologue_count (unsigned long lbl, unsigned int count) 3966 { 3967 label_prologue_count *lpc = unwind.saved_prologue_counts; 3968 3969 while (lpc != NULL && lpc->label_number != lbl) 3970 lpc = lpc->next; 3971 3972 if (lpc != NULL) 3973 lpc->prologue_count = count; 3974 else 3975 { 3976 label_prologue_count *new_lpc = xmalloc (sizeof (* new_lpc)); 3977 3978 new_lpc->next = unwind.saved_prologue_counts; 3979 new_lpc->label_number = lbl; 3980 new_lpc->prologue_count = count; 3981 unwind.saved_prologue_counts = new_lpc; 3982 } 3983 } 3984 3985 static void 3986 free_saved_prologue_counts () 3987 { 3988 label_prologue_count *lpc = unwind.saved_prologue_counts; 3989 label_prologue_count *next; 3990 3991 while (lpc != NULL) 3992 { 3993 next = lpc->next; 3994 free (lpc); 3995 lpc = next; 3996 } 3997 3998 unwind.saved_prologue_counts = NULL; 3999 } 4000 
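/* The .label_state and .copy_state directives handled below use the
   machinery above to save and restore the prologue nesting count across
   alternate code paths, in addition to emitting the corresponding
   label_state/copy_state unwind records.  A sketch (illustrative only;
   the label number is arbitrary):

	.body
	.label_state 1		// remember the current prologue count under label 1
	...
	.copy_state 1		// restore the count saved under label 1
*/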
4001 static void 4002 dot_label_state (int dummy ATTRIBUTE_UNUSED) 4003 { 4004 expressionS e; 4005 4006 if (!in_body ("label_state")) 4007 return; 4008 4009 parse_operand (&e, 0); 4010 if (e.X_op == O_constant) 4011 save_prologue_count (e.X_add_number, unwind.prologue_count); 4012 else 4013 { 4014 as_bad (_("Operand to .label_state must be a constant")); 4015 e.X_add_number = 0; 4016 } 4017 add_unwind_entry (output_label_state (e.X_add_number), 0); 4018 } 4019 4020 static void 4021 dot_copy_state (int dummy ATTRIBUTE_UNUSED) 4022 { 4023 expressionS e; 4024 4025 if (!in_body ("copy_state")) 4026 return; 4027 4028 parse_operand (&e, 0); 4029 if (e.X_op == O_constant) 4030 unwind.prologue_count = get_saved_prologue_count (e.X_add_number); 4031 else 4032 { 4033 as_bad (_("Operand to .copy_state must be a constant")); 4034 e.X_add_number = 0; 4035 } 4036 add_unwind_entry (output_copy_state (e.X_add_number), 0); 4037 } 4038 4039 static void 4040 dot_unwabi (int dummy ATTRIBUTE_UNUSED) 4041 { 4042 expressionS e1, e2; 4043 unsigned char sep; 4044 4045 if (!in_prologue ("unwabi")) 4046 return; 4047 4048 sep = parse_operand (&e1, ','); 4049 if (sep == ',') 4050 parse_operand (&e2, 0); 4051 else 4052 e2.X_op = O_absent; 4053 4054 if (e1.X_op != O_constant) 4055 { 4056 as_bad (_("First operand to .unwabi must be a constant")); 4057 e1.X_add_number = 0; 4058 } 4059 4060 if (e2.X_op != O_constant) 4061 { 4062 as_bad (_("Second operand to .unwabi must be a constant")); 4063 e2.X_add_number = 0; 4064 } 4065 4066 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0); 4067 } 4068 4069 static void 4070 dot_personality (int dummy ATTRIBUTE_UNUSED) 4071 { 4072 char *name, *p, c; 4073 if (!in_procedure ("personality")) 4074 return; 4075 SKIP_WHITESPACE (); 4076 name = input_line_pointer; 4077 c = get_symbol_end (); 4078 p = input_line_pointer; 4079 unwind.personality_routine = symbol_find_or_make (name); 4080 unwind.force_unwind_entry = 1; 4081 *p = c; 4082 SKIP_WHITESPACE (); 4083 demand_empty_rest_of_line (); 4084 } 4085 4086 static void 4087 dot_proc (int dummy ATTRIBUTE_UNUSED) 4088 { 4089 char *name, *p, c; 4090 symbolS *sym; 4091 proc_pending *pending, *last_pending; 4092 4093 if (unwind.proc_pending.sym) 4094 { 4095 (md.unwind_check == unwind_check_warning 4096 ? 
as_warn 4097 : as_bad) (_("Missing .endp after previous .proc")); 4098 while (unwind.proc_pending.next) 4099 { 4100 pending = unwind.proc_pending.next; 4101 unwind.proc_pending.next = pending->next; 4102 free (pending); 4103 } 4104 } 4105 last_pending = NULL; 4106 4107 /* Parse names of main and alternate entry points and mark them as 4108 function symbols: */ 4109 while (1) 4110 { 4111 SKIP_WHITESPACE (); 4112 name = input_line_pointer; 4113 c = get_symbol_end (); 4114 p = input_line_pointer; 4115 if (!*name) 4116 as_bad (_("Empty argument of .proc")); 4117 else 4118 { 4119 sym = symbol_find_or_make (name); 4120 if (S_IS_DEFINED (sym)) 4121 as_bad (_("`%s' was already defined"), name); 4122 else if (!last_pending) 4123 { 4124 unwind.proc_pending.sym = sym; 4125 last_pending = &unwind.proc_pending; 4126 } 4127 else 4128 { 4129 pending = xmalloc (sizeof (*pending)); 4130 pending->sym = sym; 4131 last_pending = last_pending->next = pending; 4132 } 4133 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION; 4134 } 4135 *p = c; 4136 SKIP_WHITESPACE (); 4137 if (*input_line_pointer != ',') 4138 break; 4139 ++input_line_pointer; 4140 } 4141 if (!last_pending) 4142 { 4143 unwind.proc_pending.sym = expr_build_dot (); 4144 last_pending = &unwind.proc_pending; 4145 } 4146 last_pending->next = NULL; 4147 demand_empty_rest_of_line (); 4148 ia64_do_align (16); 4149 4150 unwind.prologue = 0; 4151 unwind.prologue_count = 0; 4152 unwind.body = 0; 4153 unwind.insn = 0; 4154 unwind.list = unwind.tail = unwind.current_entry = NULL; 4155 unwind.personality_routine = 0; 4156 } 4157 4158 static void 4159 dot_body (int dummy ATTRIBUTE_UNUSED) 4160 { 4161 if (!in_procedure ("body")) 4162 return; 4163 if (!unwind.prologue && !unwind.body && unwind.insn) 4164 as_warn (_("Initial .body should precede any instructions")); 4165 check_pending_save (); 4166 4167 unwind.prologue = 0; 4168 unwind.prologue_mask = 0; 4169 unwind.body = 1; 4170 4171 add_unwind_entry (output_body (), 0); 4172 } 4173 4174 static void 4175 dot_prologue (int dummy ATTRIBUTE_UNUSED) 4176 { 4177 unsigned mask = 0, grsave = 0; 4178 4179 if (!in_procedure ("prologue")) 4180 return; 4181 if (unwind.prologue) 4182 { 4183 as_bad (_(".prologue within prologue")); 4184 ignore_rest_of_line (); 4185 return; 4186 } 4187 if (!unwind.body && unwind.insn) 4188 as_warn (_("Initial .prologue should precede any instructions")); 4189 4190 if (!is_it_end_of_statement ()) 4191 { 4192 expressionS e; 4193 int n, sep = parse_operand (&e, ','); 4194 4195 if (e.X_op != O_constant 4196 || e.X_add_number < 0 4197 || e.X_add_number > 0xf) 4198 as_bad (_("First operand to .prologue must be a positive 4-bit constant")); 4199 else if (e.X_add_number == 0) 4200 as_warn (_("Pointless use of zero first operand to .prologue")); 4201 else 4202 mask = e.X_add_number; 4203 n = popcount (mask); 4204 4205 if (sep == ',') 4206 parse_operand (&e, 0); 4207 else 4208 e.X_op = O_absent; 4209 if (e.X_op == O_constant 4210 && e.X_add_number >= 0 4211 && e.X_add_number < 128) 4212 { 4213 if (md.unwind_check == unwind_check_error) 4214 as_warn (_("Using a constant as second operand to .prologue is deprecated")); 4215 grsave = e.X_add_number; 4216 } 4217 else if (e.X_op != O_register 4218 || (grsave = e.X_add_number - REG_GR) > 127) 4219 { 4220 as_bad (_("Second operand to .prologue must be a general register")); 4221 grsave = 0; 4222 } 4223 else if (grsave > 128U - n) 4224 { 4225 as_bad (_("Second operand to .prologue must be the first of %d general registers"), n); 4226 grsave = 0; 4227 } 4228 4229 } 
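/* prologue_mask bit assignments, as implied by the popcount checks in
   dot_save and dot_vframe above: 8 = rp, 4 = ar.pfs, 2 = psp,
   1 = predicates.  The saved copies are assumed to occupy consecutive
   general registers starting at grsave, in that order.  For example
   (illustrative only), ".prologue 0xc, r32" announces that rp will be
   kept in r32 and ar.pfs in r33.  */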
4230 4231 if (mask) 4232 add_unwind_entry (output_prologue_gr (mask, grsave), 0); 4233 else 4234 add_unwind_entry (output_prologue (), 0); 4235 4236 unwind.prologue = 1; 4237 unwind.prologue_mask = mask; 4238 unwind.prologue_gr = grsave; 4239 unwind.body = 0; 4240 ++unwind.prologue_count; 4241 } 4242 4243 static void 4244 dot_endp (int dummy ATTRIBUTE_UNUSED) 4245 { 4246 expressionS e; 4247 int bytes_per_address; 4248 long where; 4249 segT saved_seg; 4250 subsegT saved_subseg; 4251 proc_pending *pending; 4252 int unwind_check = md.unwind_check; 4253 4254 md.unwind_check = unwind_check_error; 4255 if (!in_procedure ("endp")) 4256 return; 4257 md.unwind_check = unwind_check; 4258 4259 if (unwind.saved_text_seg) 4260 { 4261 saved_seg = unwind.saved_text_seg; 4262 saved_subseg = unwind.saved_text_subseg; 4263 unwind.saved_text_seg = NULL; 4264 } 4265 else 4266 { 4267 saved_seg = now_seg; 4268 saved_subseg = now_subseg; 4269 } 4270 4271 insn_group_break (1, 0, 0); 4272 4273 /* If there wasn't a .handlerdata, we haven't generated an image yet. */ 4274 if (!unwind.info) 4275 generate_unwind_image (saved_seg); 4276 4277 if (unwind.info || unwind.force_unwind_entry) 4278 { 4279 symbolS *proc_end; 4280 4281 subseg_set (md.last_text_seg, 0); 4282 proc_end = expr_build_dot (); 4283 4284 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND); 4285 4286 /* Make sure that section has 4 byte alignment for ILP32 and 4287 8 byte alignment for LP64. */ 4288 record_alignment (now_seg, md.pointer_size_shift); 4289 4290 /* Need space for 3 pointers for procedure start, procedure end, 4291 and unwind info. */ 4292 memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size); 4293 where = frag_now_fix () - (3 * md.pointer_size); 4294 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8; 4295 4296 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */ 4297 e.X_op = O_pseudo_fixup; 4298 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym; 4299 e.X_add_number = 0; 4300 if (!S_IS_LOCAL (unwind.proc_pending.sym) 4301 && S_IS_DEFINED (unwind.proc_pending.sym)) 4302 e.X_add_symbol = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym), 4303 S_GET_VALUE (unwind.proc_pending.sym), 4304 symbol_get_frag (unwind.proc_pending.sym)); 4305 else 4306 e.X_add_symbol = unwind.proc_pending.sym; 4307 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e); 4308 4309 e.X_op = O_pseudo_fixup; 4310 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym; 4311 e.X_add_number = 0; 4312 e.X_add_symbol = proc_end; 4313 ia64_cons_fix_new (frag_now, where + bytes_per_address, 4314 bytes_per_address, &e); 4315 4316 if (unwind.info) 4317 { 4318 e.X_op = O_pseudo_fixup; 4319 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym; 4320 e.X_add_number = 0; 4321 e.X_add_symbol = unwind.info; 4322 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2), 4323 bytes_per_address, &e); 4324 } 4325 } 4326 subseg_set (saved_seg, saved_subseg); 4327 4328 /* Set symbol sizes. 
*/ 4329 pending = &unwind.proc_pending; 4330 if (S_GET_NAME (pending->sym)) 4331 { 4332 do 4333 { 4334 symbolS *sym = pending->sym; 4335 4336 if (!S_IS_DEFINED (sym)) 4337 as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym)); 4338 else if (S_GET_SIZE (sym) == 0 4339 && symbol_get_obj (sym)->size == NULL) 4340 { 4341 fragS *frag = symbol_get_frag (sym); 4342 4343 if (frag) 4344 { 4345 if (frag == frag_now && SEG_NORMAL (now_seg)) 4346 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym)); 4347 else 4348 { 4349 symbol_get_obj (sym)->size = 4350 (expressionS *) xmalloc (sizeof (expressionS)); 4351 symbol_get_obj (sym)->size->X_op = O_subtract; 4352 symbol_get_obj (sym)->size->X_add_symbol 4353 = symbol_new (FAKE_LABEL_NAME, now_seg, 4354 frag_now_fix (), frag_now); 4355 symbol_get_obj (sym)->size->X_op_symbol = sym; 4356 symbol_get_obj (sym)->size->X_add_number = 0; 4357 } 4358 } 4359 } 4360 } while ((pending = pending->next) != NULL); 4361 } 4362 4363 /* Parse names of main and alternate entry points. */ 4364 while (1) 4365 { 4366 char *name, *p, c; 4367 4368 SKIP_WHITESPACE (); 4369 name = input_line_pointer; 4370 c = get_symbol_end (); 4371 p = input_line_pointer; 4372 if (!*name) 4373 (md.unwind_check == unwind_check_warning 4374 ? as_warn 4375 : as_bad) (_("Empty argument of .endp")); 4376 else 4377 { 4378 symbolS *sym = symbol_find (name); 4379 4380 for (pending = &unwind.proc_pending; pending; pending = pending->next) 4381 { 4382 if (sym == pending->sym) 4383 { 4384 pending->sym = NULL; 4385 break; 4386 } 4387 } 4388 if (!sym || !pending) 4389 as_warn (_("`%s' was not specified with previous .proc"), name); 4390 } 4391 *p = c; 4392 SKIP_WHITESPACE (); 4393 if (*input_line_pointer != ',') 4394 break; 4395 ++input_line_pointer; 4396 } 4397 demand_empty_rest_of_line (); 4398 4399 /* Deliberately only checking for the main entry point here; the 4400 language spec even says all arguments to .endp are ignored. 
*/ 4401 if (unwind.proc_pending.sym 4402 && S_GET_NAME (unwind.proc_pending.sym) 4403 && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME)) 4404 as_warn (_("`%s' should be an operand to this .endp"), 4405 S_GET_NAME (unwind.proc_pending.sym)); 4406 while (unwind.proc_pending.next) 4407 { 4408 pending = unwind.proc_pending.next; 4409 unwind.proc_pending.next = pending->next; 4410 free (pending); 4411 } 4412 unwind.proc_pending.sym = unwind.info = NULL; 4413 } 4414 4415 static void 4416 dot_template (int template) 4417 { 4418 CURR_SLOT.user_template = template; 4419 } 4420 4421 static void 4422 dot_regstk (int dummy ATTRIBUTE_UNUSED) 4423 { 4424 int ins, locs, outs, rots; 4425 4426 if (is_it_end_of_statement ()) 4427 ins = locs = outs = rots = 0; 4428 else 4429 { 4430 ins = get_absolute_expression (); 4431 if (*input_line_pointer++ != ',') 4432 goto err; 4433 locs = get_absolute_expression (); 4434 if (*input_line_pointer++ != ',') 4435 goto err; 4436 outs = get_absolute_expression (); 4437 if (*input_line_pointer++ != ',') 4438 goto err; 4439 rots = get_absolute_expression (); 4440 } 4441 set_regstack (ins, locs, outs, rots); 4442 return; 4443 4444 err: 4445 as_bad (_("Comma expected")); 4446 ignore_rest_of_line (); 4447 } 4448 4449 static void 4450 dot_rot (int type) 4451 { 4452 offsetT num_regs; 4453 valueT num_alloced = 0; 4454 struct dynreg **drpp, *dr; 4455 int ch, base_reg = 0; 4456 char *name, *start; 4457 size_t len; 4458 4459 switch (type) 4460 { 4461 case DYNREG_GR: base_reg = REG_GR + 32; break; 4462 case DYNREG_FR: base_reg = REG_FR + 32; break; 4463 case DYNREG_PR: base_reg = REG_P + 16; break; 4464 default: break; 4465 } 4466 4467 /* First, remove existing names from hash table. */ 4468 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next) 4469 { 4470 hash_delete (md.dynreg_hash, dr->name, FALSE); 4471 /* FIXME: Free dr->name. 
*/
4472 dr->num_regs = 0;
4473 }
4474
4475 drpp = &md.dynreg[type];
4476 while (1)
4477 {
4478 start = input_line_pointer;
4479 ch = get_symbol_end ();
4480 len = strlen (ia64_canonicalize_symbol_name (start));
4481 *input_line_pointer = ch;
4482
4483 SKIP_WHITESPACE ();
4484 if (*input_line_pointer != '[')
4485 {
4486 as_bad (_("Expected '['"));
4487 goto err;
4488 }
4489 ++input_line_pointer; /* skip '[' */
4490
4491 num_regs = get_absolute_expression ();
4492
4493 if (*input_line_pointer++ != ']')
4494 {
4495 as_bad (_("Expected ']'"));
4496 goto err;
4497 }
4498 if (num_regs <= 0)
4499 {
4500 as_bad (_("Number of elements must be positive"));
4501 goto err;
4502 }
4503 SKIP_WHITESPACE ();
4504
4505 num_alloced += num_regs;
4506 switch (type)
4507 {
4508 case DYNREG_GR:
4509 if (num_alloced > md.rot.num_regs)
4510 {
4511 as_bad (_("Used more than the declared %d rotating registers"),
4512 md.rot.num_regs);
4513 goto err;
4514 }
4515 break;
4516 case DYNREG_FR:
4517 if (num_alloced > 96)
4518 {
4519 as_bad (_("Used more than the available 96 rotating registers"));
4520 goto err;
4521 }
4522 break;
4523 case DYNREG_PR:
4524 if (num_alloced > 48)
4525 {
4526 as_bad (_("Used more than the available 48 rotating registers"));
4527 goto err;
4528 }
4529 break;
4530
4531 default:
4532 break;
4533 }
4534
4535 if (!*drpp)
4536 {
4537 *drpp = obstack_alloc (&notes, sizeof (*dr));
4538 memset (*drpp, 0, sizeof (*dr));
4539 }
4540
4541 name = obstack_alloc (&notes, len + 1);
4542 memcpy (name, start, len);
4543 name[len] = '\0';
4544
4545 dr = *drpp;
4546 dr->name = name;
4547 dr->num_regs = num_regs;
4548 dr->base = base_reg;
4549 drpp = &dr->next;
4550 base_reg += num_regs;
4551
4552 if (hash_insert (md.dynreg_hash, name, dr))
4553 {
4554 as_bad (_("Attempt to redefine register set `%s'"), name);
4555 obstack_free (&notes, name);
4556 goto err;
4557 }
4558
4559 if (*input_line_pointer != ',')
4560 break;
4561 ++input_line_pointer; /* skip comma */
4562 SKIP_WHITESPACE ();
4563 }
4564 demand_empty_rest_of_line ();
4565 return;
4566
4567 err:
4568 ignore_rest_of_line ();
4569 }
4570
4571 static void
4572 dot_byteorder (int byteorder)
4573 {
4574 segment_info_type *seginfo = seg_info (now_seg);
4575
4576 if (byteorder == -1)
4577 {
4578 if (seginfo->tc_segment_info_data.endian == 0)
4579 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4580 byteorder = seginfo->tc_segment_info_data.endian == 1;
4581 }
4582 else
4583 seginfo->tc_segment_info_data.endian = byteorder ?
1 : 2;
4584
4585 if (target_big_endian != byteorder)
4586 {
4587 target_big_endian = byteorder;
4588 if (target_big_endian)
4589 {
4590 ia64_number_to_chars = number_to_chars_bigendian;
4591 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4592 }
4593 else
4594 {
4595 ia64_number_to_chars = number_to_chars_littleendian;
4596 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4597 }
4598 }
4599 }
4600
4601 static void
4602 dot_psr (int dummy ATTRIBUTE_UNUSED)
4603 {
4604 char *option;
4605 int ch;
4606
4607 while (1)
4608 {
4609 option = input_line_pointer;
4610 ch = get_symbol_end ();
4611 if (strcmp (option, "lsb") == 0)
4612 md.flags &= ~EF_IA_64_BE;
4613 else if (strcmp (option, "msb") == 0)
4614 md.flags |= EF_IA_64_BE;
4615 else if (strcmp (option, "abi32") == 0)
4616 md.flags &= ~EF_IA_64_ABI64;
4617 else if (strcmp (option, "abi64") == 0)
4618 md.flags |= EF_IA_64_ABI64;
4619 else
4620 as_bad (_("Unknown psr option `%s'"), option);
4621 *input_line_pointer = ch;
4622
4623 SKIP_WHITESPACE ();
4624 if (*input_line_pointer != ',')
4625 break;
4626
4627 ++input_line_pointer;
4628 SKIP_WHITESPACE ();
4629 }
4630 demand_empty_rest_of_line ();
4631 }
4632
4633 static void
4634 dot_ln (int dummy ATTRIBUTE_UNUSED)
4635 {
4636 new_logical_line (0, get_absolute_expression ());
4637 demand_empty_rest_of_line ();
4638 }
4639
4640 static void
4641 cross_section (int ref, void (*cons) (int), int ua)
4642 {
4643 char *start, *end;
4644 int saved_auto_align;
4645 unsigned int section_count;
4646
4647 SKIP_WHITESPACE ();
4648 start = input_line_pointer;
4649 if (*start == '"')
4650 {
4651 int len;
4652 char *name;
4653
4654 name = demand_copy_C_string (&len);
4655 obstack_free(&notes, name);
4656 if (!name)
4657 {
4658 ignore_rest_of_line ();
4659 return;
4660 }
4661 }
4662 else
4663 {
4664 char c = get_symbol_end ();
4665
4666 if (input_line_pointer == start)
4667 {
4668 as_bad (_("Missing section name"));
4669 ignore_rest_of_line ();
4670 return;
4671 }
4672 *input_line_pointer = c;
4673 }
4674 end = input_line_pointer;
4675 SKIP_WHITESPACE ();
4676 if (*input_line_pointer != ',')
4677 {
4678 as_bad (_("Comma expected after section name"));
4679 ignore_rest_of_line ();
4680 return;
4681 }
4682 *end = '\0';
4683 end = input_line_pointer + 1; /* skip comma */
4684 input_line_pointer = start;
4685 md.keep_pending_output = 1;
4686 section_count = bfd_count_sections(stdoutput);
4687 obj_elf_section (0);
4688 if (section_count != bfd_count_sections(stdoutput))
4689 as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
4690 input_line_pointer = end;
4691 saved_auto_align = md.auto_align;
4692 if (ua)
4693 md.auto_align = 0;
4694 (*cons) (ref);
4695 if (ua)
4696 md.auto_align = saved_auto_align;
4697 obj_elf_previous (0);
4698 md.keep_pending_output = 0;
4699 }
4700
4701 static void
4702 dot_xdata (int size)
4703 {
4704 cross_section (size, cons, 0);
4705 }
4706
4707 /* Why doesn't float_cons() call md_cons_align() the way cons() does?
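   Because it does not, the stmt_float_cons wrapper below performs the
   alignment itself -- 4, 8, or 16 bytes depending on KIND -- before
   handing the statement off to float_cons.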
*/
4708
4709 static void
4710 stmt_float_cons (int kind)
4711 {
4712 size_t alignment;
4713
4714 switch (kind)
4715 {
4716 case 'd':
4717 alignment = 8;
4718 break;
4719
4720 case 'x':
4721 case 'X':
4722 alignment = 16;
4723 break;
4724
4725 case 'f':
4726 default:
4727 alignment = 4;
4728 break;
4729 }
4730 ia64_do_align (alignment);
4731 float_cons (kind);
4732 }
4733
4734 static void
4735 stmt_cons_ua (int size)
4736 {
4737 int saved_auto_align = md.auto_align;
4738
4739 md.auto_align = 0;
4740 cons (size);
4741 md.auto_align = saved_auto_align;
4742 }
4743
4744 static void
4745 dot_xfloat_cons (int kind)
4746 {
4747 cross_section (kind, stmt_float_cons, 0);
4748 }
4749
4750 static void
4751 dot_xstringer (int zero)
4752 {
4753 cross_section (zero, stringer, 0);
4754 }
4755
4756 static void
4757 dot_xdata_ua (int size)
4758 {
4759 cross_section (size, cons, 1);
4760 }
4761
4762 static void
4763 dot_xfloat_cons_ua (int kind)
4764 {
4765 cross_section (kind, float_cons, 1);
4766 }
4767
4768 /* .reg.val <regname>,value */
4769
4770 static void
4771 dot_reg_val (int dummy ATTRIBUTE_UNUSED)
4772 {
4773 expressionS reg;
4774
4775 expression_and_evaluate (&reg);
4776 if (reg.X_op != O_register)
4777 {
4778 as_bad (_("Register name expected"));
4779 ignore_rest_of_line ();
4780 }
4781 else if (*input_line_pointer++ != ',')
4782 {
4783 as_bad (_("Comma expected"));
4784 ignore_rest_of_line ();
4785 }
4786 else
4787 {
4788 valueT value = get_absolute_expression ();
4789 int regno = reg.X_add_number;
4790 if (regno <= REG_GR || regno > REG_GR + 127)
4791 as_warn (_("Register value annotation ignored"));
4792 else
4793 {
4794 gr_values[regno - REG_GR].known = 1;
4795 gr_values[regno - REG_GR].value = value;
4796 gr_values[regno - REG_GR].path = md.path;
4797 }
4798 }
4799 demand_empty_rest_of_line ();
4800 }
4801
4802 /*
4803 .serialize.data
4804 .serialize.instruction
4805 */
4806 static void
4807 dot_serialize (int type)
4808 {
4809 insn_group_break (0, 0, 0);
4810 if (type)
4811 instruction_serialization ();
4812 else
4813 data_serialization ();
4814 insn_group_break (0, 0, 0);
4815 demand_empty_rest_of_line ();
4816 }
4817
4818 /* select dv checking mode
4819 .auto
4820 .explicit
4821 .default
4822
4823 A stop is inserted when changing modes
4824 */
4825
4826 static void
4827 dot_dv_mode (int type)
4828 {
4829 if (md.manual_bundling)
4830 as_warn (_("Directive invalid within a bundle"));
4831
4832 if (type == 'E' || type == 'A')
4833 md.mode_explicitly_set = 0;
4834 else
4835 md.mode_explicitly_set = 1;
4836
4837 md.detect_dv = 1;
4838 switch (type)
4839 {
4840 case 'A':
4841 case 'a':
4842 if (md.explicit_mode)
4843 insn_group_break (1, 0, 0);
4844 md.explicit_mode = 0;
4845 break;
4846 case 'E':
4847 case 'e':
4848 if (!md.explicit_mode)
4849 insn_group_break (1, 0, 0);
4850 md.explicit_mode = 1;
4851 break;
4852 default:
4853 case 'd':
4854 if (md.explicit_mode != md.default_explicit_mode)
4855 insn_group_break (1, 0, 0);
4856 md.explicit_mode = md.default_explicit_mode;
4857 md.mode_explicitly_set = 0;
4858 break;
4859 }
4860 }
4861
4862 static void
4863 print_prmask (valueT mask)
4864 {
4865 int regno;
4866 char *comma = "";
4867 for (regno = 0; regno < 64; regno++)
4868 {
4869 if (mask & ((valueT) 1 << regno))
4870 {
4871 fprintf (stderr, "%s p%d", comma, regno);
4872 comma = ",";
4873 }
4874 }
4875 }
4876
4877 /*
4878 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
4879 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
4880 .pred.rel.mutex p1, p2 [,...]
(also .pred.rel "mutex" or @mutex)
4881 .pred.safe_across_calls p1 [, p2 [,...]]
4882 */
4883
4884 static void
4885 dot_pred_rel (int type)
4886 {
4887 valueT mask = 0;
4888 int count = 0;
4889 int p1 = -1, p2 = -1;
4890
4891 if (type == 0)
4892 {
4893 if (*input_line_pointer == '"')
4894 {
4895 int len;
4896 char *form = demand_copy_C_string (&len);
4897
4898 if (strcmp (form, "mutex") == 0)
4899 type = 'm';
4900 else if (strcmp (form, "clear") == 0)
4901 type = 'c';
4902 else if (strcmp (form, "imply") == 0)
4903 type = 'i';
4904 obstack_free (&notes, form);
4905 }
4906 else if (*input_line_pointer == '@')
4907 {
4908 char *form = ++input_line_pointer;
4909 char c = get_symbol_end();
4910
4911 if (strcmp (form, "mutex") == 0)
4912 type = 'm';
4913 else if (strcmp (form, "clear") == 0)
4914 type = 'c';
4915 else if (strcmp (form, "imply") == 0)
4916 type = 'i';
4917 *input_line_pointer = c;
4918 }
4919 else
4920 {
4921 as_bad (_("Missing predicate relation type"));
4922 ignore_rest_of_line ();
4923 return;
4924 }
4925 if (type == 0)
4926 {
4927 as_bad (_("Unrecognized predicate relation type"));
4928 ignore_rest_of_line ();
4929 return;
4930 }
4931 if (*input_line_pointer == ',')
4932 ++input_line_pointer;
4933 SKIP_WHITESPACE ();
4934 }
4935
4936 while (1)
4937 {
4938 valueT bits = 1;
4939 int sep, regno;
4940 expressionS pr, *pr1, *pr2;
4941
4942 sep = parse_operand (&pr, ',');
4943 if (pr.X_op == O_register
4944 && pr.X_add_number >= REG_P
4945 && pr.X_add_number <= REG_P + 63)
4946 {
4947 regno = pr.X_add_number - REG_P;
4948 bits <<= regno;
4949 count++;
4950 if (p1 == -1)
4951 p1 = regno;
4952 else if (p2 == -1)
4953 p2 = regno;
4954 }
4955 else if (type != 'i'
4956 && pr.X_op == O_subtract
4957 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
4958 && pr1->X_op == O_register
4959 && pr1->X_add_number >= REG_P
4960 && pr1->X_add_number <= REG_P + 63
4961 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
4962 && pr2->X_op == O_register
4963 && pr2->X_add_number >= REG_P
4964 && pr2->X_add_number <= REG_P + 63)
4965 {
4966 /* It's a range.
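	     For example (an illustrative argument, not from this file),
	     p2-p4 parses as an O_subtract of the two predicate registers,
	     giving regno == 2 and stop == 4; the expression below then
	     yields (1 << 5) - (1 << 2) == 0x1c, i.e. bits for p2, p3 and
	     p4, and count grows by 3.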
*/ 4967 int stop; 4968 4969 regno = pr1->X_add_number - REG_P; 4970 stop = pr2->X_add_number - REG_P; 4971 if (regno >= stop) 4972 { 4973 as_bad (_("Bad register range")); 4974 ignore_rest_of_line (); 4975 return; 4976 } 4977 bits = ((bits << stop) << 1) - (bits << regno); 4978 count += stop - regno + 1; 4979 } 4980 else 4981 { 4982 as_bad (_("Predicate register expected")); 4983 ignore_rest_of_line (); 4984 return; 4985 } 4986 if (mask & bits) 4987 as_warn (_("Duplicate predicate register ignored")); 4988 mask |= bits; 4989 if (sep != ',') 4990 break; 4991 } 4992 4993 switch (type) 4994 { 4995 case 'c': 4996 if (count == 0) 4997 mask = ~(valueT) 0; 4998 clear_qp_mutex (mask); 4999 clear_qp_implies (mask, (valueT) 0); 5000 break; 5001 case 'i': 5002 if (count != 2 || p1 == -1 || p2 == -1) 5003 as_bad (_("Predicate source and target required")); 5004 else if (p1 == 0 || p2 == 0) 5005 as_bad (_("Use of p0 is not valid in this context")); 5006 else 5007 add_qp_imply (p1, p2); 5008 break; 5009 case 'm': 5010 if (count < 2) 5011 { 5012 as_bad (_("At least two PR arguments expected")); 5013 break; 5014 } 5015 else if (mask & 1) 5016 { 5017 as_bad (_("Use of p0 is not valid in this context")); 5018 break; 5019 } 5020 add_qp_mutex (mask); 5021 break; 5022 case 's': 5023 /* note that we don't override any existing relations */ 5024 if (count == 0) 5025 { 5026 as_bad (_("At least one PR argument expected")); 5027 break; 5028 } 5029 if (md.debug_dv) 5030 { 5031 fprintf (stderr, "Safe across calls: "); 5032 print_prmask (mask); 5033 fprintf (stderr, "\n"); 5034 } 5035 qp_safe_across_calls = mask; 5036 break; 5037 } 5038 demand_empty_rest_of_line (); 5039 } 5040 5041 /* .entry label [, label [, ...]] 5042 Hint to DV code that the given labels are to be considered entry points. 5043 Otherwise, only global labels are considered entry points. */ 5044 5045 static void 5046 dot_entry (int dummy ATTRIBUTE_UNUSED) 5047 { 5048 const char *err; 5049 char *name; 5050 int c; 5051 symbolS *symbolP; 5052 5053 do 5054 { 5055 name = input_line_pointer; 5056 c = get_symbol_end (); 5057 symbolP = symbol_find_or_make (name); 5058 5059 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (void *) symbolP); 5060 if (err) 5061 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"), 5062 name, err); 5063 5064 *input_line_pointer = c; 5065 SKIP_WHITESPACE (); 5066 c = *input_line_pointer; 5067 if (c == ',') 5068 { 5069 input_line_pointer++; 5070 SKIP_WHITESPACE (); 5071 if (*input_line_pointer == '\n') 5072 c = '\n'; 5073 } 5074 } 5075 while (c == ','); 5076 5077 demand_empty_rest_of_line (); 5078 } 5079 5080 /* .mem.offset offset, base 5081 "base" is used to distinguish between offsets from a different base. 
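   Illustrative (assumed) usage -- the register numbers are made up:

	.mem.offset 0, 0
	st8.spill [r2] = r3
	.mem.offset 8, 0
	st8.spill [r4] = r5

   Here the two stores are declared to fall at offsets 0 and 8 of the same
   (arbitrarily numbered) base area, so DV analysis can treat them as
   non-conflicting accesses.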
*/ 5082 5083 static void 5084 dot_mem_offset (int dummy ATTRIBUTE_UNUSED) 5085 { 5086 md.mem_offset.hint = 1; 5087 md.mem_offset.offset = get_absolute_expression (); 5088 if (*input_line_pointer != ',') 5089 { 5090 as_bad (_("Comma expected")); 5091 ignore_rest_of_line (); 5092 return; 5093 } 5094 ++input_line_pointer; 5095 md.mem_offset.base = get_absolute_expression (); 5096 demand_empty_rest_of_line (); 5097 } 5098 5099 /* ia64-specific pseudo-ops: */ 5100 const pseudo_typeS md_pseudo_table[] = 5101 { 5102 { "radix", dot_radix, 0 }, 5103 { "lcomm", s_lcomm_bytes, 1 }, 5104 { "loc", dot_loc, 0 }, 5105 { "bss", dot_special_section, SPECIAL_SECTION_BSS }, 5106 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS }, 5107 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA }, 5108 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA }, 5109 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT }, 5110 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND }, 5111 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO }, 5112 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY }, 5113 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY }, 5114 { "proc", dot_proc, 0 }, 5115 { "body", dot_body, 0 }, 5116 { "prologue", dot_prologue, 0 }, 5117 { "endp", dot_endp, 0 }, 5118 5119 { "fframe", dot_fframe, 0 }, 5120 { "vframe", dot_vframe, 0 }, 5121 { "vframesp", dot_vframesp, 0 }, 5122 { "vframepsp", dot_vframesp, 1 }, 5123 { "save", dot_save, 0 }, 5124 { "restore", dot_restore, 0 }, 5125 { "restorereg", dot_restorereg, 0 }, 5126 { "restorereg.p", dot_restorereg, 1 }, 5127 { "handlerdata", dot_handlerdata, 0 }, 5128 { "unwentry", dot_unwentry, 0 }, 5129 { "altrp", dot_altrp, 0 }, 5130 { "savesp", dot_savemem, 0 }, 5131 { "savepsp", dot_savemem, 1 }, 5132 { "save.g", dot_saveg, 0 }, 5133 { "save.f", dot_savef, 0 }, 5134 { "save.b", dot_saveb, 0 }, 5135 { "save.gf", dot_savegf, 0 }, 5136 { "spill", dot_spill, 0 }, 5137 { "spillreg", dot_spillreg, 0 }, 5138 { "spillsp", dot_spillmem, 0 }, 5139 { "spillpsp", dot_spillmem, 1 }, 5140 { "spillreg.p", dot_spillreg, 1 }, 5141 { "spillsp.p", dot_spillmem, ~0 }, 5142 { "spillpsp.p", dot_spillmem, ~1 }, 5143 { "label_state", dot_label_state, 0 }, 5144 { "copy_state", dot_copy_state, 0 }, 5145 { "unwabi", dot_unwabi, 0 }, 5146 { "personality", dot_personality, 0 }, 5147 { "mii", dot_template, 0x0 }, 5148 { "mli", dot_template, 0x2 }, /* old format, for compatibility */ 5149 { "mlx", dot_template, 0x2 }, 5150 { "mmi", dot_template, 0x4 }, 5151 { "mfi", dot_template, 0x6 }, 5152 { "mmf", dot_template, 0x7 }, 5153 { "mib", dot_template, 0x8 }, 5154 { "mbb", dot_template, 0x9 }, 5155 { "bbb", dot_template, 0xb }, 5156 { "mmb", dot_template, 0xc }, 5157 { "mfb", dot_template, 0xe }, 5158 { "align", dot_align, 0 }, 5159 { "regstk", dot_regstk, 0 }, 5160 { "rotr", dot_rot, DYNREG_GR }, 5161 { "rotf", dot_rot, DYNREG_FR }, 5162 { "rotp", dot_rot, DYNREG_PR }, 5163 { "lsb", dot_byteorder, 0 }, 5164 { "msb", dot_byteorder, 1 }, 5165 { "psr", dot_psr, 0 }, 5166 { "alias", dot_alias, 0 }, 5167 { "secalias", dot_alias, 1 }, 5168 { "ln", dot_ln, 0 }, /* source line info (for debugging) */ 5169 5170 { "xdata1", dot_xdata, 1 }, 5171 { "xdata2", dot_xdata, 2 }, 5172 { "xdata4", dot_xdata, 4 }, 5173 { "xdata8", dot_xdata, 8 }, 5174 { "xdata16", dot_xdata, 16 }, 5175 { "xreal4", dot_xfloat_cons, 'f' }, 5176 { "xreal8", dot_xfloat_cons, 'd' }, 5177 { "xreal10", dot_xfloat_cons, 'x' }, 5178 { "xreal16", dot_xfloat_cons, 'X' }, 
5179 { "xstring", dot_xstringer, 8 + 0 }, 5180 { "xstringz", dot_xstringer, 8 + 1 }, 5181 5182 /* unaligned versions: */ 5183 { "xdata2.ua", dot_xdata_ua, 2 }, 5184 { "xdata4.ua", dot_xdata_ua, 4 }, 5185 { "xdata8.ua", dot_xdata_ua, 8 }, 5186 { "xdata16.ua", dot_xdata_ua, 16 }, 5187 { "xreal4.ua", dot_xfloat_cons_ua, 'f' }, 5188 { "xreal8.ua", dot_xfloat_cons_ua, 'd' }, 5189 { "xreal10.ua", dot_xfloat_cons_ua, 'x' }, 5190 { "xreal16.ua", dot_xfloat_cons_ua, 'X' }, 5191 5192 /* annotations/DV checking support */ 5193 { "entry", dot_entry, 0 }, 5194 { "mem.offset", dot_mem_offset, 0 }, 5195 { "pred.rel", dot_pred_rel, 0 }, 5196 { "pred.rel.clear", dot_pred_rel, 'c' }, 5197 { "pred.rel.imply", dot_pred_rel, 'i' }, 5198 { "pred.rel.mutex", dot_pred_rel, 'm' }, 5199 { "pred.safe_across_calls", dot_pred_rel, 's' }, 5200 { "reg.val", dot_reg_val, 0 }, 5201 { "serialize.data", dot_serialize, 0 }, 5202 { "serialize.instruction", dot_serialize, 1 }, 5203 { "auto", dot_dv_mode, 'a' }, 5204 { "explicit", dot_dv_mode, 'e' }, 5205 { "default", dot_dv_mode, 'd' }, 5206 5207 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work. 5208 IA-64 aligns data allocation pseudo-ops by default, so we have to 5209 tell it that these ones are supposed to be unaligned. Long term, 5210 should rewrite so that only IA-64 specific data allocation pseudo-ops 5211 are aligned by default. */ 5212 {"2byte", stmt_cons_ua, 2}, 5213 {"4byte", stmt_cons_ua, 4}, 5214 {"8byte", stmt_cons_ua, 8}, 5215 5216 { NULL, 0, 0 } 5217 }; 5218 5219 static const struct pseudo_opcode 5220 { 5221 const char *name; 5222 void (*handler) (int); 5223 int arg; 5224 } 5225 pseudo_opcode[] = 5226 { 5227 /* these are more like pseudo-ops, but don't start with a dot */ 5228 { "data1", cons, 1 }, 5229 { "data2", cons, 2 }, 5230 { "data4", cons, 4 }, 5231 { "data8", cons, 8 }, 5232 { "data16", cons, 16 }, 5233 { "real4", stmt_float_cons, 'f' }, 5234 { "real8", stmt_float_cons, 'd' }, 5235 { "real10", stmt_float_cons, 'x' }, 5236 { "real16", stmt_float_cons, 'X' }, 5237 { "string", stringer, 8 + 0 }, 5238 { "stringz", stringer, 8 + 1 }, 5239 5240 /* unaligned versions: */ 5241 { "data2.ua", stmt_cons_ua, 2 }, 5242 { "data4.ua", stmt_cons_ua, 4 }, 5243 { "data8.ua", stmt_cons_ua, 8 }, 5244 { "data16.ua", stmt_cons_ua, 16 }, 5245 { "real4.ua", float_cons, 'f' }, 5246 { "real8.ua", float_cons, 'd' }, 5247 { "real10.ua", float_cons, 'x' }, 5248 { "real16.ua", float_cons, 'X' }, 5249 }; 5250 5251 /* Declare a register by creating a symbol for it and entering it in 5252 the symbol table. 
*/ 5253 5254 static symbolS * 5255 declare_register (const char *name, unsigned int regnum) 5256 { 5257 const char *err; 5258 symbolS *sym; 5259 5260 sym = symbol_create (name, reg_section, regnum, &zero_address_frag); 5261 5262 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (void *) sym); 5263 if (err) 5264 as_fatal ("Inserting \"%s\" into register table failed: %s", 5265 name, err); 5266 5267 return sym; 5268 } 5269 5270 static void 5271 declare_register_set (const char *prefix, 5272 unsigned int num_regs, 5273 unsigned int base_regnum) 5274 { 5275 char name[8]; 5276 unsigned int i; 5277 5278 for (i = 0; i < num_regs; ++i) 5279 { 5280 snprintf (name, sizeof (name), "%s%u", prefix, i); 5281 declare_register (name, base_regnum + i); 5282 } 5283 } 5284 5285 static unsigned int 5286 operand_width (enum ia64_opnd opnd) 5287 { 5288 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd]; 5289 unsigned int bits = 0; 5290 int i; 5291 5292 bits = 0; 5293 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i) 5294 bits += odesc->field[i].bits; 5295 5296 return bits; 5297 } 5298 5299 static enum operand_match_result 5300 operand_match (const struct ia64_opcode *idesc, int index, expressionS *e) 5301 { 5302 enum ia64_opnd opnd = idesc->operands[index]; 5303 int bits, relocatable = 0; 5304 struct insn_fix *fix; 5305 bfd_signed_vma val; 5306 5307 switch (opnd) 5308 { 5309 /* constants: */ 5310 5311 case IA64_OPND_AR_CCV: 5312 if (e->X_op == O_register && e->X_add_number == REG_AR + 32) 5313 return OPERAND_MATCH; 5314 break; 5315 5316 case IA64_OPND_AR_CSD: 5317 if (e->X_op == O_register && e->X_add_number == REG_AR + 25) 5318 return OPERAND_MATCH; 5319 break; 5320 5321 case IA64_OPND_AR_PFS: 5322 if (e->X_op == O_register && e->X_add_number == REG_AR + 64) 5323 return OPERAND_MATCH; 5324 break; 5325 5326 case IA64_OPND_GR0: 5327 if (e->X_op == O_register && e->X_add_number == REG_GR + 0) 5328 return OPERAND_MATCH; 5329 break; 5330 5331 case IA64_OPND_IP: 5332 if (e->X_op == O_register && e->X_add_number == REG_IP) 5333 return OPERAND_MATCH; 5334 break; 5335 5336 case IA64_OPND_PR: 5337 if (e->X_op == O_register && e->X_add_number == REG_PR) 5338 return OPERAND_MATCH; 5339 break; 5340 5341 case IA64_OPND_PR_ROT: 5342 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT) 5343 return OPERAND_MATCH; 5344 break; 5345 5346 case IA64_OPND_PSR: 5347 if (e->X_op == O_register && e->X_add_number == REG_PSR) 5348 return OPERAND_MATCH; 5349 break; 5350 5351 case IA64_OPND_PSR_L: 5352 if (e->X_op == O_register && e->X_add_number == REG_PSR_L) 5353 return OPERAND_MATCH; 5354 break; 5355 5356 case IA64_OPND_PSR_UM: 5357 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM) 5358 return OPERAND_MATCH; 5359 break; 5360 5361 case IA64_OPND_C1: 5362 if (e->X_op == O_constant) 5363 { 5364 if (e->X_add_number == 1) 5365 return OPERAND_MATCH; 5366 else 5367 return OPERAND_OUT_OF_RANGE; 5368 } 5369 break; 5370 5371 case IA64_OPND_C8: 5372 if (e->X_op == O_constant) 5373 { 5374 if (e->X_add_number == 8) 5375 return OPERAND_MATCH; 5376 else 5377 return OPERAND_OUT_OF_RANGE; 5378 } 5379 break; 5380 5381 case IA64_OPND_C16: 5382 if (e->X_op == O_constant) 5383 { 5384 if (e->X_add_number == 16) 5385 return OPERAND_MATCH; 5386 else 5387 return OPERAND_OUT_OF_RANGE; 5388 } 5389 break; 5390 5391 /* register operands: */ 5392 5393 case IA64_OPND_AR3: 5394 if (e->X_op == O_register && e->X_add_number >= REG_AR 5395 && e->X_add_number < REG_AR + 128) 5396 return OPERAND_MATCH; 5397 break; 5398 5399 
case IA64_OPND_B1: 5400 case IA64_OPND_B2: 5401 if (e->X_op == O_register && e->X_add_number >= REG_BR 5402 && e->X_add_number < REG_BR + 8) 5403 return OPERAND_MATCH; 5404 break; 5405 5406 case IA64_OPND_CR3: 5407 if (e->X_op == O_register && e->X_add_number >= REG_CR 5408 && e->X_add_number < REG_CR + 128) 5409 return OPERAND_MATCH; 5410 break; 5411 5412 case IA64_OPND_F1: 5413 case IA64_OPND_F2: 5414 case IA64_OPND_F3: 5415 case IA64_OPND_F4: 5416 if (e->X_op == O_register && e->X_add_number >= REG_FR 5417 && e->X_add_number < REG_FR + 128) 5418 return OPERAND_MATCH; 5419 break; 5420 5421 case IA64_OPND_P1: 5422 case IA64_OPND_P2: 5423 if (e->X_op == O_register && e->X_add_number >= REG_P 5424 && e->X_add_number < REG_P + 64) 5425 return OPERAND_MATCH; 5426 break; 5427 5428 case IA64_OPND_R1: 5429 case IA64_OPND_R2: 5430 case IA64_OPND_R3: 5431 if (e->X_op == O_register && e->X_add_number >= REG_GR 5432 && e->X_add_number < REG_GR + 128) 5433 return OPERAND_MATCH; 5434 break; 5435 5436 case IA64_OPND_R3_2: 5437 if (e->X_op == O_register && e->X_add_number >= REG_GR) 5438 { 5439 if (e->X_add_number < REG_GR + 4) 5440 return OPERAND_MATCH; 5441 else if (e->X_add_number < REG_GR + 128) 5442 return OPERAND_OUT_OF_RANGE; 5443 } 5444 break; 5445 5446 /* indirect operands: */ 5447 case IA64_OPND_CPUID_R3: 5448 case IA64_OPND_DBR_R3: 5449 case IA64_OPND_DTR_R3: 5450 case IA64_OPND_ITR_R3: 5451 case IA64_OPND_IBR_R3: 5452 case IA64_OPND_MSR_R3: 5453 case IA64_OPND_PKR_R3: 5454 case IA64_OPND_PMC_R3: 5455 case IA64_OPND_PMD_R3: 5456 case IA64_OPND_RR_R3: 5457 if (e->X_op == O_index && e->X_op_symbol 5458 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID 5459 == opnd - IA64_OPND_CPUID_R3)) 5460 return OPERAND_MATCH; 5461 break; 5462 5463 case IA64_OPND_MR3: 5464 if (e->X_op == O_index && !e->X_op_symbol) 5465 return OPERAND_MATCH; 5466 break; 5467 5468 /* immediate operands: */ 5469 case IA64_OPND_CNT2a: 5470 case IA64_OPND_LEN4: 5471 case IA64_OPND_LEN6: 5472 bits = operand_width (idesc->operands[index]); 5473 if (e->X_op == O_constant) 5474 { 5475 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits)) 5476 return OPERAND_MATCH; 5477 else 5478 return OPERAND_OUT_OF_RANGE; 5479 } 5480 break; 5481 5482 case IA64_OPND_CNT2b: 5483 if (e->X_op == O_constant) 5484 { 5485 if ((bfd_vma) (e->X_add_number - 1) < 3) 5486 return OPERAND_MATCH; 5487 else 5488 return OPERAND_OUT_OF_RANGE; 5489 } 5490 break; 5491 5492 case IA64_OPND_CNT2c: 5493 val = e->X_add_number; 5494 if (e->X_op == O_constant) 5495 { 5496 if ((val == 0 || val == 7 || val == 15 || val == 16)) 5497 return OPERAND_MATCH; 5498 else 5499 return OPERAND_OUT_OF_RANGE; 5500 } 5501 break; 5502 5503 case IA64_OPND_SOR: 5504 /* SOR must be an integer multiple of 8 */ 5505 if (e->X_op == O_constant && e->X_add_number & 0x7) 5506 return OPERAND_OUT_OF_RANGE; 5507 case IA64_OPND_SOF: 5508 case IA64_OPND_SOL: 5509 if (e->X_op == O_constant) 5510 { 5511 if ((bfd_vma) e->X_add_number <= 96) 5512 return OPERAND_MATCH; 5513 else 5514 return OPERAND_OUT_OF_RANGE; 5515 } 5516 break; 5517 5518 case IA64_OPND_IMMU62: 5519 if (e->X_op == O_constant) 5520 { 5521 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62)) 5522 return OPERAND_MATCH; 5523 else 5524 return OPERAND_OUT_OF_RANGE; 5525 } 5526 else 5527 { 5528 /* FIXME -- need 62-bit relocation type */ 5529 as_bad (_("62-bit relocation not yet implemented")); 5530 } 5531 break; 5532 5533 case IA64_OPND_IMMU64: 5534 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup 5535 || e->X_op == O_subtract) 5536 { 5537 
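	  /* The 64-bit immediate is symbolic here (e.g. the movl form of
	     loading a link-time constant), so rather than encoding a value
	     we record an IMM64 fixup against the expression; a pseudo
	     fixup such as @gprel() is first mapped to its real relocation
	     type via ia64_gen_real_reloc_type below.  */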
fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups; 5538 fix->code = BFD_RELOC_IA64_IMM64; 5539 if (e->X_op != O_subtract) 5540 { 5541 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code); 5542 if (e->X_op == O_pseudo_fixup) 5543 e->X_op = O_symbol; 5544 } 5545 5546 fix->opnd = idesc->operands[index]; 5547 fix->expr = *e; 5548 fix->is_pcrel = 0; 5549 ++CURR_SLOT.num_fixups; 5550 return OPERAND_MATCH; 5551 } 5552 else if (e->X_op == O_constant) 5553 return OPERAND_MATCH; 5554 break; 5555 5556 case IA64_OPND_IMMU5b: 5557 if (e->X_op == O_constant) 5558 { 5559 val = e->X_add_number; 5560 if (val >= 32 && val <= 63) 5561 return OPERAND_MATCH; 5562 else 5563 return OPERAND_OUT_OF_RANGE; 5564 } 5565 break; 5566 5567 case IA64_OPND_CCNT5: 5568 case IA64_OPND_CNT5: 5569 case IA64_OPND_CNT6: 5570 case IA64_OPND_CPOS6a: 5571 case IA64_OPND_CPOS6b: 5572 case IA64_OPND_CPOS6c: 5573 case IA64_OPND_IMMU2: 5574 case IA64_OPND_IMMU7a: 5575 case IA64_OPND_IMMU7b: 5576 case IA64_OPND_IMMU21: 5577 case IA64_OPND_IMMU24: 5578 case IA64_OPND_MBTYPE4: 5579 case IA64_OPND_MHTYPE8: 5580 case IA64_OPND_POS6: 5581 bits = operand_width (idesc->operands[index]); 5582 if (e->X_op == O_constant) 5583 { 5584 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits)) 5585 return OPERAND_MATCH; 5586 else 5587 return OPERAND_OUT_OF_RANGE; 5588 } 5589 break; 5590 5591 case IA64_OPND_IMMU9: 5592 bits = operand_width (idesc->operands[index]); 5593 if (e->X_op == O_constant) 5594 { 5595 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits)) 5596 { 5597 int lobits = e->X_add_number & 0x3; 5598 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0) 5599 e->X_add_number |= (bfd_vma) 0x3; 5600 return OPERAND_MATCH; 5601 } 5602 else 5603 return OPERAND_OUT_OF_RANGE; 5604 } 5605 break; 5606 5607 case IA64_OPND_IMM44: 5608 /* least 16 bits must be zero */ 5609 if ((e->X_add_number & 0xffff) != 0) 5610 /* XXX technically, this is wrong: we should not be issuing warning 5611 messages until we're sure this instruction pattern is going to 5612 be used! 
*/ 5613 as_warn (_("lower 16 bits of mask ignored")); 5614 5615 if (e->X_op == O_constant) 5616 { 5617 if (((e->X_add_number >= 0 5618 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44)) 5619 || (e->X_add_number < 0 5620 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44)))) 5621 { 5622 /* sign-extend */ 5623 if (e->X_add_number >= 0 5624 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0) 5625 { 5626 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1); 5627 } 5628 return OPERAND_MATCH; 5629 } 5630 else 5631 return OPERAND_OUT_OF_RANGE; 5632 } 5633 break; 5634 5635 case IA64_OPND_IMM17: 5636 /* bit 0 is a don't care (pr0 is hardwired to 1) */ 5637 if (e->X_op == O_constant) 5638 { 5639 if (((e->X_add_number >= 0 5640 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17)) 5641 || (e->X_add_number < 0 5642 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17)))) 5643 { 5644 /* sign-extend */ 5645 if (e->X_add_number >= 0 5646 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0) 5647 { 5648 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1); 5649 } 5650 return OPERAND_MATCH; 5651 } 5652 else 5653 return OPERAND_OUT_OF_RANGE; 5654 } 5655 break; 5656 5657 case IA64_OPND_IMM14: 5658 case IA64_OPND_IMM22: 5659 relocatable = 1; 5660 case IA64_OPND_IMM1: 5661 case IA64_OPND_IMM8: 5662 case IA64_OPND_IMM8U4: 5663 case IA64_OPND_IMM8M1: 5664 case IA64_OPND_IMM8M1U4: 5665 case IA64_OPND_IMM8M1U8: 5666 case IA64_OPND_IMM9a: 5667 case IA64_OPND_IMM9b: 5668 bits = operand_width (idesc->operands[index]); 5669 if (relocatable && (e->X_op == O_symbol 5670 || e->X_op == O_subtract 5671 || e->X_op == O_pseudo_fixup)) 5672 { 5673 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups; 5674 5675 if (idesc->operands[index] == IA64_OPND_IMM14) 5676 fix->code = BFD_RELOC_IA64_IMM14; 5677 else 5678 fix->code = BFD_RELOC_IA64_IMM22; 5679 5680 if (e->X_op != O_subtract) 5681 { 5682 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code); 5683 if (e->X_op == O_pseudo_fixup) 5684 e->X_op = O_symbol; 5685 } 5686 5687 fix->opnd = idesc->operands[index]; 5688 fix->expr = *e; 5689 fix->is_pcrel = 0; 5690 ++CURR_SLOT.num_fixups; 5691 return OPERAND_MATCH; 5692 } 5693 else if (e->X_op != O_constant 5694 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8)) 5695 return OPERAND_MISMATCH; 5696 5697 if (opnd == IA64_OPND_IMM8M1U4) 5698 { 5699 /* Zero is not valid for unsigned compares that take an adjusted 5700 constant immediate range. */ 5701 if (e->X_add_number == 0) 5702 return OPERAND_OUT_OF_RANGE; 5703 5704 /* Sign-extend 32-bit unsigned numbers, so that the following range 5705 checks will work. */ 5706 val = e->X_add_number; 5707 if (((val & (~(bfd_vma) 0 << 32)) == 0) 5708 && ((val & ((bfd_vma) 1 << 31)) != 0)) 5709 val = ((val << 32) >> 32); 5710 5711 /* Check for 0x100000000. This is valid because 5712 0x100000000-1 is the same as ((uint32_t) -1). */ 5713 if (val == ((bfd_signed_vma) 1 << 32)) 5714 return OPERAND_MATCH; 5715 5716 val = val - 1; 5717 } 5718 else if (opnd == IA64_OPND_IMM8M1U8) 5719 { 5720 /* Zero is not valid for unsigned compares that take an adjusted 5721 constant immediate range. */ 5722 if (e->X_add_number == 0) 5723 return OPERAND_OUT_OF_RANGE; 5724 5725 /* Check for 0x10000000000000000. 
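		 As with the 32-bit case above, this is valid because
		 0x10000000000000000 - 1 is the same as ((uint64_t) -1);
		 the value does not fit in X_add_number and therefore
		 arrives as an O_big whose littlenums are all zero except
		 generic_bignum[4] == 1.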
*/ 5726 if (e->X_op == O_big) 5727 { 5728 if (generic_bignum[0] == 0 5729 && generic_bignum[1] == 0 5730 && generic_bignum[2] == 0 5731 && generic_bignum[3] == 0 5732 && generic_bignum[4] == 1) 5733 return OPERAND_MATCH; 5734 else 5735 return OPERAND_OUT_OF_RANGE; 5736 } 5737 else 5738 val = e->X_add_number - 1; 5739 } 5740 else if (opnd == IA64_OPND_IMM8M1) 5741 val = e->X_add_number - 1; 5742 else if (opnd == IA64_OPND_IMM8U4) 5743 { 5744 /* Sign-extend 32-bit unsigned numbers, so that the following range 5745 checks will work. */ 5746 val = e->X_add_number; 5747 if (((val & (~(bfd_vma) 0 << 32)) == 0) 5748 && ((val & ((bfd_vma) 1 << 31)) != 0)) 5749 val = ((val << 32) >> 32); 5750 } 5751 else 5752 val = e->X_add_number; 5753 5754 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1))) 5755 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1)))) 5756 return OPERAND_MATCH; 5757 else 5758 return OPERAND_OUT_OF_RANGE; 5759 5760 case IA64_OPND_INC3: 5761 /* +/- 1, 4, 8, 16 */ 5762 val = e->X_add_number; 5763 if (val < 0) 5764 val = -val; 5765 if (e->X_op == O_constant) 5766 { 5767 if ((val == 1 || val == 4 || val == 8 || val == 16)) 5768 return OPERAND_MATCH; 5769 else 5770 return OPERAND_OUT_OF_RANGE; 5771 } 5772 break; 5773 5774 case IA64_OPND_TGT25: 5775 case IA64_OPND_TGT25b: 5776 case IA64_OPND_TGT25c: 5777 case IA64_OPND_TGT64: 5778 if (e->X_op == O_symbol) 5779 { 5780 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups; 5781 if (opnd == IA64_OPND_TGT25) 5782 fix->code = BFD_RELOC_IA64_PCREL21F; 5783 else if (opnd == IA64_OPND_TGT25b) 5784 fix->code = BFD_RELOC_IA64_PCREL21M; 5785 else if (opnd == IA64_OPND_TGT25c) 5786 fix->code = BFD_RELOC_IA64_PCREL21B; 5787 else if (opnd == IA64_OPND_TGT64) 5788 fix->code = BFD_RELOC_IA64_PCREL60B; 5789 else 5790 abort (); 5791 5792 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code); 5793 fix->opnd = idesc->operands[index]; 5794 fix->expr = *e; 5795 fix->is_pcrel = 1; 5796 ++CURR_SLOT.num_fixups; 5797 return OPERAND_MATCH; 5798 } 5799 case IA64_OPND_TAG13: 5800 case IA64_OPND_TAG13b: 5801 switch (e->X_op) 5802 { 5803 case O_constant: 5804 return OPERAND_MATCH; 5805 5806 case O_symbol: 5807 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups; 5808 /* There are no external relocs for TAG13/TAG13b fields, so we 5809 create a dummy reloc. This will not live past md_apply_fix. */ 5810 fix->code = BFD_RELOC_UNUSED; 5811 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code); 5812 fix->opnd = idesc->operands[index]; 5813 fix->expr = *e; 5814 fix->is_pcrel = 1; 5815 ++CURR_SLOT.num_fixups; 5816 return OPERAND_MATCH; 5817 5818 default: 5819 break; 5820 } 5821 break; 5822 5823 case IA64_OPND_LDXMOV: 5824 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups; 5825 fix->code = BFD_RELOC_IA64_LDXMOV; 5826 fix->opnd = idesc->operands[index]; 5827 fix->expr = *e; 5828 fix->is_pcrel = 0; 5829 ++CURR_SLOT.num_fixups; 5830 return OPERAND_MATCH; 5831 5832 default: 5833 break; 5834 } 5835 return OPERAND_MISMATCH; 5836 } 5837 5838 static int 5839 parse_operand (expressionS *e, int more) 5840 { 5841 int sep = '\0'; 5842 5843 memset (e, 0, sizeof (*e)); 5844 e->X_op = O_absent; 5845 SKIP_WHITESPACE (); 5846 expression_and_evaluate (e); 5847 sep = *input_line_pointer; 5848 if (more && (sep == ',' || sep == more)) 5849 ++input_line_pointer; 5850 return sep; 5851 } 5852 5853 /* Returns the next entry in the opcode table that matches the one in 5854 IDESC, and frees the entry in IDESC. If no matching entry is 5855 found, NULL is returned instead. 
*/ 5856 5857 static struct ia64_opcode * 5858 get_next_opcode (struct ia64_opcode *idesc) 5859 { 5860 struct ia64_opcode *next = ia64_find_next_opcode (idesc); 5861 ia64_free_opcode (idesc); 5862 return next; 5863 } 5864 5865 /* Parse the operands for the opcode and find the opcode variant that 5866 matches the specified operands, or NULL if no match is possible. */ 5867 5868 static struct ia64_opcode * 5869 parse_operands (struct ia64_opcode *idesc) 5870 { 5871 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0; 5872 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0; 5873 int reg1, reg2; 5874 char reg_class; 5875 enum ia64_opnd expected_operand = IA64_OPND_NIL; 5876 enum operand_match_result result; 5877 char mnemonic[129]; 5878 char *first_arg = 0, *end, *saved_input_pointer; 5879 unsigned int sof; 5880 5881 assert (strlen (idesc->name) <= 128); 5882 5883 strcpy (mnemonic, idesc->name); 5884 if (idesc->operands[2] == IA64_OPND_SOF 5885 || idesc->operands[1] == IA64_OPND_SOF) 5886 { 5887 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we 5888 can't parse the first operand until we have parsed the 5889 remaining operands of the "alloc" instruction. */ 5890 SKIP_WHITESPACE (); 5891 first_arg = input_line_pointer; 5892 end = strchr (input_line_pointer, '='); 5893 if (!end) 5894 { 5895 as_bad (_("Expected separator `='")); 5896 return 0; 5897 } 5898 input_line_pointer = end + 1; 5899 ++i; 5900 ++num_outputs; 5901 } 5902 5903 for (; ; ++i) 5904 { 5905 if (i < NELEMS (CURR_SLOT.opnd)) 5906 { 5907 sep = parse_operand (CURR_SLOT.opnd + i, '='); 5908 if (CURR_SLOT.opnd[i].X_op == O_absent) 5909 break; 5910 } 5911 else 5912 { 5913 expressionS dummy; 5914 5915 sep = parse_operand (&dummy, '='); 5916 if (dummy.X_op == O_absent) 5917 break; 5918 } 5919 5920 ++num_operands; 5921 5922 if (sep != '=' && sep != ',') 5923 break; 5924 5925 if (sep == '=') 5926 { 5927 if (num_outputs > 0) 5928 as_bad (_("Duplicate equal sign (=) in instruction")); 5929 else 5930 num_outputs = i + 1; 5931 } 5932 } 5933 if (sep != '\0') 5934 { 5935 as_bad (_("Illegal operand separator `%c'"), sep); 5936 return 0; 5937 } 5938 5939 if (idesc->operands[2] == IA64_OPND_SOF 5940 || idesc->operands[1] == IA64_OPND_SOF) 5941 { 5942 /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r. 5943 Note, however, that due to that mapping operand numbers in error 5944 messages for any of the constant operands will not be correct. */ 5945 know (strcmp (idesc->name, "alloc") == 0); 5946 /* The first operand hasn't been parsed/initialized, yet (but 5947 num_operands intentionally doesn't account for that). */ 5948 i = num_operands > 4 ? 2 : 1; 5949 #define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \ 5950 ? 
CURR_SLOT.opnd[n].X_add_number \ 5951 : 0) 5952 sof = set_regstack (FORCE_CONST(i), 5953 FORCE_CONST(i + 1), 5954 FORCE_CONST(i + 2), 5955 FORCE_CONST(i + 3)); 5956 #undef FORCE_CONST 5957 5958 /* now we can parse the first arg: */ 5959 saved_input_pointer = input_line_pointer; 5960 input_line_pointer = first_arg; 5961 sep = parse_operand (CURR_SLOT.opnd + 0, '='); 5962 if (sep != '=') 5963 --num_outputs; /* force error */ 5964 input_line_pointer = saved_input_pointer; 5965 5966 CURR_SLOT.opnd[i].X_add_number = sof; 5967 if (CURR_SLOT.opnd[i + 1].X_op == O_constant 5968 && CURR_SLOT.opnd[i + 2].X_op == O_constant) 5969 CURR_SLOT.opnd[i + 1].X_add_number 5970 = sof - CURR_SLOT.opnd[i + 2].X_add_number; 5971 else 5972 CURR_SLOT.opnd[i + 1].X_op = O_illegal; 5973 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3]; 5974 } 5975 5976 highest_unmatched_operand = -4; 5977 curr_out_of_range_pos = -1; 5978 error_pos = 0; 5979 for (; idesc; idesc = get_next_opcode (idesc)) 5980 { 5981 if (num_outputs != idesc->num_outputs) 5982 continue; /* mismatch in # of outputs */ 5983 if (highest_unmatched_operand < 0) 5984 highest_unmatched_operand |= 1; 5985 if (num_operands > NELEMS (idesc->operands) 5986 || (num_operands < NELEMS (idesc->operands) 5987 && idesc->operands[num_operands]) 5988 || (num_operands > 0 && !idesc->operands[num_operands - 1])) 5989 continue; /* mismatch in number of arguments */ 5990 if (highest_unmatched_operand < 0) 5991 highest_unmatched_operand |= 2; 5992 5993 CURR_SLOT.num_fixups = 0; 5994 5995 /* Try to match all operands. If we see an out-of-range operand, 5996 then continue trying to match the rest of the operands, since if 5997 the rest match, then this idesc will give the best error message. */ 5998 5999 out_of_range_pos = -1; 6000 for (i = 0; i < num_operands && idesc->operands[i]; ++i) 6001 { 6002 result = operand_match (idesc, i, CURR_SLOT.opnd + i); 6003 if (result != OPERAND_MATCH) 6004 { 6005 if (result != OPERAND_OUT_OF_RANGE) 6006 break; 6007 if (out_of_range_pos < 0) 6008 /* remember position of the first out-of-range operand: */ 6009 out_of_range_pos = i; 6010 } 6011 } 6012 6013 /* If we did not match all operands, or if at least one operand was 6014 out-of-range, then this idesc does not match. Keep track of which 6015 idesc matched the most operands before failing. If we have two 6016 idescs that failed at the same position, and one had an out-of-range 6017 operand, then prefer the out-of-range operand. Thus if we have 6018 "add r0=0x1000000,r1" we get an error saying the constant is out 6019 of range instead of an error saying that the constant should have been 6020 a register. 
*/ 6021 6022 if (i != num_operands || out_of_range_pos >= 0) 6023 { 6024 if (i > highest_unmatched_operand 6025 || (i == highest_unmatched_operand 6026 && out_of_range_pos > curr_out_of_range_pos)) 6027 { 6028 highest_unmatched_operand = i; 6029 if (out_of_range_pos >= 0) 6030 { 6031 expected_operand = idesc->operands[out_of_range_pos]; 6032 error_pos = out_of_range_pos; 6033 } 6034 else 6035 { 6036 expected_operand = idesc->operands[i]; 6037 error_pos = i; 6038 } 6039 curr_out_of_range_pos = out_of_range_pos; 6040 } 6041 continue; 6042 } 6043 6044 break; 6045 } 6046 if (!idesc) 6047 { 6048 if (expected_operand) 6049 as_bad (_("Operand %u of `%s' should be %s"), 6050 error_pos + 1, mnemonic, 6051 elf64_ia64_operands[expected_operand].desc); 6052 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1)) 6053 as_bad (_("Wrong number of output operands")); 6054 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2)) 6055 as_bad (_("Wrong number of input operands")); 6056 else 6057 as_bad (_("Operand mismatch")); 6058 return 0; 6059 } 6060 6061 /* Check that the instruction doesn't use 6062 - r0, f0, or f1 as output operands 6063 - the same predicate twice as output operands 6064 - r0 as address of a base update load or store 6065 - the same GR as output and address of a base update load 6066 - two even- or two odd-numbered FRs as output operands of a floating 6067 point parallel load. 6068 At most two (conflicting) output (or output-like) operands can exist, 6069 (floating point parallel loads have three outputs, but the base register, 6070 if updated, cannot conflict with the actual outputs). */ 6071 reg2 = reg1 = -1; 6072 for (i = 0; i < num_operands; ++i) 6073 { 6074 int regno = 0; 6075 6076 reg_class = 0; 6077 switch (idesc->operands[i]) 6078 { 6079 case IA64_OPND_R1: 6080 case IA64_OPND_R2: 6081 case IA64_OPND_R3: 6082 if (i < num_outputs) 6083 { 6084 if (CURR_SLOT.opnd[i].X_add_number == REG_GR) 6085 reg_class = 'r'; 6086 else if (reg1 < 0) 6087 reg1 = CURR_SLOT.opnd[i].X_add_number; 6088 else if (reg2 < 0) 6089 reg2 = CURR_SLOT.opnd[i].X_add_number; 6090 } 6091 break; 6092 case IA64_OPND_P1: 6093 case IA64_OPND_P2: 6094 if (i < num_outputs) 6095 { 6096 if (reg1 < 0) 6097 reg1 = CURR_SLOT.opnd[i].X_add_number; 6098 else if (reg2 < 0) 6099 reg2 = CURR_SLOT.opnd[i].X_add_number; 6100 } 6101 break; 6102 case IA64_OPND_F1: 6103 case IA64_OPND_F2: 6104 case IA64_OPND_F3: 6105 case IA64_OPND_F4: 6106 if (i < num_outputs) 6107 { 6108 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR 6109 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1) 6110 { 6111 reg_class = 'f'; 6112 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR; 6113 } 6114 else if (reg1 < 0) 6115 reg1 = CURR_SLOT.opnd[i].X_add_number; 6116 else if (reg2 < 0) 6117 reg2 = CURR_SLOT.opnd[i].X_add_number; 6118 } 6119 break; 6120 case IA64_OPND_MR3: 6121 if (idesc->flags & IA64_OPCODE_POSTINC) 6122 { 6123 if (CURR_SLOT.opnd[i].X_add_number == REG_GR) 6124 reg_class = 'm'; 6125 else if (reg1 < 0) 6126 reg1 = CURR_SLOT.opnd[i].X_add_number; 6127 else if (reg2 < 0) 6128 reg2 = CURR_SLOT.opnd[i].X_add_number; 6129 } 6130 break; 6131 default: 6132 break; 6133 } 6134 switch (reg_class) 6135 { 6136 case 0: 6137 break; 6138 default: 6139 as_warn (_("Invalid use of `%c%d' as output operand"), reg_class, regno); 6140 break; 6141 case 'm': 6142 as_warn (_("Invalid use of `r%d' as base update address operand"), regno); 6143 break; 6144 } 6145 } 6146 if (reg1 == reg2) 6147 { 6148 if (reg1 >= REG_GR && reg1 <= REG_GR + 127) 
6149 { 6150 reg1 -= REG_GR; 6151 reg_class = 'r'; 6152 } 6153 else if (reg1 >= REG_P && reg1 <= REG_P + 63) 6154 { 6155 reg1 -= REG_P; 6156 reg_class = 'p'; 6157 } 6158 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127) 6159 { 6160 reg1 -= REG_FR; 6161 reg_class = 'f'; 6162 } 6163 else 6164 reg_class = 0; 6165 if (reg_class) 6166 as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1); 6167 } 6168 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31 6169 && reg2 >= REG_FR && reg2 <= REG_FR + 31) 6170 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127 6171 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)) 6172 && ! ((reg1 ^ reg2) & 1)) 6173 as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"), 6174 reg1 - REG_FR, reg2 - REG_FR); 6175 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31 6176 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127) 6177 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127 6178 && reg2 >= REG_FR && reg2 <= REG_FR + 31)) 6179 as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"), 6180 reg1 - REG_FR, reg2 - REG_FR); 6181 return idesc; 6182 } 6183 6184 static void 6185 build_insn (struct slot *slot, bfd_vma *insnp) 6186 { 6187 const struct ia64_operand *odesc, *o2desc; 6188 struct ia64_opcode *idesc = slot->idesc; 6189 bfd_vma insn; 6190 bfd_signed_vma val; 6191 const char *err; 6192 int i; 6193 6194 insn = idesc->opcode | slot->qp_regno; 6195 6196 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i) 6197 { 6198 if (slot->opnd[i].X_op == O_register 6199 || slot->opnd[i].X_op == O_constant 6200 || slot->opnd[i].X_op == O_index) 6201 val = slot->opnd[i].X_add_number; 6202 else if (slot->opnd[i].X_op == O_big) 6203 { 6204 /* This must be the value 0x10000000000000000. */ 6205 assert (idesc->operands[i] == IA64_OPND_IMM8M1U8); 6206 val = 0; 6207 } 6208 else 6209 val = 0; 6210 6211 switch (idesc->operands[i]) 6212 { 6213 case IA64_OPND_IMMU64: 6214 *insnp++ = (val >> 22) & 0x1ffffffffffLL; 6215 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27) 6216 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21) 6217 | (((val >> 63) & 0x1) << 36)); 6218 continue; 6219 6220 case IA64_OPND_IMMU62: 6221 val &= 0x3fffffffffffffffULL; 6222 if (val != slot->opnd[i].X_add_number) 6223 as_warn (_("Value truncated to 62 bits")); 6224 *insnp++ = (val >> 21) & 0x1ffffffffffLL; 6225 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36)); 6226 continue; 6227 6228 case IA64_OPND_TGT64: 6229 val >>= 4; 6230 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2; 6231 insn |= ((((val >> 59) & 0x1) << 36) 6232 | (((val >> 0) & 0xfffff) << 13)); 6233 continue; 6234 6235 case IA64_OPND_AR3: 6236 val -= REG_AR; 6237 break; 6238 6239 case IA64_OPND_B1: 6240 case IA64_OPND_B2: 6241 val -= REG_BR; 6242 break; 6243 6244 case IA64_OPND_CR3: 6245 val -= REG_CR; 6246 break; 6247 6248 case IA64_OPND_F1: 6249 case IA64_OPND_F2: 6250 case IA64_OPND_F3: 6251 case IA64_OPND_F4: 6252 val -= REG_FR; 6253 break; 6254 6255 case IA64_OPND_P1: 6256 case IA64_OPND_P2: 6257 val -= REG_P; 6258 break; 6259 6260 case IA64_OPND_R1: 6261 case IA64_OPND_R2: 6262 case IA64_OPND_R3: 6263 case IA64_OPND_R3_2: 6264 case IA64_OPND_CPUID_R3: 6265 case IA64_OPND_DBR_R3: 6266 case IA64_OPND_DTR_R3: 6267 case IA64_OPND_ITR_R3: 6268 case IA64_OPND_IBR_R3: 6269 case IA64_OPND_MR3: 6270 case IA64_OPND_MSR_R3: 6271 case IA64_OPND_PKR_R3: 6272 case IA64_OPND_PMC_R3: 6273 case IA64_OPND_PMD_R3: 6274 case IA64_OPND_RR_R3: 6275 val -= REG_GR; 6276 break; 6277 6278 default: 6279 break; 6280 } 6281 6282 odesc = 
elf64_ia64_operands + idesc->operands[i]; 6283 err = (*odesc->insert) (odesc, val, &insn); 6284 if (err) 6285 as_bad_where (slot->src_file, slot->src_line, 6286 _("Bad operand value: %s"), err); 6287 if (idesc->flags & IA64_OPCODE_PSEUDO) 6288 { 6289 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3) 6290 && odesc == elf64_ia64_operands + IA64_OPND_F3) 6291 { 6292 o2desc = elf64_ia64_operands + IA64_OPND_F2; 6293 (*o2desc->insert) (o2desc, val, &insn); 6294 } 6295 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT) 6296 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a 6297 || odesc == elf64_ia64_operands + IA64_OPND_POS6)) 6298 { 6299 o2desc = elf64_ia64_operands + IA64_OPND_LEN6; 6300 (*o2desc->insert) (o2desc, 64 - val, &insn); 6301 } 6302 } 6303 } 6304 *insnp = insn; 6305 } 6306 6307 static void 6308 emit_one_bundle (void) 6309 { 6310 int manual_bundling_off = 0, manual_bundling = 0; 6311 enum ia64_unit required_unit, insn_unit = 0; 6312 enum ia64_insn_type type[3], insn_type; 6313 unsigned int template, orig_template; 6314 bfd_vma insn[3] = { -1, -1, -1 }; 6315 struct ia64_opcode *idesc; 6316 int end_of_insn_group = 0, user_template = -1; 6317 int n, i, j, first, curr, last_slot; 6318 bfd_vma t0 = 0, t1 = 0; 6319 struct label_fix *lfix; 6320 bfd_boolean mark_label; 6321 struct insn_fix *ifix; 6322 char mnemonic[16]; 6323 fixS *fix; 6324 char *f; 6325 int addr_mod; 6326 6327 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS; 6328 know (first >= 0 && first < NUM_SLOTS); 6329 n = MIN (3, md.num_slots_in_use); 6330 6331 /* Determine template: user user_template if specified, best match 6332 otherwise: */ 6333 6334 if (md.slot[first].user_template >= 0) 6335 user_template = template = md.slot[first].user_template; 6336 else 6337 { 6338 /* Auto select appropriate template. */ 6339 memset (type, 0, sizeof (type)); 6340 curr = first; 6341 for (i = 0; i < n; ++i) 6342 { 6343 if (md.slot[curr].label_fixups && i != 0) 6344 break; 6345 type[i] = md.slot[curr].idesc->type; 6346 curr = (curr + 1) % NUM_SLOTS; 6347 } 6348 template = best_template[type[0]][type[1]][type[2]]; 6349 } 6350 6351 /* initialize instructions with appropriate nops: */ 6352 for (i = 0; i < 3; ++i) 6353 insn[i] = nop[ia64_templ_desc[template].exec_unit[i]]; 6354 6355 f = frag_more (16); 6356 6357 /* Check to see if this bundle is at an offset that is a multiple of 16-bytes 6358 from the start of the frag. */ 6359 addr_mod = frag_now_fix () & 15; 6360 if (frag_now->has_code && frag_now->insn_addr != addr_mod) 6361 as_bad (_("instruction address is not a multiple of 16")); 6362 frag_now->insn_addr = addr_mod; 6363 frag_now->has_code = 1; 6364 6365 /* now fill in slots with as many insns as possible: */ 6366 curr = first; 6367 idesc = md.slot[curr].idesc; 6368 end_of_insn_group = 0; 6369 last_slot = -1; 6370 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i) 6371 { 6372 /* If we have unwind records, we may need to update some now. */ 6373 unw_rec_list *ptr = md.slot[curr].unwind_record; 6374 unw_rec_list *end_ptr = NULL; 6375 6376 if (ptr) 6377 { 6378 /* Find the last prologue/body record in the list for the current 6379 insn, and set the slot number for all records up to that point. 6380 This needs to be done now, because prologue/body records refer to 6381 the current point, not the point after the instruction has been 6382 issued. This matters because there may have been nops emitted 6383 meanwhile. Any non-prologue non-body record followed by a 6384 prologue/body record must also refer to the current point. 
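	     Concretely, the loop below stamps each such record with
	     slot_number = (the literal bundle address F) + (slot index)
	     and remembers the owning frag in slot_frag.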
*/ 6385 unw_rec_list *last_ptr; 6386 6387 for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j) 6388 end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record; 6389 for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next) 6390 if (ptr->r.type == prologue || ptr->r.type == prologue_gr 6391 || ptr->r.type == body) 6392 last_ptr = ptr; 6393 if (last_ptr) 6394 { 6395 /* Make last_ptr point one after the last prologue/body 6396 record. */ 6397 last_ptr = last_ptr->next; 6398 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr; 6399 ptr = ptr->next) 6400 { 6401 ptr->slot_number = (unsigned long) f + i; 6402 ptr->slot_frag = frag_now; 6403 } 6404 /* Remove the initialized records, so that we won't accidentally 6405 update them again if we insert a nop and continue. */ 6406 md.slot[curr].unwind_record = last_ptr; 6407 } 6408 } 6409 6410 manual_bundling_off = md.slot[curr].manual_bundling_off; 6411 if (md.slot[curr].manual_bundling_on) 6412 { 6413 if (curr == first) 6414 manual_bundling = 1; 6415 else 6416 break; /* Need to start a new bundle. */ 6417 } 6418 6419 /* If this instruction specifies a template, then it must be the first 6420 instruction of a bundle. */ 6421 if (curr != first && md.slot[curr].user_template >= 0) 6422 break; 6423 6424 if (idesc->flags & IA64_OPCODE_SLOT2) 6425 { 6426 if (manual_bundling && !manual_bundling_off) 6427 { 6428 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line, 6429 _("`%s' must be last in bundle"), idesc->name); 6430 if (i < 2) 6431 manual_bundling = -1; /* Suppress meaningless post-loop errors. */ 6432 } 6433 i = 2; 6434 } 6435 if (idesc->flags & IA64_OPCODE_LAST) 6436 { 6437 int required_slot; 6438 unsigned int required_template; 6439 6440 /* If we need a stop bit after an M slot, our only choice is 6441 template 5 (M;;MI). If we need a stop bit after a B 6442 slot, our only choice is to place it at the end of the 6443 bundle, because the only available templates are MIB, 6444 MBB, BBB, MMB, and MFB. We don't handle anything other 6445 than M and B slots because these are the only kind of 6446 instructions that can have the IA64_OPCODE_LAST bit set. */ 6447 required_template = template; 6448 switch (idesc->type) 6449 { 6450 case IA64_TYPE_M: 6451 required_slot = 0; 6452 required_template = 5; 6453 break; 6454 6455 case IA64_TYPE_B: 6456 required_slot = 2; 6457 break; 6458 6459 default: 6460 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line, 6461 _("Internal error: don't know how to force %s to end of instruction group"), 6462 idesc->name); 6463 required_slot = i; 6464 break; 6465 } 6466 if (manual_bundling 6467 && (i > required_slot 6468 || (required_slot == 2 && !manual_bundling_off) 6469 || (user_template >= 0 6470 /* Changing from MMI to M;MI is OK. */ 6471 && (template ^ required_template) > 1))) 6472 { 6473 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line, 6474 _("`%s' must be last in instruction group"), 6475 idesc->name); 6476 if (i < 2 && required_slot == 2 && !manual_bundling_off) 6477 manual_bundling = -1; /* Suppress meaningless post-loop errors. */ 6478 } 6479 if (required_slot < i) 6480 /* Can't fit this instruction. */ 6481 break; 6482 6483 i = required_slot; 6484 if (required_template != template) 6485 { 6486 /* If we switch the template, we need to reset the NOPs 6487 after slot i. The slot-types of the instructions ahead 6488 of i never change, so we don't need to worry about 6489 changing NOPs in front of this slot. 
*/ 6490 for (j = i; j < 3; ++j) 6491 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]]; 6492 6493 /* We just picked a template that includes the stop bit in the 6494 middle, so we don't need another one emitted later. */ 6495 md.slot[curr].end_of_insn_group = 0; 6496 } 6497 template = required_template; 6498 } 6499 if (curr != first && md.slot[curr].label_fixups) 6500 { 6501 if (manual_bundling) 6502 { 6503 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line, 6504 _("Label must be first in a bundle")); 6505 manual_bundling = -1; /* Suppress meaningless post-loop errors. */ 6506 } 6507 /* This insn must go into the first slot of a bundle. */ 6508 break; 6509 } 6510 6511 if (end_of_insn_group && md.num_slots_in_use >= 1) 6512 { 6513 /* We need an instruction group boundary in the middle of a 6514 bundle. See if we can switch to an other template with 6515 an appropriate boundary. */ 6516 6517 orig_template = template; 6518 if (i == 1 && (user_template == 4 6519 || (user_template < 0 6520 && (ia64_templ_desc[template].exec_unit[0] 6521 == IA64_UNIT_M)))) 6522 { 6523 template = 5; 6524 end_of_insn_group = 0; 6525 } 6526 else if (i == 2 && (user_template == 0 6527 || (user_template < 0 6528 && (ia64_templ_desc[template].exec_unit[1] 6529 == IA64_UNIT_I))) 6530 /* This test makes sure we don't switch the template if 6531 the next instruction is one that needs to be first in 6532 an instruction group. Since all those instructions are 6533 in the M group, there is no way such an instruction can 6534 fit in this bundle even if we switch the template. The 6535 reason we have to check for this is that otherwise we 6536 may end up generating "MI;;I M.." which has the deadly 6537 effect that the second M instruction is no longer the 6538 first in the group! --davidm 99/12/16 */ 6539 && (idesc->flags & IA64_OPCODE_FIRST) == 0) 6540 { 6541 template = 1; 6542 end_of_insn_group = 0; 6543 } 6544 else if (i == 1 6545 && user_template == 0 6546 && !(idesc->flags & IA64_OPCODE_FIRST)) 6547 /* Use the next slot. */ 6548 continue; 6549 else if (curr != first) 6550 /* can't fit this insn */ 6551 break; 6552 6553 if (template != orig_template) 6554 /* if we switch the template, we need to reset the NOPs 6555 after slot i. The slot-types of the instructions ahead 6556 of i never change, so we don't need to worry about 6557 changing NOPs in front of this slot. */ 6558 for (j = i; j < 3; ++j) 6559 insn[j] = nop[ia64_templ_desc[template].exec_unit[j]]; 6560 } 6561 required_unit = ia64_templ_desc[template].exec_unit[i]; 6562 6563 /* resolve dynamic opcodes such as "break", "hint", and "nop": */ 6564 if (idesc->type == IA64_TYPE_DYN) 6565 { 6566 enum ia64_opnd opnd1, opnd2; 6567 6568 if ((strcmp (idesc->name, "nop") == 0) 6569 || (strcmp (idesc->name, "break") == 0)) 6570 insn_unit = required_unit; 6571 else if (strcmp (idesc->name, "hint") == 0) 6572 { 6573 insn_unit = required_unit; 6574 if (required_unit == IA64_UNIT_B) 6575 { 6576 switch (md.hint_b) 6577 { 6578 case hint_b_ok: 6579 break; 6580 case hint_b_warning: 6581 as_warn (_("hint in B unit may be treated as nop")); 6582 break; 6583 case hint_b_error: 6584 /* When manual bundling is off and there is no 6585 user template, we choose a different unit so 6586 that hint won't go into the current slot. We 6587 will fill the current bundle with nops and 6588 try to put hint into the next bundle. 
*/ 6589 if (!manual_bundling && user_template < 0) 6590 insn_unit = IA64_UNIT_I; 6591 else 6592 as_bad (_("hint in B unit can't be used")); 6593 break; 6594 } 6595 } 6596 } 6597 else if (strcmp (idesc->name, "chk.s") == 0 6598 || strcmp (idesc->name, "mov") == 0) 6599 { 6600 insn_unit = IA64_UNIT_M; 6601 if (required_unit == IA64_UNIT_I 6602 || (required_unit == IA64_UNIT_F && template == 6)) 6603 insn_unit = IA64_UNIT_I; 6604 } 6605 else 6606 as_fatal (_("emit_one_bundle: unexpected dynamic op")); 6607 6608 snprintf (mnemonic, sizeof (mnemonic), "%s.%c", 6609 idesc->name, "?imbfxx"[insn_unit]); 6610 opnd1 = idesc->operands[0]; 6611 opnd2 = idesc->operands[1]; 6612 ia64_free_opcode (idesc); 6613 idesc = ia64_find_opcode (mnemonic); 6614 /* moves to/from ARs have collisions */ 6615 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3) 6616 { 6617 while (idesc != NULL 6618 && (idesc->operands[0] != opnd1 6619 || idesc->operands[1] != opnd2)) 6620 idesc = get_next_opcode (idesc); 6621 } 6622 md.slot[curr].idesc = idesc; 6623 } 6624 else 6625 { 6626 insn_type = idesc->type; 6627 insn_unit = IA64_UNIT_NIL; 6628 switch (insn_type) 6629 { 6630 case IA64_TYPE_A: 6631 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M) 6632 insn_unit = required_unit; 6633 break; 6634 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break; 6635 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break; 6636 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break; 6637 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break; 6638 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break; 6639 default: break; 6640 } 6641 } 6642 6643 if (insn_unit != required_unit) 6644 continue; /* Try next slot. */ 6645 6646 /* Now is a good time to fix up the labels for this insn. */ 6647 mark_label = FALSE; 6648 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next) 6649 { 6650 S_SET_VALUE (lfix->sym, frag_now_fix () - 16); 6651 symbol_set_frag (lfix->sym, frag_now); 6652 mark_label |= lfix->dw2_mark_labels; 6653 } 6654 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next) 6655 { 6656 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i); 6657 symbol_set_frag (lfix->sym, frag_now); 6658 } 6659 6660 if (debug_type == DEBUG_DWARF2 6661 || md.slot[curr].loc_directive_seen 6662 || mark_label) 6663 { 6664 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i; 6665 6666 md.slot[curr].loc_directive_seen = 0; 6667 if (mark_label) 6668 md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK; 6669 6670 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line); 6671 } 6672 6673 build_insn (md.slot + curr, insn + i); 6674 6675 ptr = md.slot[curr].unwind_record; 6676 if (ptr) 6677 { 6678 /* Set slot numbers for all remaining unwind records belonging to the 6679 current insn. There can not be any prologue/body unwind records 6680 here. 
*/ 6681 for (; ptr != end_ptr; ptr = ptr->next) 6682 { 6683 ptr->slot_number = (unsigned long) f + i; 6684 ptr->slot_frag = frag_now; 6685 } 6686 md.slot[curr].unwind_record = NULL; 6687 } 6688 6689 if (required_unit == IA64_UNIT_L) 6690 { 6691 know (i == 1); 6692 /* skip one slot for long/X-unit instructions */ 6693 ++i; 6694 } 6695 --md.num_slots_in_use; 6696 last_slot = i; 6697 6698 for (j = 0; j < md.slot[curr].num_fixups; ++j) 6699 { 6700 ifix = md.slot[curr].fixup + j; 6701 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8, 6702 &ifix->expr, ifix->is_pcrel, ifix->code); 6703 fix->tc_fix_data.opnd = ifix->opnd; 6704 fix->fx_file = md.slot[curr].src_file; 6705 fix->fx_line = md.slot[curr].src_line; 6706 } 6707 6708 end_of_insn_group = md.slot[curr].end_of_insn_group; 6709 6710 /* clear slot: */ 6711 ia64_free_opcode (md.slot[curr].idesc); 6712 memset (md.slot + curr, 0, sizeof (md.slot[curr])); 6713 md.slot[curr].user_template = -1; 6714 6715 if (manual_bundling_off) 6716 { 6717 manual_bundling = 0; 6718 break; 6719 } 6720 curr = (curr + 1) % NUM_SLOTS; 6721 idesc = md.slot[curr].idesc; 6722 } 6723 6724 /* A user template was specified, but the first following instruction did 6725 not fit. This can happen with or without manual bundling. */ 6726 if (md.num_slots_in_use > 0 && last_slot < 0) 6727 { 6728 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line, 6729 _("`%s' does not fit into %s template"), 6730 idesc->name, ia64_templ_desc[template].name); 6731 /* Drop first insn so we don't livelock. */ 6732 --md.num_slots_in_use; 6733 know (curr == first); 6734 ia64_free_opcode (md.slot[curr].idesc); 6735 memset (md.slot + curr, 0, sizeof (md.slot[curr])); 6736 md.slot[curr].user_template = -1; 6737 } 6738 else if (manual_bundling > 0) 6739 { 6740 if (md.num_slots_in_use > 0) 6741 { 6742 if (last_slot >= 2) 6743 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line, 6744 _("`%s' does not fit into bundle"), idesc->name); 6745 else 6746 { 6747 const char *where; 6748 6749 if (template == 2) 6750 where = "X slot"; 6751 else if (last_slot == 0) 6752 where = "slots 2 or 3"; 6753 else 6754 where = "slot 3"; 6755 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line, 6756 _("`%s' can't go in %s of %s template"), 6757 idesc->name, where, ia64_templ_desc[template].name); 6758 } 6759 } 6760 else 6761 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line, 6762 _("Missing '}' at end of file")); 6763 } 6764 6765 know (md.num_slots_in_use < NUM_SLOTS); 6766 6767 t0 = end_of_insn_group | (template << 1) | (insn[0] << 5) | (insn[1] << 46); 6768 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23); 6769 6770 number_to_chars_littleendian (f + 0, t0, 8); 6771 number_to_chars_littleendian (f + 8, t1, 8); 6772 } 6773 6774 int 6775 md_parse_option (int c, char *arg) 6776 { 6777 6778 switch (c) 6779 { 6780 /* Switches from the Intel assembler. 
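       For example (illustrative invocations only): "-milp32 -mbe" selects
       the 32-bit ABI with big-endian output, "-munwind-check=error" turns
       unwind directive diagnostics into hard errors, and "-mtune=itanium1"
       changes the nop-placement heuristics used when picking templates.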
*/ 6781 case 'm': 6782 if (strcmp (arg, "ilp64") == 0 6783 || strcmp (arg, "lp64") == 0 6784 || strcmp (arg, "p64") == 0) 6785 { 6786 md.flags |= EF_IA_64_ABI64; 6787 } 6788 else if (strcmp (arg, "ilp32") == 0) 6789 { 6790 md.flags &= ~EF_IA_64_ABI64; 6791 } 6792 else if (strcmp (arg, "le") == 0) 6793 { 6794 md.flags &= ~EF_IA_64_BE; 6795 default_big_endian = 0; 6796 } 6797 else if (strcmp (arg, "be") == 0) 6798 { 6799 md.flags |= EF_IA_64_BE; 6800 default_big_endian = 1; 6801 } 6802 else if (strncmp (arg, "unwind-check=", 13) == 0) 6803 { 6804 arg += 13; 6805 if (strcmp (arg, "warning") == 0) 6806 md.unwind_check = unwind_check_warning; 6807 else if (strcmp (arg, "error") == 0) 6808 md.unwind_check = unwind_check_error; 6809 else 6810 return 0; 6811 } 6812 else if (strncmp (arg, "hint.b=", 7) == 0) 6813 { 6814 arg += 7; 6815 if (strcmp (arg, "ok") == 0) 6816 md.hint_b = hint_b_ok; 6817 else if (strcmp (arg, "warning") == 0) 6818 md.hint_b = hint_b_warning; 6819 else if (strcmp (arg, "error") == 0) 6820 md.hint_b = hint_b_error; 6821 else 6822 return 0; 6823 } 6824 else if (strncmp (arg, "tune=", 5) == 0) 6825 { 6826 arg += 5; 6827 if (strcmp (arg, "itanium1") == 0) 6828 md.tune = itanium1; 6829 else if (strcmp (arg, "itanium2") == 0) 6830 md.tune = itanium2; 6831 else 6832 return 0; 6833 } 6834 else 6835 return 0; 6836 break; 6837 6838 case 'N': 6839 if (strcmp (arg, "so") == 0) 6840 { 6841 /* Suppress signon message. */ 6842 } 6843 else if (strcmp (arg, "pi") == 0) 6844 { 6845 /* Reject privileged instructions. FIXME */ 6846 } 6847 else if (strcmp (arg, "us") == 0) 6848 { 6849 /* Allow union of signed and unsigned range. FIXME */ 6850 } 6851 else if (strcmp (arg, "close_fcalls") == 0) 6852 { 6853 /* Do not resolve global function calls. */ 6854 } 6855 else 6856 return 0; 6857 break; 6858 6859 case 'C': 6860 /* temp[="prefix"] Insert temporary labels into the object file 6861 symbol table prefixed by "prefix". 6862 Default prefix is ":temp:". 6863 */ 6864 break; 6865 6866 case 'a': 6867 /* indirect=<tgt> Assume unannotated indirect branches behavior 6868 according to <tgt> -- 6869 exit: branch out from the current context (default) 6870 labels: all labels in context may be branch targets 6871 */ 6872 if (strncmp (arg, "indirect=", 9) != 0) 6873 return 0; 6874 break; 6875 6876 case 'x': 6877 /* -X conflicts with an ignored option, use -x instead */ 6878 md.detect_dv = 1; 6879 if (!arg || strcmp (arg, "explicit") == 0) 6880 { 6881 /* set default mode to explicit */ 6882 md.default_explicit_mode = 1; 6883 break; 6884 } 6885 else if (strcmp (arg, "auto") == 0) 6886 { 6887 md.default_explicit_mode = 0; 6888 } 6889 else if (strcmp (arg, "none") == 0) 6890 { 6891 md.detect_dv = 0; 6892 } 6893 else if (strcmp (arg, "debug") == 0) 6894 { 6895 md.debug_dv = 1; 6896 } 6897 else if (strcmp (arg, "debugx") == 0) 6898 { 6899 md.default_explicit_mode = 1; 6900 md.debug_dv = 1; 6901 } 6902 else if (strcmp (arg, "debugn") == 0) 6903 { 6904 md.debug_dv = 1; 6905 md.detect_dv = 0; 6906 } 6907 else 6908 { 6909 as_bad (_("Unrecognized option '-x%s'"), arg); 6910 } 6911 break; 6912 6913 case 'S': 6914 /* nops Print nops statistics. */ 6915 break; 6916 6917 /* GNU specific switches for gcc. 
*/ 6918 case OPTION_MCONSTANT_GP: 6919 md.flags |= EF_IA_64_CONS_GP; 6920 break; 6921 6922 case OPTION_MAUTO_PIC: 6923 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP; 6924 break; 6925 6926 default: 6927 return 0; 6928 } 6929 6930 return 1; 6931 } 6932 6933 void 6934 md_show_usage (FILE *stream) 6935 { 6936 fputs (_("\ 6937 IA-64 options:\n\ 6938 --mconstant-gp mark output file as using the constant-GP model\n\ 6939 (sets ELF header flag EF_IA_64_CONS_GP)\n\ 6940 --mauto-pic mark output file as using the constant-GP model\n\ 6941 without function descriptors (sets ELF header flag\n\ 6942 EF_IA_64_NOFUNCDESC_CONS_GP)\n\ 6943 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\ 6944 -mle | -mbe select little- or big-endian byte order (default -mle)\n\ 6945 -mtune=[itanium1|itanium2]\n\ 6946 tune for a specific CPU (default -mtune=itanium2)\n\ 6947 -munwind-check=[warning|error]\n\ 6948 unwind directive check (default -munwind-check=warning)\n\ 6949 -mhint.b=[ok|warning|error]\n\ 6950 hint.b check (default -mhint.b=error)\n\ 6951 -x | -xexplicit turn on dependency violation checking\n\ 6952 -xauto automagically remove dependency violations (default)\n\ 6953 -xnone turn off dependency violation checking\n\ 6954 -xdebug debug dependency violation checker\n\ 6955 -xdebugn debug dependency violation checker but turn off\n\ 6956 dependency violation checking\n\ 6957 -xdebugx debug dependency violation checker and turn on\n\ 6958 dependency violation checking\n"), 6959 stream); 6960 } 6961 6962 void 6963 ia64_after_parse_args (void) 6964 { 6965 if (debug_type == DEBUG_STABS) 6966 as_fatal (_("--gstabs is not supported for ia64")); 6967 } 6968 6969 /* Return true if TYPE fits in TEMPL at SLOT. */ 6970 6971 static int 6972 match (int templ, int type, int slot) 6973 { 6974 enum ia64_unit unit; 6975 int result; 6976 6977 unit = ia64_templ_desc[templ].exec_unit[slot]; 6978 switch (type) 6979 { 6980 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */ 6981 case IA64_TYPE_A: 6982 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M); 6983 break; 6984 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break; 6985 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break; 6986 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break; 6987 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break; 6988 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break; 6989 default: result = 0; break; 6990 } 6991 return result; 6992 } 6993 6994 /* For Itanium 1, add a bit of extra goodness if a nop of type F or B would fit 6995 in TEMPL at SLOT. For Itanium 2, add a bit of extra goodness if a nop of 6996 type M or I would fit in TEMPL at SLOT. */ 6997 6998 static inline int 6999 extra_goodness (int templ, int slot) 7000 { 7001 switch (md.tune) 7002 { 7003 case itanium1: 7004 if (slot == 1 && match (templ, IA64_TYPE_F, slot)) 7005 return 2; 7006 else if (slot == 2 && match (templ, IA64_TYPE_B, slot)) 7007 return 1; 7008 else 7009 return 0; 7010 break; 7011 case itanium2: 7012 if (match (templ, IA64_TYPE_M, slot) 7013 || match (templ, IA64_TYPE_I, slot)) 7014 /* Favor M- and I-unit NOPs. We definitely want to avoid 7015 F-unit and B-unit may cause split-issue or less-than-optimal 7016 branch-prediction. */ 7017 return 2; 7018 else 7019 return 0; 7020 break; 7021 default: 7022 abort (); 7023 return 0; 7024 } 7025 } 7026 7027 /* This function is called once, at assembler startup time. It sets 7028 up all the tables, etc. 
that the MD part of the assembler will need 7029 that can be determined before arguments are parsed. */ 7030 void 7031 md_begin (void) 7032 { 7033 int i, j, k, t, goodness, best, ok; 7034 const char *err; 7035 char name[8]; 7036 7037 md.auto_align = 1; 7038 md.explicit_mode = md.default_explicit_mode; 7039 7040 bfd_set_section_alignment (stdoutput, text_section, 4); 7041 7042 /* Make sure function pointers get initialized. */ 7043 target_big_endian = -1; 7044 dot_byteorder (default_big_endian); 7045 7046 alias_hash = hash_new (); 7047 alias_name_hash = hash_new (); 7048 secalias_hash = hash_new (); 7049 secalias_name_hash = hash_new (); 7050 7051 pseudo_func[FUNC_DTP_MODULE].u.sym = 7052 symbol_new (".<dtpmod>", undefined_section, FUNC_DTP_MODULE, 7053 &zero_address_frag); 7054 7055 pseudo_func[FUNC_DTP_RELATIVE].u.sym = 7056 symbol_new (".<dtprel>", undefined_section, FUNC_DTP_RELATIVE, 7057 &zero_address_frag); 7058 7059 pseudo_func[FUNC_FPTR_RELATIVE].u.sym = 7060 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE, 7061 &zero_address_frag); 7062 7063 pseudo_func[FUNC_GP_RELATIVE].u.sym = 7064 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE, 7065 &zero_address_frag); 7066 7067 pseudo_func[FUNC_LT_RELATIVE].u.sym = 7068 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE, 7069 &zero_address_frag); 7070 7071 pseudo_func[FUNC_LT_RELATIVE_X].u.sym = 7072 symbol_new (".<ltoffx>", undefined_section, FUNC_LT_RELATIVE_X, 7073 &zero_address_frag); 7074 7075 pseudo_func[FUNC_PC_RELATIVE].u.sym = 7076 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE, 7077 &zero_address_frag); 7078 7079 pseudo_func[FUNC_PLT_RELATIVE].u.sym = 7080 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE, 7081 &zero_address_frag); 7082 7083 pseudo_func[FUNC_SEC_RELATIVE].u.sym = 7084 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE, 7085 &zero_address_frag); 7086 7087 pseudo_func[FUNC_SEG_RELATIVE].u.sym = 7088 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE, 7089 &zero_address_frag); 7090 7091 pseudo_func[FUNC_TP_RELATIVE].u.sym = 7092 symbol_new (".<tprel>", undefined_section, FUNC_TP_RELATIVE, 7093 &zero_address_frag); 7094 7095 pseudo_func[FUNC_LTV_RELATIVE].u.sym = 7096 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE, 7097 &zero_address_frag); 7098 7099 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym = 7100 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE, 7101 &zero_address_frag); 7102 7103 pseudo_func[FUNC_LT_DTP_MODULE].u.sym = 7104 symbol_new (".<ltoff.dtpmod>", undefined_section, FUNC_LT_DTP_MODULE, 7105 &zero_address_frag); 7106 7107 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym = 7108 symbol_new (".<ltoff.dptrel>", undefined_section, FUNC_LT_DTP_RELATIVE, 7109 &zero_address_frag); 7110 7111 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym = 7112 symbol_new (".<ltoff.tprel>", undefined_section, FUNC_LT_TP_RELATIVE, 7113 &zero_address_frag); 7114 7115 pseudo_func[FUNC_IPLT_RELOC].u.sym = 7116 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC, 7117 &zero_address_frag); 7118 7119 if (md.tune != itanium1) 7120 { 7121 /* Convert MFI NOPs bundles into MMI NOPs bundles. */ 7122 le_nop[0] = 0x8; 7123 le_nop_stop[0] = 0x9; 7124 } 7125 7126 /* Compute the table of best templates. We compute goodness as a 7127 base 4 value, in which each match counts for 3. Match-failures 7128 result in NOPs and we use extra_goodness() to pick the execution 7129 units that are best suited for issuing the NOP. 
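     Worked example (illustrative): for the type triple (M, I, I), template
     MII matches all three slots and gets the maximum score of 3 + 3 + 3,
     while MMI matches only slots 0 and 2 and gets 3 + 3 plus
     extra_goodness() for slot 1, so MII ends up recorded as the best
     template for that triple.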
*/ 7130 for (i = 0; i < IA64_NUM_TYPES; ++i) 7131 for (j = 0; j < IA64_NUM_TYPES; ++j) 7132 for (k = 0; k < IA64_NUM_TYPES; ++k) 7133 { 7134 best = 0; 7135 for (t = 0; t < NELEMS (ia64_templ_desc); ++t) 7136 { 7137 goodness = 0; 7138 if (match (t, i, 0)) 7139 { 7140 if (match (t, j, 1)) 7141 { 7142 if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2)) 7143 goodness = 3 + 3 + 3; 7144 else 7145 goodness = 3 + 3 + extra_goodness (t, 2); 7146 } 7147 else if (match (t, j, 2)) 7148 goodness = 3 + 3 + extra_goodness (t, 1); 7149 else 7150 { 7151 goodness = 3; 7152 goodness += extra_goodness (t, 1); 7153 goodness += extra_goodness (t, 2); 7154 } 7155 } 7156 else if (match (t, i, 1)) 7157 { 7158 if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2)) 7159 goodness = 3 + 3; 7160 else 7161 goodness = 3 + extra_goodness (t, 2); 7162 } 7163 else if (match (t, i, 2)) 7164 goodness = 3 + extra_goodness (t, 1); 7165 7166 if (goodness > best) 7167 { 7168 best = goodness; 7169 best_template[i][j][k] = t; 7170 } 7171 } 7172 } 7173 7174 #ifdef DEBUG_TEMPLATES 7175 /* For debugging changes to the best_template calculations. We don't care 7176 about combinations with invalid instructions, so start the loops at 1. */ 7177 for (i = 0; i < IA64_NUM_TYPES; ++i) 7178 for (j = 0; j < IA64_NUM_TYPES; ++j) 7179 for (k = 0; k < IA64_NUM_TYPES; ++k) 7180 { 7181 char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f', 7182 'x', 'd' }; 7183 fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j], 7184 type_letter[k], 7185 ia64_templ_desc[best_template[i][j][k]].name); 7186 } 7187 #endif 7188 7189 for (i = 0; i < NUM_SLOTS; ++i) 7190 md.slot[i].user_template = -1; 7191 7192 md.pseudo_hash = hash_new (); 7193 for (i = 0; i < NELEMS (pseudo_opcode); ++i) 7194 { 7195 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name, 7196 (void *) (pseudo_opcode + i)); 7197 if (err) 7198 as_fatal (_("ia64.md_begin: can't hash `%s': %s"), 7199 pseudo_opcode[i].name, err); 7200 } 7201 7202 md.reg_hash = hash_new (); 7203 md.dynreg_hash = hash_new (); 7204 md.const_hash = hash_new (); 7205 md.entry_hash = hash_new (); 7206 7207 /* general registers: */ 7208 declare_register_set ("r", 128, REG_GR); 7209 declare_register ("gp", REG_GR + 1); 7210 declare_register ("sp", REG_GR + 12); 7211 declare_register ("tp", REG_GR + 13); 7212 declare_register_set ("ret", 4, REG_GR + 8); 7213 7214 /* floating point registers: */ 7215 declare_register_set ("f", 128, REG_FR); 7216 declare_register_set ("farg", 8, REG_FR + 8); 7217 declare_register_set ("fret", 8, REG_FR + 8); 7218 7219 /* branch registers: */ 7220 declare_register_set ("b", 8, REG_BR); 7221 declare_register ("rp", REG_BR + 0); 7222 7223 /* predicate registers: */ 7224 declare_register_set ("p", 64, REG_P); 7225 declare_register ("pr", REG_PR); 7226 declare_register ("pr.rot", REG_PR_ROT); 7227 7228 /* application registers: */ 7229 declare_register_set ("ar", 128, REG_AR); 7230 for (i = 0; i < NELEMS (ar); ++i) 7231 declare_register (ar[i].name, REG_AR + ar[i].regnum); 7232 7233 /* control registers: */ 7234 declare_register_set ("cr", 128, REG_CR); 7235 for (i = 0; i < NELEMS (cr); ++i) 7236 declare_register (cr[i].name, REG_CR + cr[i].regnum); 7237 7238 declare_register ("ip", REG_IP); 7239 declare_register ("cfm", REG_CFM); 7240 declare_register ("psr", REG_PSR); 7241 declare_register ("psr.l", REG_PSR_L); 7242 declare_register ("psr.um", REG_PSR_UM); 7243 7244 for (i = 0; i < NELEMS (indirect_reg); ++i) 7245 { 7246 unsigned int regnum = indirect_reg[i].regnum; 7247 7248 
md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum); 7249 } 7250 7251 /* pseudo-registers used to specify unwind info: */ 7252 declare_register ("psp", REG_PSP); 7253 7254 for (i = 0; i < NELEMS (const_bits); ++i) 7255 { 7256 err = hash_insert (md.const_hash, const_bits[i].name, 7257 (void *) (const_bits + i)); 7258 if (err) 7259 as_fatal (_("Inserting \"%s\" into constant hash table failed: %s"), 7260 name, err); 7261 } 7262 7263 /* Set the architecture and machine depending on defaults and command line 7264 options. */ 7265 if (md.flags & EF_IA_64_ABI64) 7266 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64); 7267 else 7268 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32); 7269 7270 if (! ok) 7271 as_warn (_("Could not set architecture and machine")); 7272 7273 /* Set the pointer size and pointer shift size depending on md.flags */ 7274 7275 if (md.flags & EF_IA_64_ABI64) 7276 { 7277 md.pointer_size = 8; /* pointers are 8 bytes */ 7278 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^2 */ 7279 } 7280 else 7281 { 7282 md.pointer_size = 4; /* pointers are 4 bytes */ 7283 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */ 7284 } 7285 7286 md.mem_offset.hint = 0; 7287 md.path = 0; 7288 md.maxpaths = 0; 7289 md.entry_labels = NULL; 7290 } 7291 7292 /* Set the default options in md. Cannot do this in md_begin because 7293 that is called after md_parse_option which is where we set the 7294 options in md based on command line options. */ 7295 7296 void 7297 ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED) 7298 { 7299 md.flags = MD_FLAGS_DEFAULT; 7300 md.detect_dv = 1; 7301 /* FIXME: We should change it to unwind_check_error someday. */ 7302 md.unwind_check = unwind_check_warning; 7303 md.hint_b = hint_b_error; 7304 md.tune = itanium2; 7305 } 7306 7307 /* Return a string for the target object file format. */ 7308 7309 const char * 7310 ia64_target_format (void) 7311 { 7312 if (OUTPUT_FLAVOR == bfd_target_elf_flavour) 7313 { 7314 if (md.flags & EF_IA_64_BE) 7315 { 7316 if (md.flags & EF_IA_64_ABI64) 7317 #if defined(TE_AIX50) 7318 return "elf64-ia64-aix-big"; 7319 #elif defined(TE_HPUX) 7320 return "elf64-ia64-hpux-big"; 7321 #else 7322 return "elf64-ia64-big"; 7323 #endif 7324 else 7325 #if defined(TE_AIX50) 7326 return "elf32-ia64-aix-big"; 7327 #elif defined(TE_HPUX) 7328 return "elf32-ia64-hpux-big"; 7329 #else 7330 return "elf32-ia64-big"; 7331 #endif 7332 } 7333 else 7334 { 7335 if (md.flags & EF_IA_64_ABI64) 7336 #ifdef TE_AIX50 7337 return "elf64-ia64-aix-little"; 7338 #else 7339 return "elf64-ia64-little"; 7340 #endif 7341 else 7342 #ifdef TE_AIX50 7343 return "elf32-ia64-aix-little"; 7344 #else 7345 return "elf32-ia64-little"; 7346 #endif 7347 } 7348 } 7349 else 7350 return "unknown-format"; 7351 } 7352 7353 void 7354 ia64_end_of_source (void) 7355 { 7356 /* terminate insn group upon reaching end of file: */ 7357 insn_group_break (1, 0, 0); 7358 7359 /* emits slots we haven't written yet: */ 7360 ia64_flush_insns (); 7361 7362 bfd_set_private_flags (stdoutput, md.flags); 7363 7364 md.mem_offset.hint = 0; 7365 } 7366 7367 void 7368 ia64_start_line (void) 7369 { 7370 static int first; 7371 7372 if (!first) { 7373 /* Make sure we don't reference input_line_pointer[-1] when that's 7374 not valid. 
*/ 7375 first = 1; 7376 return; 7377 } 7378 7379 if (md.qp.X_op == O_register) 7380 as_bad (_("qualifying predicate not followed by instruction")); 7381 md.qp.X_op = O_absent; 7382 7383 if (ignore_input ()) 7384 return; 7385 7386 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';') 7387 { 7388 if (md.detect_dv && !md.explicit_mode) 7389 { 7390 static int warned; 7391 7392 if (!warned) 7393 { 7394 warned = 1; 7395 as_warn (_("Explicit stops are ignored in auto mode")); 7396 } 7397 } 7398 else 7399 insn_group_break (1, 0, 0); 7400 } 7401 else if (input_line_pointer[-1] == '{') 7402 { 7403 if (md.manual_bundling) 7404 as_warn (_("Found '{' when manual bundling is already turned on")); 7405 else 7406 CURR_SLOT.manual_bundling_on = 1; 7407 md.manual_bundling = 1; 7408 7409 /* Bundling is only acceptable in explicit mode 7410 or when in default automatic mode. */ 7411 if (md.detect_dv && !md.explicit_mode) 7412 { 7413 if (!md.mode_explicitly_set 7414 && !md.default_explicit_mode) 7415 dot_dv_mode ('E'); 7416 else 7417 as_warn (_("Found '{' after explicit switch to automatic mode")); 7418 } 7419 } 7420 else if (input_line_pointer[-1] == '}') 7421 { 7422 if (!md.manual_bundling) 7423 as_warn (_("Found '}' when manual bundling is off")); 7424 else 7425 PREV_SLOT.manual_bundling_off = 1; 7426 md.manual_bundling = 0; 7427 7428 /* switch back to automatic mode, if applicable */ 7429 if (md.detect_dv 7430 && md.explicit_mode 7431 && !md.mode_explicitly_set 7432 && !md.default_explicit_mode) 7433 dot_dv_mode ('A'); 7434 } 7435 } 7436 7437 /* This is a hook for ia64_frob_label, so that it can distinguish tags from 7438 labels. */ 7439 static int defining_tag = 0; 7440 7441 int 7442 ia64_unrecognized_line (int ch) 7443 { 7444 switch (ch) 7445 { 7446 case '(': 7447 expression_and_evaluate (&md.qp); 7448 if (*input_line_pointer++ != ')') 7449 { 7450 as_bad (_("Expected ')'")); 7451 return 0; 7452 } 7453 if (md.qp.X_op != O_register) 7454 { 7455 as_bad (_("Qualifying predicate expected")); 7456 return 0; 7457 } 7458 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64) 7459 { 7460 as_bad (_("Predicate register expected")); 7461 return 0; 7462 } 7463 return 1; 7464 7465 case '[': 7466 { 7467 char *s; 7468 char c; 7469 symbolS *tag; 7470 int temp; 7471 7472 if (md.qp.X_op == O_register) 7473 { 7474 as_bad (_("Tag must come before qualifying predicate.")); 7475 return 0; 7476 } 7477 7478 /* This implements just enough of read_a_source_file in read.c to 7479 recognize labels. */ 7480 if (is_name_beginner (*input_line_pointer)) 7481 { 7482 s = input_line_pointer; 7483 c = get_symbol_end (); 7484 } 7485 else if (LOCAL_LABELS_FB 7486 && ISDIGIT (*input_line_pointer)) 7487 { 7488 temp = 0; 7489 while (ISDIGIT (*input_line_pointer)) 7490 temp = (temp * 10) + *input_line_pointer++ - '0'; 7491 fb_label_instance_inc (temp); 7492 s = fb_label_name (temp, 0); 7493 c = *input_line_pointer; 7494 } 7495 else 7496 { 7497 s = NULL; 7498 c = '\0'; 7499 } 7500 if (c != ':') 7501 { 7502 /* Put ':' back for error messages' sake. */ 7503 *input_line_pointer++ = ':'; 7504 as_bad (_("Expected ':'")); 7505 return 0; 7506 } 7507 7508 defining_tag = 1; 7509 tag = colon (s); 7510 defining_tag = 0; 7511 /* Put ':' back for error messages' sake. */ 7512 *input_line_pointer++ = ':'; 7513 if (*input_line_pointer++ != ']') 7514 { 7515 as_bad (_("Expected ']'")); 7516 return 0; 7517 } 7518 if (! 
tag) 7519 { 7520 as_bad (_("Tag name expected")); 7521 return 0; 7522 } 7523 return 1; 7524 } 7525 7526 default: 7527 break; 7528 } 7529 7530 /* Not a valid line. */ 7531 return 0; 7532 } 7533 7534 void 7535 ia64_frob_label (struct symbol *sym) 7536 { 7537 struct label_fix *fix; 7538 7539 /* Tags need special handling since they are not bundle breaks like 7540 labels. */ 7541 if (defining_tag) 7542 { 7543 fix = obstack_alloc (¬es, sizeof (*fix)); 7544 fix->sym = sym; 7545 fix->next = CURR_SLOT.tag_fixups; 7546 fix->dw2_mark_labels = FALSE; 7547 CURR_SLOT.tag_fixups = fix; 7548 7549 return; 7550 } 7551 7552 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) 7553 { 7554 md.last_text_seg = now_seg; 7555 fix = obstack_alloc (¬es, sizeof (*fix)); 7556 fix->sym = sym; 7557 fix->next = CURR_SLOT.label_fixups; 7558 fix->dw2_mark_labels = dwarf2_loc_mark_labels; 7559 CURR_SLOT.label_fixups = fix; 7560 7561 /* Keep track of how many code entry points we've seen. */ 7562 if (md.path == md.maxpaths) 7563 { 7564 md.maxpaths += 20; 7565 md.entry_labels = (const char **) 7566 xrealloc ((void *) md.entry_labels, 7567 md.maxpaths * sizeof (char *)); 7568 } 7569 md.entry_labels[md.path++] = S_GET_NAME (sym); 7570 } 7571 } 7572 7573 #ifdef TE_HPUX 7574 /* The HP-UX linker will give unresolved symbol errors for symbols 7575 that are declared but unused. This routine removes declared, 7576 unused symbols from an object. */ 7577 int 7578 ia64_frob_symbol (struct symbol *sym) 7579 { 7580 if ((S_GET_SEGMENT (sym) == &bfd_und_section && ! symbol_used_p (sym) && 7581 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT) 7582 || (S_GET_SEGMENT (sym) == &bfd_abs_section 7583 && ! S_IS_EXTERNAL (sym))) 7584 return 1; 7585 return 0; 7586 } 7587 #endif 7588 7589 void 7590 ia64_flush_pending_output (void) 7591 { 7592 if (!md.keep_pending_output 7593 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) 7594 { 7595 /* ??? This causes many unnecessary stop bits to be emitted. 7596 Unfortunately, it isn't clear if it is safe to remove this. */ 7597 insn_group_break (1, 0, 0); 7598 ia64_flush_insns (); 7599 } 7600 } 7601 7602 /* Do ia64-specific expression optimization. All that's done here is 7603 to transform index expressions that are either due to the indexing 7604 of rotating registers or due to the indexing of indirect register 7605 sets. */ 7606 int 7607 ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r) 7608 { 7609 if (op != O_index) 7610 return 0; 7611 resolve_expression (l); 7612 if (l->X_op == O_register) 7613 { 7614 unsigned num_regs = l->X_add_number >> 16; 7615 7616 resolve_expression (r); 7617 if (num_regs) 7618 { 7619 /* Left side is a .rotX-allocated register. 
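	     For example (illustrative), after ".rotr buf[4]" the expression
	     buf[2] arrives here with num_regs == 4 and folds to the register
	     two past the set's base, while something like dbr[r5] takes the
	     indirect-register branch below instead.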
*/ 7620 if (r->X_op != O_constant) 7621 { 7622 as_bad (_("Rotating register index must be a non-negative constant")); 7623 r->X_add_number = 0; 7624 } 7625 else if ((valueT) r->X_add_number >= num_regs) 7626 { 7627 as_bad (_("Index out of range 0..%u"), num_regs - 1); 7628 r->X_add_number = 0; 7629 } 7630 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number; 7631 return 1; 7632 } 7633 else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR) 7634 { 7635 if (r->X_op != O_register 7636 || r->X_add_number < REG_GR 7637 || r->X_add_number > REG_GR + 127) 7638 { 7639 as_bad (_("Indirect register index must be a general register")); 7640 r->X_add_number = REG_GR; 7641 } 7642 l->X_op = O_index; 7643 l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID]; 7644 l->X_add_number = r->X_add_number; 7645 return 1; 7646 } 7647 } 7648 as_bad (_("Index can only be applied to rotating or indirect registers")); 7649 /* Fall back to some register use of which has as little as possible 7650 side effects, to minimize subsequent error messages. */ 7651 l->X_op = O_register; 7652 l->X_add_number = REG_GR + 3; 7653 return 1; 7654 } 7655 7656 int 7657 ia64_parse_name (char *name, expressionS *e, char *nextcharP) 7658 { 7659 struct const_desc *cdesc; 7660 struct dynreg *dr = 0; 7661 unsigned int idx; 7662 struct symbol *sym; 7663 char *end; 7664 7665 if (*name == '@') 7666 { 7667 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE; 7668 7669 /* Find what relocation pseudo-function we're dealing with. */ 7670 for (idx = 0; idx < NELEMS (pseudo_func); ++idx) 7671 if (pseudo_func[idx].name 7672 && pseudo_func[idx].name[0] == name[1] 7673 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0) 7674 { 7675 pseudo_type = pseudo_func[idx].type; 7676 break; 7677 } 7678 switch (pseudo_type) 7679 { 7680 case PSEUDO_FUNC_RELOC: 7681 end = input_line_pointer; 7682 if (*nextcharP != '(') 7683 { 7684 as_bad (_("Expected '('")); 7685 break; 7686 } 7687 /* Skip '('. */ 7688 ++input_line_pointer; 7689 expression (e); 7690 if (*input_line_pointer != ')') 7691 { 7692 as_bad (_("Missing ')'")); 7693 goto done; 7694 } 7695 /* Skip ')'. */ 7696 ++input_line_pointer; 7697 if (e->X_op != O_symbol) 7698 { 7699 if (e->X_op != O_pseudo_fixup) 7700 { 7701 as_bad (_("Not a symbolic expression")); 7702 goto done; 7703 } 7704 if (idx != FUNC_LT_RELATIVE) 7705 { 7706 as_bad (_("Illegal combination of relocation functions")); 7707 goto done; 7708 } 7709 switch (S_GET_VALUE (e->X_op_symbol)) 7710 { 7711 case FUNC_FPTR_RELATIVE: 7712 idx = FUNC_LT_FPTR_RELATIVE; break; 7713 case FUNC_DTP_MODULE: 7714 idx = FUNC_LT_DTP_MODULE; break; 7715 case FUNC_DTP_RELATIVE: 7716 idx = FUNC_LT_DTP_RELATIVE; break; 7717 case FUNC_TP_RELATIVE: 7718 idx = FUNC_LT_TP_RELATIVE; break; 7719 default: 7720 as_bad (_("Illegal combination of relocation functions")); 7721 goto done; 7722 } 7723 } 7724 /* Make sure gas doesn't get rid of local symbols that are used 7725 in relocs. 
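	     (For example, "addl r2 = @ltoff(foo), gp" is parsed through this
	     path; the nested form @ltoff(@fptr(foo)) has already been folded
	     into FUNC_LT_FPTR_RELATIVE by the switch above.)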
*/ 7726 e->X_op = O_pseudo_fixup; 7727 e->X_op_symbol = pseudo_func[idx].u.sym; 7728 done: 7729 *nextcharP = *input_line_pointer; 7730 break; 7731 7732 case PSEUDO_FUNC_CONST: 7733 e->X_op = O_constant; 7734 e->X_add_number = pseudo_func[idx].u.ival; 7735 break; 7736 7737 case PSEUDO_FUNC_REG: 7738 e->X_op = O_register; 7739 e->X_add_number = pseudo_func[idx].u.ival; 7740 break; 7741 7742 default: 7743 return 0; 7744 } 7745 return 1; 7746 } 7747 7748 /* first see if NAME is a known register name: */ 7749 sym = hash_find (md.reg_hash, name); 7750 if (sym) 7751 { 7752 e->X_op = O_register; 7753 e->X_add_number = S_GET_VALUE (sym); 7754 return 1; 7755 } 7756 7757 cdesc = hash_find (md.const_hash, name); 7758 if (cdesc) 7759 { 7760 e->X_op = O_constant; 7761 e->X_add_number = cdesc->value; 7762 return 1; 7763 } 7764 7765 /* check for inN, locN, or outN: */ 7766 idx = 0; 7767 switch (name[0]) 7768 { 7769 case 'i': 7770 if (name[1] == 'n' && ISDIGIT (name[2])) 7771 { 7772 dr = &md.in; 7773 idx = 2; 7774 } 7775 break; 7776 7777 case 'l': 7778 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3])) 7779 { 7780 dr = &md.loc; 7781 idx = 3; 7782 } 7783 break; 7784 7785 case 'o': 7786 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3])) 7787 { 7788 dr = &md.out; 7789 idx = 3; 7790 } 7791 break; 7792 7793 default: 7794 break; 7795 } 7796 7797 /* Ignore register numbers with leading zeroes, except zero itself. */ 7798 if (dr && (name[idx] != '0' || name[idx + 1] == '\0')) 7799 { 7800 unsigned long regnum; 7801 7802 /* The name is inN, locN, or outN; parse the register number. */ 7803 regnum = strtoul (name + idx, &end, 10); 7804 if (end > name + idx && *end == '\0' && regnum < 96) 7805 { 7806 if (regnum >= dr->num_regs) 7807 { 7808 if (!dr->num_regs) 7809 as_bad (_("No current frame")); 7810 else 7811 as_bad (_("Register number out of range 0..%u"), 7812 dr->num_regs - 1); 7813 regnum = 0; 7814 } 7815 e->X_op = O_register; 7816 e->X_add_number = dr->base + regnum; 7817 return 1; 7818 } 7819 } 7820 7821 end = alloca (strlen (name) + 1); 7822 strcpy (end, name); 7823 name = ia64_canonicalize_symbol_name (end); 7824 if ((dr = hash_find (md.dynreg_hash, name))) 7825 { 7826 /* We've got ourselves the name of a rotating register set. 7827 Store the base register number in the low 16 bits of 7828 X_add_number and the size of the register set in the top 16 7829 bits. */ 7830 e->X_op = O_register; 7831 e->X_add_number = dr->base | (dr->num_regs << 16); 7832 return 1; 7833 } 7834 return 0; 7835 } 7836 7837 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */ 7838 7839 char * 7840 ia64_canonicalize_symbol_name (char *name) 7841 { 7842 size_t len = strlen (name), full = len; 7843 7844 while (len > 0 && name[len - 1] == '#') 7845 --len; 7846 if (len <= 0) 7847 { 7848 if (full > 0) 7849 as_bad (_("Standalone `#' is illegal")); 7850 } 7851 else if (len < full - 1) 7852 as_warn (_("Redundant `#' suffix operators")); 7853 name[len] = '\0'; 7854 return name; 7855 } 7856 7857 /* Return true if idesc is a conditional branch instruction. This excludes 7858 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded 7859 because they always read/write resources regardless of the value of the 7860 qualifying predicate. br.ia must always use p0, and hence is always 7861 taken. Thus this function returns true for branches which can fall 7862 through, and which use no resources if they do fall through. 
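   (Illustrative examples: "(p6) br.cond.dptk .L1", "br.call.sptk b0 = f"
   and "brl.cond .L2" count as conditional here, while "br.ia", "br.cloop"
   and "br.wtop" do not.)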
*/ 7863 7864 static int 7865 is_conditional_branch (struct ia64_opcode *idesc) 7866 { 7867 /* br is a conditional branch. Everything that starts with br. except 7868 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch. 7869 Everything that starts with brl is a conditional branch. */ 7870 return (idesc->name[0] == 'b' && idesc->name[1] == 'r' 7871 && (idesc->name[2] == '\0' 7872 || (idesc->name[2] == '.' && idesc->name[3] != 'i' 7873 && idesc->name[3] != 'c' && idesc->name[3] != 'w') 7874 || idesc->name[2] == 'l' 7875 /* br.cond, br.call, br.clr */ 7876 || (idesc->name[2] == '.' && idesc->name[3] == 'c' 7877 && (idesc->name[4] == 'a' || idesc->name[4] == 'o' 7878 || (idesc->name[4] == 'l' && idesc->name[5] == 'r'))))); 7879 } 7880 7881 /* Return whether the given opcode is a taken branch. If there's any doubt, 7882 returns zero. */ 7883 7884 static int 7885 is_taken_branch (struct ia64_opcode *idesc) 7886 { 7887 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0) 7888 || strncmp (idesc->name, "br.ia", 5) == 0); 7889 } 7890 7891 /* Return whether the given opcode is an interruption or rfi. If there's any 7892 doubt, returns zero. */ 7893 7894 static int 7895 is_interruption_or_rfi (struct ia64_opcode *idesc) 7896 { 7897 if (strcmp (idesc->name, "rfi") == 0) 7898 return 1; 7899 return 0; 7900 } 7901 7902 /* Returns the index of the given dependency in the opcode's list of chks, or 7903 -1 if there is no dependency. */ 7904 7905 static int 7906 depends_on (int depind, struct ia64_opcode *idesc) 7907 { 7908 int i; 7909 const struct ia64_opcode_dependency *dep = idesc->dependencies; 7910 for (i = 0; i < dep->nchks; i++) 7911 { 7912 if (depind == DEP (dep->chks[i])) 7913 return i; 7914 } 7915 return -1; 7916 } 7917 7918 /* Determine a set of specific resources used for a particular resource 7919 class. Returns the number of specific resources identified For those 7920 cases which are not determinable statically, the resource returned is 7921 marked nonspecific. 7922 7923 Meanings of value in 'NOTE': 7924 1) only read/write when the register number is explicitly encoded in the 7925 insn. 7926 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only 7927 accesses CFM when qualifying predicate is in the rotating region. 7928 3) general register value is used to specify an indirect register; not 7929 determinable statically. 7930 4) only read the given resource when bits 7:0 of the indirect index 7931 register value does not match the register number of the resource; not 7932 determinable statically. 7933 5) all rules are implementation specific. 7934 6) only when both the index specified by the reader and the index specified 7935 by the writer have the same value in bits 63:61; not determinable 7936 statically. 7937 7) only access the specified resource when the corresponding mask bit is 7938 set 7939 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is 7940 only read when these insns reference FR2-31 7941 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only 7942 written when these insns write FR32-127 7943 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the 7944 instruction 7945 11) The target predicates are written independently of PR[qp], but source 7946 registers are only read if PR[qp] is true. Since the state of PR[qp] 7947 cannot statically be determined, all source registers are marked used. 
7948 12) This insn only reads the specified predicate register when that 7949 register is the PR[qp]. 7950 13) This reference to ld-c only applies to the GR whose value is loaded 7951 with data returned from memory, not the post-incremented address register. 7952 14) The RSE resource includes the implementation-specific RSE internal 7953 state resources. At least one (and possibly more) of these resources are 7954 read by each instruction listed in IC:rse-readers. At least one (and 7955 possibly more) of these resources are written by each insn listed in 7956 IC:rse-writers. 7957 15+16) Represents reserved instructions, which the assembler does not 7958 generate. 7959 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and 7960 mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up. 7961 7962 Memory resources (i.e. locations in memory) are *not* marked or tracked by 7963 this code; there are no dependency violations based on memory access. 7964 */ 7965 7966 #define MAX_SPECS 256 7967 #define DV_CHK 1 7968 #define DV_REG 0 7969 7970 static int 7971 specify_resource (const struct ia64_dependency *dep, 7972 struct ia64_opcode *idesc, 7973 /* is this a DV chk or a DV reg? */ 7974 int type, 7975 /* returned specific resources */ 7976 struct rsrc specs[MAX_SPECS], 7977 /* resource note for this insn's usage */ 7978 int note, 7979 /* which execution path to examine */ 7980 int path) 7981 { 7982 int count = 0; 7983 int i; 7984 int rsrc_write = 0; 7985 struct rsrc tmpl; 7986 7987 if (dep->mode == IA64_DV_WAW 7988 || (dep->mode == IA64_DV_RAW && type == DV_REG) 7989 || (dep->mode == IA64_DV_WAR && type == DV_CHK)) 7990 rsrc_write = 1; 7991 7992 /* template for any resources we identify */ 7993 tmpl.dependency = dep; 7994 tmpl.note = note; 7995 tmpl.insn_srlz = tmpl.data_srlz = 0; 7996 tmpl.qp_regno = CURR_SLOT.qp_regno; 7997 tmpl.link_to_qp_branch = 1; 7998 tmpl.mem_offset.hint = 0; 7999 tmpl.mem_offset.offset = 0; 8000 tmpl.mem_offset.base = 0; 8001 tmpl.specific = 1; 8002 tmpl.index = -1; 8003 tmpl.cmp_type = CMP_NONE; 8004 tmpl.depind = 0; 8005 tmpl.file = NULL; 8006 tmpl.line = 0; 8007 tmpl.path = 0; 8008 8009 #define UNHANDLED \ 8010 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \ 8011 dep->name, idesc->name, (rsrc_write?"write":"read"), note) 8012 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path) 8013 8014 /* we don't need to track these */ 8015 if (dep->semantics == IA64_DVS_NONE) 8016 return 0; 8017 8018 switch (dep->specifier) 8019 { 8020 case IA64_RS_AR_K: 8021 if (note == 1) 8022 { 8023 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3) 8024 { 8025 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR; 8026 if (regno >= 0 && regno <= 7) 8027 { 8028 specs[count] = tmpl; 8029 specs[count++].index = regno; 8030 } 8031 } 8032 } 8033 else if (note == 0) 8034 { 8035 for (i = 0; i < 8; i++) 8036 { 8037 specs[count] = tmpl; 8038 specs[count++].index = i; 8039 } 8040 } 8041 else 8042 { 8043 UNHANDLED; 8044 } 8045 break; 8046 8047 case IA64_RS_AR_UNAT: 8048 /* This is a mov =AR or mov AR= instruction. */ 8049 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3) 8050 { 8051 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR; 8052 if (regno == AR_UNAT) 8053 { 8054 specs[count++] = tmpl; 8055 } 8056 } 8057 else 8058 { 8059 /* This is a spill/fill, or other instruction that modifies the 8060 unat register. 
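	     (E.g. "st8.spill [r9] = r31" or "ld8.fill r31 = [r9]" land here;
	     a preceding ".mem.offset" directive supplies the hint used below
	     to narrow the conflict to a single UNAT bit.)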
*/ 8061 8062 /* Unless we can determine the specific bits used, mark the whole 8063 thing; bits 8:3 of the memory address indicate the bit used in 8064 UNAT. The .mem.offset hint may be used to eliminate a small 8065 subset of conflicts. */ 8066 specs[count] = tmpl; 8067 if (md.mem_offset.hint) 8068 { 8069 if (md.debug_dv) 8070 fprintf (stderr, " Using hint for spill/fill\n"); 8071 /* The index isn't actually used, just set it to something 8072 approximating the bit index. */ 8073 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F; 8074 specs[count].mem_offset.hint = 1; 8075 specs[count].mem_offset.offset = md.mem_offset.offset; 8076 specs[count++].mem_offset.base = md.mem_offset.base; 8077 } 8078 else 8079 { 8080 specs[count++].specific = 0; 8081 } 8082 } 8083 break; 8084 8085 case IA64_RS_AR: 8086 if (note == 1) 8087 { 8088 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3) 8089 { 8090 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR; 8091 if ((regno >= 8 && regno <= 15) 8092 || (regno >= 20 && regno <= 23) 8093 || (regno >= 31 && regno <= 39) 8094 || (regno >= 41 && regno <= 47) 8095 || (regno >= 67 && regno <= 111)) 8096 { 8097 specs[count] = tmpl; 8098 specs[count++].index = regno; 8099 } 8100 } 8101 } 8102 else 8103 { 8104 UNHANDLED; 8105 } 8106 break; 8107 8108 case IA64_RS_ARb: 8109 if (note == 1) 8110 { 8111 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3) 8112 { 8113 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR; 8114 if ((regno >= 48 && regno <= 63) 8115 || (regno >= 112 && regno <= 127)) 8116 { 8117 specs[count] = tmpl; 8118 specs[count++].index = regno; 8119 } 8120 } 8121 } 8122 else if (note == 0) 8123 { 8124 for (i = 48; i < 64; i++) 8125 { 8126 specs[count] = tmpl; 8127 specs[count++].index = i; 8128 } 8129 for (i = 112; i < 128; i++) 8130 { 8131 specs[count] = tmpl; 8132 specs[count++].index = i; 8133 } 8134 } 8135 else 8136 { 8137 UNHANDLED; 8138 } 8139 break; 8140 8141 case IA64_RS_BR: 8142 if (note != 1) 8143 { 8144 UNHANDLED; 8145 } 8146 else 8147 { 8148 if (rsrc_write) 8149 { 8150 for (i = 0; i < idesc->num_outputs; i++) 8151 if (idesc->operands[i] == IA64_OPND_B1 8152 || idesc->operands[i] == IA64_OPND_B2) 8153 { 8154 specs[count] = tmpl; 8155 specs[count++].index = 8156 CURR_SLOT.opnd[i].X_add_number - REG_BR; 8157 } 8158 } 8159 else 8160 { 8161 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++) 8162 if (idesc->operands[i] == IA64_OPND_B1 8163 || idesc->operands[i] == IA64_OPND_B2) 8164 { 8165 specs[count] = tmpl; 8166 specs[count++].index = 8167 CURR_SLOT.opnd[i].X_add_number - REG_BR; 8168 } 8169 } 8170 } 8171 break; 8172 8173 case IA64_RS_CPUID: /* four or more registers */ 8174 if (note == 3) 8175 { 8176 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3) 8177 { 8178 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR; 8179 if (regno >= 0 && regno < NELEMS (gr_values) 8180 && KNOWN (regno)) 8181 { 8182 specs[count] = tmpl; 8183 specs[count++].index = gr_values[regno].value & 0xFF; 8184 } 8185 else 8186 { 8187 specs[count] = tmpl; 8188 specs[count++].specific = 0; 8189 } 8190 } 8191 } 8192 else 8193 { 8194 UNHANDLED; 8195 } 8196 break; 8197 8198 case IA64_RS_DBR: /* four or more registers */ 8199 if (note == 3) 8200 { 8201 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3) 8202 { 8203 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR; 8204 if (regno >= 0 && regno < NELEMS (gr_values) 8205 && KNOWN (regno)) 8206 { 8207 specs[count] = tmpl; 8208 specs[count++].index = 
gr_values[regno].value & 0xFF; 8209 } 8210 else 8211 { 8212 specs[count] = tmpl; 8213 specs[count++].specific = 0; 8214 } 8215 } 8216 } 8217 else if (note == 0 && !rsrc_write) 8218 { 8219 specs[count] = tmpl; 8220 specs[count++].specific = 0; 8221 } 8222 else 8223 { 8224 UNHANDLED; 8225 } 8226 break; 8227 8228 case IA64_RS_IBR: /* four or more registers */ 8229 if (note == 3) 8230 { 8231 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3) 8232 { 8233 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR; 8234 if (regno >= 0 && regno < NELEMS (gr_values) 8235 && KNOWN (regno)) 8236 { 8237 specs[count] = tmpl; 8238 specs[count++].index = gr_values[regno].value & 0xFF; 8239 } 8240 else 8241 { 8242 specs[count] = tmpl; 8243 specs[count++].specific = 0; 8244 } 8245 } 8246 } 8247 else 8248 { 8249 UNHANDLED; 8250 } 8251 break; 8252 8253 case IA64_RS_MSR: 8254 if (note == 5) 8255 { 8256 /* These are implementation specific. Force all references to 8257 conflict with all other references. */ 8258 specs[count] = tmpl; 8259 specs[count++].specific = 0; 8260 } 8261 else 8262 { 8263 UNHANDLED; 8264 } 8265 break; 8266 8267 case IA64_RS_PKR: /* 16 or more registers */ 8268 if (note == 3 || note == 4) 8269 { 8270 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3) 8271 { 8272 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR; 8273 if (regno >= 0 && regno < NELEMS (gr_values) 8274 && KNOWN (regno)) 8275 { 8276 if (note == 3) 8277 { 8278 specs[count] = tmpl; 8279 specs[count++].index = gr_values[regno].value & 0xFF; 8280 } 8281 else 8282 for (i = 0; i < NELEMS (gr_values); i++) 8283 { 8284 /* Uses all registers *except* the one in R3. */ 8285 if ((unsigned)i != (gr_values[regno].value & 0xFF)) 8286 { 8287 specs[count] = tmpl; 8288 specs[count++].index = i; 8289 } 8290 } 8291 } 8292 else 8293 { 8294 specs[count] = tmpl; 8295 specs[count++].specific = 0; 8296 } 8297 } 8298 } 8299 else if (note == 0) 8300 { 8301 /* probe et al. */ 8302 specs[count] = tmpl; 8303 specs[count++].specific = 0; 8304 } 8305 break; 8306 8307 case IA64_RS_PMC: /* four or more registers */ 8308 if (note == 3) 8309 { 8310 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3 8311 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3)) 8312 8313 { 8314 int index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write) 8315 ? 
1 : !rsrc_write); 8316 int regno = CURR_SLOT.opnd[index].X_add_number - REG_GR; 8317 if (regno >= 0 && regno < NELEMS (gr_values) 8318 && KNOWN (regno)) 8319 { 8320 specs[count] = tmpl; 8321 specs[count++].index = gr_values[regno].value & 0xFF; 8322 } 8323 else 8324 { 8325 specs[count] = tmpl; 8326 specs[count++].specific = 0; 8327 } 8328 } 8329 } 8330 else 8331 { 8332 UNHANDLED; 8333 } 8334 break; 8335 8336 case IA64_RS_PMD: /* four or more registers */ 8337 if (note == 3) 8338 { 8339 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3) 8340 { 8341 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR; 8342 if (regno >= 0 && regno < NELEMS (gr_values) 8343 && KNOWN (regno)) 8344 { 8345 specs[count] = tmpl; 8346 specs[count++].index = gr_values[regno].value & 0xFF; 8347 } 8348 else 8349 { 8350 specs[count] = tmpl; 8351 specs[count++].specific = 0; 8352 } 8353 } 8354 } 8355 else 8356 { 8357 UNHANDLED; 8358 } 8359 break; 8360 8361 case IA64_RS_RR: /* eight registers */ 8362 if (note == 6) 8363 { 8364 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3) 8365 { 8366 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR; 8367 if (regno >= 0 && regno < NELEMS (gr_values) 8368 && KNOWN (regno)) 8369 { 8370 specs[count] = tmpl; 8371 specs[count++].index = (gr_values[regno].value >> 61) & 0x7; 8372 } 8373 else 8374 { 8375 specs[count] = tmpl; 8376 specs[count++].specific = 0; 8377 } 8378 } 8379 } 8380 else if (note == 0 && !rsrc_write) 8381 { 8382 specs[count] = tmpl; 8383 specs[count++].specific = 0; 8384 } 8385 else 8386 { 8387 UNHANDLED; 8388 } 8389 break; 8390 8391 case IA64_RS_CR_IRR: 8392 if (note == 0) 8393 { 8394 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */ 8395 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR; 8396 if (rsrc_write 8397 && idesc->operands[1] == IA64_OPND_CR3 8398 && regno == CR_IVR) 8399 { 8400 for (i = 0; i < 4; i++) 8401 { 8402 specs[count] = tmpl; 8403 specs[count++].index = CR_IRR0 + i; 8404 } 8405 } 8406 } 8407 else if (note == 1) 8408 { 8409 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR; 8410 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3 8411 && regno >= CR_IRR0 8412 && regno <= CR_IRR3) 8413 { 8414 specs[count] = tmpl; 8415 specs[count++].index = regno; 8416 } 8417 } 8418 else 8419 { 8420 UNHANDLED; 8421 } 8422 break; 8423 8424 case IA64_RS_CR_IIB: 8425 if (note != 0) 8426 { 8427 UNHANDLED; 8428 } 8429 else 8430 { 8431 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR; 8432 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3 8433 && (regno == CR_IIB0 || regno == CR_IIB1)) 8434 { 8435 specs[count] = tmpl; 8436 specs[count++].index = regno; 8437 } 8438 } 8439 break; 8440 8441 case IA64_RS_CR_LRR: 8442 if (note != 1) 8443 { 8444 UNHANDLED; 8445 } 8446 else 8447 { 8448 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR; 8449 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3 8450 && (regno == CR_LRR0 || regno == CR_LRR1)) 8451 { 8452 specs[count] = tmpl; 8453 specs[count++].index = regno; 8454 } 8455 } 8456 break; 8457 8458 case IA64_RS_CR: 8459 if (note == 1) 8460 { 8461 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3) 8462 { 8463 specs[count] = tmpl; 8464 specs[count++].index = 8465 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR; 8466 } 8467 } 8468 else 8469 { 8470 UNHANDLED; 8471 } 8472 break; 8473 8474 case IA64_RS_FR: 8475 case IA64_RS_FRb: 8476 if (note != 1) 8477 { 8478 UNHANDLED; 8479 } 8480 else if (rsrc_write) 8481 { 8482 if (dep->specifier == IA64_RS_FRb 8483 && idesc->operands[0] 
== IA64_OPND_F1) 8484 { 8485 specs[count] = tmpl; 8486 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR; 8487 } 8488 } 8489 else 8490 { 8491 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++) 8492 { 8493 if (idesc->operands[i] == IA64_OPND_F2 8494 || idesc->operands[i] == IA64_OPND_F3 8495 || idesc->operands[i] == IA64_OPND_F4) 8496 { 8497 specs[count] = tmpl; 8498 specs[count++].index = 8499 CURR_SLOT.opnd[i].X_add_number - REG_FR; 8500 } 8501 } 8502 } 8503 break; 8504 8505 case IA64_RS_GR: 8506 if (note == 13) 8507 { 8508 /* This reference applies only to the GR whose value is loaded with 8509 data returned from memory. */ 8510 specs[count] = tmpl; 8511 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR; 8512 } 8513 else if (note == 1) 8514 { 8515 if (rsrc_write) 8516 { 8517 for (i = 0; i < idesc->num_outputs; i++) 8518 if (idesc->operands[i] == IA64_OPND_R1 8519 || idesc->operands[i] == IA64_OPND_R2 8520 || idesc->operands[i] == IA64_OPND_R3) 8521 { 8522 specs[count] = tmpl; 8523 specs[count++].index = 8524 CURR_SLOT.opnd[i].X_add_number - REG_GR; 8525 } 8526 if (idesc->flags & IA64_OPCODE_POSTINC) 8527 for (i = 0; i < NELEMS (idesc->operands); i++) 8528 if (idesc->operands[i] == IA64_OPND_MR3) 8529 { 8530 specs[count] = tmpl; 8531 specs[count++].index = 8532 CURR_SLOT.opnd[i].X_add_number - REG_GR; 8533 } 8534 } 8535 else 8536 { 8537 /* Look for anything that reads a GR. */ 8538 for (i = 0; i < NELEMS (idesc->operands); i++) 8539 { 8540 if (idesc->operands[i] == IA64_OPND_MR3 8541 || idesc->operands[i] == IA64_OPND_CPUID_R3 8542 || idesc->operands[i] == IA64_OPND_DBR_R3 8543 || idesc->operands[i] == IA64_OPND_IBR_R3 8544 || idesc->operands[i] == IA64_OPND_MSR_R3 8545 || idesc->operands[i] == IA64_OPND_PKR_R3 8546 || idesc->operands[i] == IA64_OPND_PMC_R3 8547 || idesc->operands[i] == IA64_OPND_PMD_R3 8548 || idesc->operands[i] == IA64_OPND_RR_R3 8549 || ((i >= idesc->num_outputs) 8550 && (idesc->operands[i] == IA64_OPND_R1 8551 || idesc->operands[i] == IA64_OPND_R2 8552 || idesc->operands[i] == IA64_OPND_R3 8553 /* addl source register. */ 8554 || idesc->operands[i] == IA64_OPND_R3_2))) 8555 { 8556 specs[count] = tmpl; 8557 specs[count++].index = 8558 CURR_SLOT.opnd[i].X_add_number - REG_GR; 8559 } 8560 } 8561 } 8562 } 8563 else 8564 { 8565 UNHANDLED; 8566 } 8567 break; 8568 8569 /* This is the same as IA64_RS_PRr, except that the register range is 8570 from 1 - 15, and there are no rotating register reads/writes here. */ 8571 case IA64_RS_PR: 8572 if (note == 0) 8573 { 8574 for (i = 1; i < 16; i++) 8575 { 8576 specs[count] = tmpl; 8577 specs[count++].index = i; 8578 } 8579 } 8580 else if (note == 7) 8581 { 8582 valueT mask = 0; 8583 /* Mark only those registers indicated by the mask. 
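	     (Illustrative: the write here comes from "mov pr = r2, mask17",
	     where bit i of the mask immediate selects p<i> for i = 1..15;
	     mask bit 16 stands for the whole rotating region and is handled
	     by the IA64_RS_PRr case below.)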
*/ 8584 if (rsrc_write) 8585 { 8586 mask = CURR_SLOT.opnd[2].X_add_number; 8587 for (i = 1; i < 16; i++) 8588 if (mask & ((valueT) 1 << i)) 8589 { 8590 specs[count] = tmpl; 8591 specs[count++].index = i; 8592 } 8593 } 8594 else 8595 { 8596 UNHANDLED; 8597 } 8598 } 8599 else if (note == 11) /* note 11 implies note 1 as well */ 8600 { 8601 if (rsrc_write) 8602 { 8603 for (i = 0; i < idesc->num_outputs; i++) 8604 { 8605 if (idesc->operands[i] == IA64_OPND_P1 8606 || idesc->operands[i] == IA64_OPND_P2) 8607 { 8608 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P; 8609 if (regno >= 1 && regno < 16) 8610 { 8611 specs[count] = tmpl; 8612 specs[count++].index = regno; 8613 } 8614 } 8615 } 8616 } 8617 else 8618 { 8619 UNHANDLED; 8620 } 8621 } 8622 else if (note == 12) 8623 { 8624 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16) 8625 { 8626 specs[count] = tmpl; 8627 specs[count++].index = CURR_SLOT.qp_regno; 8628 } 8629 } 8630 else if (note == 1) 8631 { 8632 if (rsrc_write) 8633 { 8634 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P; 8635 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P; 8636 int or_andcm = strstr (idesc->name, "or.andcm") != NULL; 8637 int and_orcm = strstr (idesc->name, "and.orcm") != NULL; 8638 8639 if ((idesc->operands[0] == IA64_OPND_P1 8640 || idesc->operands[0] == IA64_OPND_P2) 8641 && p1 >= 1 && p1 < 16) 8642 { 8643 specs[count] = tmpl; 8644 specs[count].cmp_type = 8645 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE)); 8646 specs[count++].index = p1; 8647 } 8648 if ((idesc->operands[1] == IA64_OPND_P1 8649 || idesc->operands[1] == IA64_OPND_P2) 8650 && p2 >= 1 && p2 < 16) 8651 { 8652 specs[count] = tmpl; 8653 specs[count].cmp_type = 8654 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE)); 8655 specs[count++].index = p2; 8656 } 8657 } 8658 else 8659 { 8660 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16) 8661 { 8662 specs[count] = tmpl; 8663 specs[count++].index = CURR_SLOT.qp_regno; 8664 } 8665 if (idesc->operands[1] == IA64_OPND_PR) 8666 { 8667 for (i = 1; i < 16; i++) 8668 { 8669 specs[count] = tmpl; 8670 specs[count++].index = i; 8671 } 8672 } 8673 } 8674 } 8675 else 8676 { 8677 UNHANDLED; 8678 } 8679 break; 8680 8681 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are 8682 simplified cases of this. */ 8683 case IA64_RS_PRr: 8684 if (note == 0) 8685 { 8686 for (i = 16; i < 63; i++) 8687 { 8688 specs[count] = tmpl; 8689 specs[count++].index = i; 8690 } 8691 } 8692 else if (note == 7) 8693 { 8694 valueT mask = 0; 8695 /* Mark only those registers indicated by the mask. 
*/ 8696 if (rsrc_write 8697 && idesc->operands[0] == IA64_OPND_PR) 8698 { 8699 mask = CURR_SLOT.opnd[2].X_add_number; 8700 if (mask & ((valueT) 1 << 16)) 8701 for (i = 16; i < 63; i++) 8702 { 8703 specs[count] = tmpl; 8704 specs[count++].index = i; 8705 } 8706 } 8707 else if (rsrc_write 8708 && idesc->operands[0] == IA64_OPND_PR_ROT) 8709 { 8710 for (i = 16; i < 63; i++) 8711 { 8712 specs[count] = tmpl; 8713 specs[count++].index = i; 8714 } 8715 } 8716 else 8717 { 8718 UNHANDLED; 8719 } 8720 } 8721 else if (note == 11) /* note 11 implies note 1 as well */ 8722 { 8723 if (rsrc_write) 8724 { 8725 for (i = 0; i < idesc->num_outputs; i++) 8726 { 8727 if (idesc->operands[i] == IA64_OPND_P1 8728 || idesc->operands[i] == IA64_OPND_P2) 8729 { 8730 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P; 8731 if (regno >= 16 && regno < 63) 8732 { 8733 specs[count] = tmpl; 8734 specs[count++].index = regno; 8735 } 8736 } 8737 } 8738 } 8739 else 8740 { 8741 UNHANDLED; 8742 } 8743 } 8744 else if (note == 12) 8745 { 8746 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63) 8747 { 8748 specs[count] = tmpl; 8749 specs[count++].index = CURR_SLOT.qp_regno; 8750 } 8751 } 8752 else if (note == 1) 8753 { 8754 if (rsrc_write) 8755 { 8756 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P; 8757 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P; 8758 int or_andcm = strstr (idesc->name, "or.andcm") != NULL; 8759 int and_orcm = strstr (idesc->name, "and.orcm") != NULL; 8760 8761 if ((idesc->operands[0] == IA64_OPND_P1 8762 || idesc->operands[0] == IA64_OPND_P2) 8763 && p1 >= 16 && p1 < 63) 8764 { 8765 specs[count] = tmpl; 8766 specs[count].cmp_type = 8767 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE)); 8768 specs[count++].index = p1; 8769 } 8770 if ((idesc->operands[1] == IA64_OPND_P1 8771 || idesc->operands[1] == IA64_OPND_P2) 8772 && p2 >= 16 && p2 < 63) 8773 { 8774 specs[count] = tmpl; 8775 specs[count].cmp_type = 8776 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE)); 8777 specs[count++].index = p2; 8778 } 8779 } 8780 else 8781 { 8782 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63) 8783 { 8784 specs[count] = tmpl; 8785 specs[count++].index = CURR_SLOT.qp_regno; 8786 } 8787 if (idesc->operands[1] == IA64_OPND_PR) 8788 { 8789 for (i = 16; i < 63; i++) 8790 { 8791 specs[count] = tmpl; 8792 specs[count++].index = i; 8793 } 8794 } 8795 } 8796 } 8797 else 8798 { 8799 UNHANDLED; 8800 } 8801 break; 8802 8803 case IA64_RS_PSR: 8804 /* Verify that the instruction is using the PSR bit indicated in 8805 dep->regindex. */ 8806 if (note == 0) 8807 { 8808 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM) 8809 { 8810 if (dep->regindex < 6) 8811 { 8812 specs[count++] = tmpl; 8813 } 8814 } 8815 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR) 8816 { 8817 if (dep->regindex < 32 8818 || dep->regindex == 35 8819 || dep->regindex == 36 8820 || (!rsrc_write && dep->regindex == PSR_CPL)) 8821 { 8822 specs[count++] = tmpl; 8823 } 8824 } 8825 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L) 8826 { 8827 if (dep->regindex < 32 8828 || dep->regindex == 35 8829 || dep->regindex == 36 8830 || (rsrc_write && dep->regindex == PSR_CPL)) 8831 { 8832 specs[count++] = tmpl; 8833 } 8834 } 8835 else 8836 { 8837 /* Several PSR bits have very specific dependencies. 
*/ 8838 switch (dep->regindex) 8839 { 8840 default: 8841 specs[count++] = tmpl; 8842 break; 8843 case PSR_IC: 8844 if (rsrc_write) 8845 { 8846 specs[count++] = tmpl; 8847 } 8848 else 8849 { 8850 /* Only certain CR accesses use PSR.ic */ 8851 if (idesc->operands[0] == IA64_OPND_CR3 8852 || idesc->operands[1] == IA64_OPND_CR3) 8853 { 8854 int index = 8855 ((idesc->operands[0] == IA64_OPND_CR3) 8856 ? 0 : 1); 8857 int regno = 8858 CURR_SLOT.opnd[index].X_add_number - REG_CR; 8859 8860 switch (regno) 8861 { 8862 default: 8863 break; 8864 case CR_ITIR: 8865 case CR_IFS: 8866 case CR_IIM: 8867 case CR_IIP: 8868 case CR_IPSR: 8869 case CR_ISR: 8870 case CR_IFA: 8871 case CR_IHA: 8872 case CR_IIB0: 8873 case CR_IIB1: 8874 case CR_IIPA: 8875 specs[count++] = tmpl; 8876 break; 8877 } 8878 } 8879 } 8880 break; 8881 case PSR_CPL: 8882 if (rsrc_write) 8883 { 8884 specs[count++] = tmpl; 8885 } 8886 else 8887 { 8888 /* Only some AR accesses use cpl */ 8889 if (idesc->operands[0] == IA64_OPND_AR3 8890 || idesc->operands[1] == IA64_OPND_AR3) 8891 { 8892 int index = 8893 ((idesc->operands[0] == IA64_OPND_AR3) 8894 ? 0 : 1); 8895 int regno = 8896 CURR_SLOT.opnd[index].X_add_number - REG_AR; 8897 8898 if (regno == AR_ITC 8899 || regno == AR_RUC 8900 || (index == 0 8901 && (regno == AR_RSC 8902 || (regno >= AR_K0 8903 && regno <= AR_K7)))) 8904 { 8905 specs[count++] = tmpl; 8906 } 8907 } 8908 else 8909 { 8910 specs[count++] = tmpl; 8911 } 8912 break; 8913 } 8914 } 8915 } 8916 } 8917 else if (note == 7) 8918 { 8919 valueT mask = 0; 8920 if (idesc->operands[0] == IA64_OPND_IMMU24) 8921 { 8922 mask = CURR_SLOT.opnd[0].X_add_number; 8923 } 8924 else 8925 { 8926 UNHANDLED; 8927 } 8928 if (mask & ((valueT) 1 << dep->regindex)) 8929 { 8930 specs[count++] = tmpl; 8931 } 8932 } 8933 else if (note == 8) 8934 { 8935 int min = dep->regindex == PSR_DFL ? 2 : 32; 8936 int max = dep->regindex == PSR_DFL ? 31 : 127; 8937 /* dfh is read on FR32-127; dfl is read on FR2-31 */ 8938 for (i = 0; i < NELEMS (idesc->operands); i++) 8939 { 8940 if (idesc->operands[i] == IA64_OPND_F1 8941 || idesc->operands[i] == IA64_OPND_F2 8942 || idesc->operands[i] == IA64_OPND_F3 8943 || idesc->operands[i] == IA64_OPND_F4) 8944 { 8945 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR; 8946 if (reg >= min && reg <= max) 8947 { 8948 specs[count++] = tmpl; 8949 } 8950 } 8951 } 8952 } 8953 else if (note == 9) 8954 { 8955 int min = dep->regindex == PSR_MFL ? 2 : 32; 8956 int max = dep->regindex == PSR_MFL ? 
31 : 127; 8957 /* mfh is read on writes to FR32-127; mfl is read on writes to 8958 FR2-31 */ 8959 for (i = 0; i < idesc->num_outputs; i++) 8960 { 8961 if (idesc->operands[i] == IA64_OPND_F1) 8962 { 8963 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR; 8964 if (reg >= min && reg <= max) 8965 { 8966 specs[count++] = tmpl; 8967 } 8968 } 8969 } 8970 } 8971 else if (note == 10) 8972 { 8973 for (i = 0; i < NELEMS (idesc->operands); i++) 8974 { 8975 if (idesc->operands[i] == IA64_OPND_R1 8976 || idesc->operands[i] == IA64_OPND_R2 8977 || idesc->operands[i] == IA64_OPND_R3) 8978 { 8979 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR; 8980 if (regno >= 16 && regno <= 31) 8981 { 8982 specs[count++] = tmpl; 8983 } 8984 } 8985 } 8986 } 8987 else 8988 { 8989 UNHANDLED; 8990 } 8991 break; 8992 8993 case IA64_RS_AR_FPSR: 8994 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3) 8995 { 8996 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR; 8997 if (regno == AR_FPSR) 8998 { 8999 specs[count++] = tmpl; 9000 } 9001 } 9002 else 9003 { 9004 specs[count++] = tmpl; 9005 } 9006 break; 9007 9008 case IA64_RS_ARX: 9009 /* Handle all AR[REG] resources */ 9010 if (note == 0 || note == 1) 9011 { 9012 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR; 9013 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3 9014 && regno == dep->regindex) 9015 { 9016 specs[count++] = tmpl; 9017 } 9018 /* other AR[REG] resources may be affected by AR accesses */ 9019 else if (idesc->operands[0] == IA64_OPND_AR3) 9020 { 9021 /* AR[] writes */ 9022 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR; 9023 switch (dep->regindex) 9024 { 9025 default: 9026 break; 9027 case AR_BSP: 9028 case AR_RNAT: 9029 if (regno == AR_BSPSTORE) 9030 { 9031 specs[count++] = tmpl; 9032 } 9033 case AR_RSC: 9034 if (!rsrc_write && 9035 (regno == AR_BSPSTORE 9036 || regno == AR_RNAT)) 9037 { 9038 specs[count++] = tmpl; 9039 } 9040 break; 9041 } 9042 } 9043 else if (idesc->operands[1] == IA64_OPND_AR3) 9044 { 9045 /* AR[] reads */ 9046 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR; 9047 switch (dep->regindex) 9048 { 9049 default: 9050 break; 9051 case AR_RSC: 9052 if (regno == AR_BSPSTORE || regno == AR_RNAT) 9053 { 9054 specs[count++] = tmpl; 9055 } 9056 break; 9057 } 9058 } 9059 else 9060 { 9061 specs[count++] = tmpl; 9062 } 9063 } 9064 else 9065 { 9066 UNHANDLED; 9067 } 9068 break; 9069 9070 case IA64_RS_CRX: 9071 /* Handle all CR[REG] resources. 9072 ??? FIXME: The rule 17 isn't really handled correctly. */ 9073 if (note == 0 || note == 1 || note == 17) 9074 { 9075 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3) 9076 { 9077 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR; 9078 if (regno == dep->regindex) 9079 { 9080 specs[count++] = tmpl; 9081 } 9082 else if (!rsrc_write) 9083 { 9084 /* Reads from CR[IVR] affect other resources. 
*/ 9085 if (regno == CR_IVR) 9086 { 9087 if ((dep->regindex >= CR_IRR0 9088 && dep->regindex <= CR_IRR3) 9089 || dep->regindex == CR_TPR) 9090 { 9091 specs[count++] = tmpl; 9092 } 9093 } 9094 } 9095 } 9096 else 9097 { 9098 specs[count++] = tmpl; 9099 } 9100 } 9101 else 9102 { 9103 UNHANDLED; 9104 } 9105 break; 9106 9107 case IA64_RS_INSERVICE: 9108 /* look for write of EOI (67) or read of IVR (65) */ 9109 if ((idesc->operands[0] == IA64_OPND_CR3 9110 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI) 9111 || (idesc->operands[1] == IA64_OPND_CR3 9112 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR)) 9113 { 9114 specs[count++] = tmpl; 9115 } 9116 break; 9117 9118 case IA64_RS_GR0: 9119 if (note == 1) 9120 { 9121 specs[count++] = tmpl; 9122 } 9123 else 9124 { 9125 UNHANDLED; 9126 } 9127 break; 9128 9129 case IA64_RS_CFM: 9130 if (note != 2) 9131 { 9132 specs[count++] = tmpl; 9133 } 9134 else 9135 { 9136 /* Check if any of the registers accessed are in the rotating region. 9137 mov to/from pr accesses CFM only when qp_regno is in the rotating 9138 region */ 9139 for (i = 0; i < NELEMS (idesc->operands); i++) 9140 { 9141 if (idesc->operands[i] == IA64_OPND_R1 9142 || idesc->operands[i] == IA64_OPND_R2 9143 || idesc->operands[i] == IA64_OPND_R3) 9144 { 9145 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR; 9146 /* Assumes that md.rot.num_regs is always valid */ 9147 if (md.rot.num_regs > 0 9148 && num > 31 9149 && num < 31 + md.rot.num_regs) 9150 { 9151 specs[count] = tmpl; 9152 specs[count++].specific = 0; 9153 } 9154 } 9155 else if (idesc->operands[i] == IA64_OPND_F1 9156 || idesc->operands[i] == IA64_OPND_F2 9157 || idesc->operands[i] == IA64_OPND_F3 9158 || idesc->operands[i] == IA64_OPND_F4) 9159 { 9160 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR; 9161 if (num > 31) 9162 { 9163 specs[count] = tmpl; 9164 specs[count++].specific = 0; 9165 } 9166 } 9167 else if (idesc->operands[i] == IA64_OPND_P1 9168 || idesc->operands[i] == IA64_OPND_P2) 9169 { 9170 int num = CURR_SLOT.opnd[i].X_add_number - REG_P; 9171 if (num > 15) 9172 { 9173 specs[count] = tmpl; 9174 specs[count++].specific = 0; 9175 } 9176 } 9177 } 9178 if (CURR_SLOT.qp_regno > 15) 9179 { 9180 specs[count] = tmpl; 9181 specs[count++].specific = 0; 9182 } 9183 } 9184 break; 9185 9186 /* This is the same as IA64_RS_PRr, except simplified to account for 9187 the fact that there is only one register. 
*/ 9188 case IA64_RS_PR63: 9189 if (note == 0) 9190 { 9191 specs[count++] = tmpl; 9192 } 9193 else if (note == 7) 9194 { 9195 valueT mask = 0; 9196 if (idesc->operands[2] == IA64_OPND_IMM17) 9197 mask = CURR_SLOT.opnd[2].X_add_number; 9198 if (mask & ((valueT) 1 << 63)) 9199 specs[count++] = tmpl; 9200 } 9201 else if (note == 11) 9202 { 9203 if ((idesc->operands[0] == IA64_OPND_P1 9204 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63) 9205 || (idesc->operands[1] == IA64_OPND_P2 9206 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63)) 9207 { 9208 specs[count++] = tmpl; 9209 } 9210 } 9211 else if (note == 12) 9212 { 9213 if (CURR_SLOT.qp_regno == 63) 9214 { 9215 specs[count++] = tmpl; 9216 } 9217 } 9218 else if (note == 1) 9219 { 9220 if (rsrc_write) 9221 { 9222 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P; 9223 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P; 9224 int or_andcm = strstr (idesc->name, "or.andcm") != NULL; 9225 int and_orcm = strstr (idesc->name, "and.orcm") != NULL; 9226 9227 if (p1 == 63 9228 && (idesc->operands[0] == IA64_OPND_P1 9229 || idesc->operands[0] == IA64_OPND_P2)) 9230 { 9231 specs[count] = tmpl; 9232 specs[count++].cmp_type = 9233 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE)); 9234 } 9235 if (p2 == 63 9236 && (idesc->operands[1] == IA64_OPND_P1 9237 || idesc->operands[1] == IA64_OPND_P2)) 9238 { 9239 specs[count] = tmpl; 9240 specs[count++].cmp_type = 9241 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE)); 9242 } 9243 } 9244 else 9245 { 9246 if (CURR_SLOT.qp_regno == 63) 9247 { 9248 specs[count++] = tmpl; 9249 } 9250 } 9251 } 9252 else 9253 { 9254 UNHANDLED; 9255 } 9256 break; 9257 9258 case IA64_RS_RSE: 9259 /* FIXME we can identify some individual RSE written resources, but RSE 9260 read resources have not yet been completely identified, so for now 9261 treat RSE as a single resource */ 9262 if (strncmp (idesc->name, "mov", 3) == 0) 9263 { 9264 if (rsrc_write) 9265 { 9266 if (idesc->operands[0] == IA64_OPND_AR3 9267 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE) 9268 { 9269 specs[count++] = tmpl; 9270 } 9271 } 9272 else 9273 { 9274 if (idesc->operands[0] == IA64_OPND_AR3) 9275 { 9276 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE 9277 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT) 9278 { 9279 specs[count++] = tmpl; 9280 } 9281 } 9282 else if (idesc->operands[1] == IA64_OPND_AR3) 9283 { 9284 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP 9285 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE 9286 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT) 9287 { 9288 specs[count++] = tmpl; 9289 } 9290 } 9291 } 9292 } 9293 else 9294 { 9295 specs[count++] = tmpl; 9296 } 9297 break; 9298 9299 case IA64_RS_ANY: 9300 /* FIXME -- do any of these need to be non-specific? */ 9301 specs[count++] = tmpl; 9302 break; 9303 9304 default: 9305 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier); 9306 break; 9307 } 9308 9309 return count; 9310 } 9311 9312 /* Clear branch flags on marked resources. This breaks the link between the 9313 QP of the marking instruction and a subsequent branch on the same QP. */ 9314 9315 static void 9316 clear_qp_branch_flag (valueT mask) 9317 { 9318 int i; 9319 for (i = 0; i < regdepslen; i++) 9320 { 9321 valueT bit = ((valueT) 1 << regdeps[i].qp_regno); 9322 if ((bit & mask) != 0) 9323 { 9324 regdeps[i].link_to_qp_branch = 0; 9325 } 9326 } 9327 } 9328 9329 /* MASK contains 2 and only 2 PRs which are mutually exclusive. 
   Remove any mutexes which contain one of the PRs and create new ones
   when needed.  */

static int
update_qp_mutex (valueT mask)
{
  int i;
  int add = 0;

  i = 0;
  while (i < qp_mutexeslen)
    {
      if ((qp_mutexes[i].prmask & mask) != 0)
        {
          /* If it destroys and creates the same mutex, do nothing.  */
          if (qp_mutexes[i].prmask == mask
              && qp_mutexes[i].path == md.path)
            {
              i++;
              add = -1;
            }
          else
            {
              int keep = 0;

              if (md.debug_dv)
                {
                  fprintf (stderr, "  Clearing mutex relation");
                  print_prmask (qp_mutexes[i].prmask);
                  fprintf (stderr, "\n");
                }

              /* Deal with an old mutex of three or more PRs only if the
                 new mutex is on the same execution path as it.

                 FIXME: The 3+ mutex support is incomplete.
                 dot_pred_rel () may be a better place to fix it.  */
              if (qp_mutexes[i].path == md.path)
                {
                  /* If it is a proper subset of the mutex, create a
                     new mutex.  */
                  if (add == 0
                      && (qp_mutexes[i].prmask & mask) == mask)
                    add = 1;

                  qp_mutexes[i].prmask &= ~mask;
                  if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
                    {
                      /* Modify the mutex if more than one PR is left.  */
                      keep = 1;
                      i++;
                    }
                }

              if (keep == 0)
                /* Remove the mutex.  */
                qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
            }
        }
      else
        ++i;
    }

  if (add == 1)
    add_qp_mutex (mask);

  return add;
}

/* Remove any mutexes which contain any of the PRs indicated in the mask.

   Any change to a PR clears the mutex relations which include that PR.  */

static void
clear_qp_mutex (valueT mask)
{
  int i;

  i = 0;
  while (i < qp_mutexeslen)
    {
      if ((qp_mutexes[i].prmask & mask) != 0)
        {
          if (md.debug_dv)
            {
              fprintf (stderr, "  Clearing mutex relation");
              print_prmask (qp_mutexes[i].prmask);
              fprintf (stderr, "\n");
            }
          qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
        }
      else
        ++i;
    }
}

/* Clear implies relations which contain PRs in the given masks.
   P1_MASK indicates the source of the implies relation, while P2_MASK
   indicates the implied PR.  */

static void
clear_qp_implies (valueT p1_mask, valueT p2_mask)
{
  int i;

  i = 0;
  while (i < qp_implieslen)
    {
      if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
          || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
        {
          if (md.debug_dv)
            fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
                     qp_implies[i].p1, qp_implies[i].p2);
          qp_implies[i] = qp_implies[--qp_implieslen];
        }
      else
        ++i;
    }
}
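
/* For illustration (register numbers chosen arbitrarily): an unpredicated
   parallel compare such as

        cmp.eq p6, p7 = r8, r9 ;;

   makes p6 and p7 mutex, so a later (p6)-predicated write and a
   (p7)-predicated access of the same resource are not reported as a DV.
   A predicated unconditional compare, e.g.

        (p5) cmp.eq.unc p6, p7 = r8, r9 ;;

   additionally records that p6 and p7 each imply p5.  These relations are
   derived from the instruction stream by note_register_values () below.  */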

/* Add the PRs specified to the list of implied relations.  */

static void
add_qp_imply (int p1, int p2)
{
  valueT mask;
  valueT bit;
  int i;

  /* p0 is not meaningful here.  */
  if (p1 == 0 || p2 == 0)
    abort ();

  if (p1 == p2)
    return;

  /* If it exists already, ignore it.  */
  for (i = 0; i < qp_implieslen; i++)
    {
      if (qp_implies[i].p1 == p1
          && qp_implies[i].p2 == p2
          && qp_implies[i].path == md.path
          && !qp_implies[i].p2_branched)
        return;
    }

  if (qp_implieslen == qp_impliestotlen)
    {
      qp_impliestotlen += 20;
      qp_implies = (struct qp_imply *)
        xrealloc ((void *) qp_implies,
                  qp_impliestotlen * sizeof (struct qp_imply));
    }
  if (md.debug_dv)
    fprintf (stderr, "  Registering PR%d implies PR%d\n", p1, p2);
  qp_implies[qp_implieslen].p1 = p1;
  qp_implies[qp_implieslen].p2 = p2;
  qp_implies[qp_implieslen].path = md.path;
  qp_implies[qp_implieslen++].p2_branched = 0;

  /* Add in the implied transitive relations; for everything that p2 implies,
     make p1 imply that, too; for everything that implies p1, make it imply p2
     as well.  */
  for (i = 0; i < qp_implieslen; i++)
    {
      if (qp_implies[i].p1 == p2)
        add_qp_imply (p1, qp_implies[i].p2);
      if (qp_implies[i].p2 == p1)
        add_qp_imply (qp_implies[i].p1, p2);
    }
  /* Add in mutex relations implied by this implies relation; for each mutex
     relation containing p2, duplicate it and replace p2 with p1.  */
  bit = (valueT) 1 << p1;
  mask = (valueT) 1 << p2;
  for (i = 0; i < qp_mutexeslen; i++)
    {
      if (qp_mutexes[i].prmask & mask)
        add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
    }
}

/* Add the PRs specified in the mask to the mutex list; this means that only
   one of the PRs can be true at any time.  PR0 should never be included in
   the mask.  */

static void
add_qp_mutex (valueT mask)
{
  if (mask & 0x1)
    abort ();

  if (qp_mutexeslen == qp_mutexestotlen)
    {
      qp_mutexestotlen += 20;
      qp_mutexes = (struct qpmutex *)
        xrealloc ((void *) qp_mutexes,
                  qp_mutexestotlen * sizeof (struct qpmutex));
    }
  if (md.debug_dv)
    {
      fprintf (stderr, "  Registering mutex on");
      print_prmask (mask);
      fprintf (stderr, "\n");
    }
  qp_mutexes[qp_mutexeslen].path = md.path;
  qp_mutexes[qp_mutexeslen++].prmask = mask;
}

static int
has_suffix_p (const char *name, const char *suffix)
{
  size_t namelen = strlen (name);
  size_t sufflen = strlen (suffix);

  if (namelen <= sufflen)
    return 0;
  return strcmp (name + namelen - sufflen, suffix) == 0;
}

static void
clear_register_values (void)
{
  int i;
  if (md.debug_dv)
    fprintf (stderr, "  Clearing register values\n");
  for (i = 1; i < NELEMS (gr_values); i++)
    gr_values[i].known = 0;
}

/* Keep track of register values/changes which affect DV tracking.

   Optimization note: we should add a flag to classes of insns where we
   otherwise have to examine a group of strings to identify them.  */

static void
note_register_values (struct ia64_opcode *idesc)
{
  valueT qp_changemask = 0;
  int i;

  /* Invalidate values for registers being written to.  */
  for (i = 0; i < idesc->num_outputs; i++)
    {
      if (idesc->operands[i] == IA64_OPND_R1
          || idesc->operands[i] == IA64_OPND_R2
          || idesc->operands[i] == IA64_OPND_R3)
        {
          int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
          if (regno > 0 && regno < NELEMS (gr_values))
            gr_values[regno].known = 0;
        }
      else if (idesc->operands[i] == IA64_OPND_R3_2)
        {
          int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
          if (regno > 0 && regno < 4)
            gr_values[regno].known = 0;
        }
      else if (idesc->operands[i] == IA64_OPND_P1
               || idesc->operands[i] == IA64_OPND_P2)
        {
          int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
          qp_changemask |= (valueT) 1 << regno;
        }
      else if (idesc->operands[i] == IA64_OPND_PR)
        {
          if (idesc->operands[2] & (valueT) 0x10000)
            qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
          else
            qp_changemask = idesc->operands[2];
          break;
        }
      else if (idesc->operands[i] == IA64_OPND_PR_ROT)
        {
          if (idesc->operands[1] & ((valueT) 1 << 43))
            qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
          else
            qp_changemask = idesc->operands[1];
          qp_changemask &= ~(valueT) 0xFFFF;
          break;
        }
    }

  /* Always clear qp branch flags on any PR change.  */
  /* FIXME there may be exceptions for certain compares.  */
  clear_qp_branch_flag (qp_changemask);

  /* Invalidate rotating registers on insns which affect RRBs in CFM.  */
  if (idesc->flags & IA64_OPCODE_MOD_RRBS)
    {
      qp_changemask |= ~(valueT) 0xFFFF;
      if (strcmp (idesc->name, "clrrrb.pr") != 0)
        {
          for (i = 32; i < 32 + md.rot.num_regs; i++)
            gr_values[i].known = 0;
        }
      clear_qp_mutex (qp_changemask);
      clear_qp_implies (qp_changemask, qp_changemask);
    }
  /* After a call, all register values are undefined, except those marked
     as "safe".  */
  else if (strncmp (idesc->name, "br.call", 6) == 0
           || strncmp (idesc->name, "brl.call", 7) == 0)
    {
      /* FIXME keep GR values which are marked as "safe_across_calls" */
      clear_register_values ();
      clear_qp_mutex (~qp_safe_across_calls);
      clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
      clear_qp_branch_flag (~qp_safe_across_calls);
    }
  else if (is_interruption_or_rfi (idesc)
           || is_taken_branch (idesc))
    {
      clear_register_values ();
      clear_qp_mutex (~(valueT) 0);
      clear_qp_implies (~(valueT) 0, ~(valueT) 0);
    }
  /* Look for mutex and implies relations.  */
  else if ((idesc->operands[0] == IA64_OPND_P1
            || idesc->operands[0] == IA64_OPND_P2)
           && (idesc->operands[1] == IA64_OPND_P1
               || idesc->operands[1] == IA64_OPND_P2))
    {
      int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
      int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
      valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
      valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;

      /* If both PRs are PR0, we can't really do anything.  */
      if (p1 == 0 && p2 == 0)
        {
          if (md.debug_dv)
            fprintf (stderr, "  Ignoring PRs due to inclusion of p0\n");
        }
      /* In general, clear mutexes and implies which include P1 or P2,
         with the following exceptions.  */
      else if (has_suffix_p (idesc->name, ".or.andcm")
               || has_suffix_p (idesc->name, ".and.orcm"))
        {
          clear_qp_implies (p2mask, p1mask);
        }
      else if (has_suffix_p (idesc->name, ".andcm")
               || has_suffix_p (idesc->name, ".and"))
        {
          clear_qp_implies (0, p1mask | p2mask);
        }
      else if (has_suffix_p (idesc->name, ".orcm")
               || has_suffix_p (idesc->name, ".or"))
        {
          clear_qp_mutex (p1mask | p2mask);
          clear_qp_implies (p1mask | p2mask, 0);
        }
      else
        {
          int added = 0;

          clear_qp_implies (p1mask | p2mask, p1mask | p2mask);

          /* If one of the PRs is PR0, we call clear_qp_mutex.  */
          if (p1 == 0 || p2 == 0)
            clear_qp_mutex (p1mask | p2mask);
          else
            added = update_qp_mutex (p1mask | p2mask);

          if (CURR_SLOT.qp_regno == 0
              || has_suffix_p (idesc->name, ".unc"))
            {
              if (added == 0 && p1 && p2)
                add_qp_mutex (p1mask | p2mask);
              if (CURR_SLOT.qp_regno != 0)
                {
                  if (p1)
                    add_qp_imply (p1, CURR_SLOT.qp_regno);
                  if (p2)
                    add_qp_imply (p2, CURR_SLOT.qp_regno);
                }
            }
        }
    }
  /* Look for mov imm insns into GRs.  */
  else if (idesc->operands[0] == IA64_OPND_R1
           && (idesc->operands[1] == IA64_OPND_IMM22
               || idesc->operands[1] == IA64_OPND_IMMU64)
           && CURR_SLOT.opnd[1].X_op == O_constant
           && (strcmp (idesc->name, "mov") == 0
               || strcmp (idesc->name, "movl") == 0))
    {
      int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
      if (regno > 0 && regno < NELEMS (gr_values))
        {
          gr_values[regno].known = 1;
          gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
          gr_values[regno].path = md.path;
          if (md.debug_dv)
            {
              fprintf (stderr, "  Know gr%d = ", regno);
              fprintf_vma (stderr, gr_values[regno].value);
              fputs ("\n", stderr);
            }
        }
    }
  /* Look for dep.z imm insns.  */
  else if (idesc->operands[0] == IA64_OPND_R1
           && idesc->operands[1] == IA64_OPND_IMM8
           && strcmp (idesc->name, "dep.z") == 0)
    {
      int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
      if (regno > 0 && regno < NELEMS (gr_values))
        {
          valueT value = CURR_SLOT.opnd[1].X_add_number;

          if (CURR_SLOT.opnd[3].X_add_number < 64)
            value &= ((valueT) 1 << CURR_SLOT.opnd[3].X_add_number) - 1;
          value <<= CURR_SLOT.opnd[2].X_add_number;
          gr_values[regno].known = 1;
          gr_values[regno].value = value;
          gr_values[regno].path = md.path;
          if (md.debug_dv)
            {
              fprintf (stderr, "  Know gr%d = ", regno);
              fprintf_vma (stderr, gr_values[regno].value);
              fputs ("\n", stderr);
            }
        }
    }
  else
    {
      clear_qp_mutex (qp_changemask);
      clear_qp_implies (qp_changemask, qp_changemask);
    }
}

/* Return whether the given predicate registers are currently mutex.  */

static int
qp_mutex (int p1, int p2, int path)
{
  int i;
  valueT mask;

  if (p1 != p2)
    {
      mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
      for (i = 0; i < qp_mutexeslen; i++)
        {
          if (qp_mutexes[i].path >= path
              && (qp_mutexes[i].prmask & mask) == mask)
            return 1;
        }
    }
  return 0;
}
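
/* For illustration: given the p6/p7 mutex noted above, resources_match ()
   below returns 0 when a marked (p6) write is checked against a (p7) access,
   because qp_mutex (6, 7, path) succeeds (barring the note 11 exception).
   Without such a mutex, the same check returns 1 (definite conflict) when
   both resources are specific and their indices match, or 2 (potential
   conflict) when either resource is non-specific.  */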

/* Return whether the given resource is in the given insn's list of chks.
   Return 1 if the conflict is absolutely determined, 2 if it's a potential
   conflict.  */

static int
resources_match (struct rsrc *rs,
                 struct ia64_opcode *idesc,
                 int note,
                 int qp_regno,
                 int path)
{
  struct rsrc specs[MAX_SPECS];
  int count;

  /* If the marked resource's qp_regno and the given qp_regno are mutex,
     we don't need to check.  One exception is note 11, which indicates that
     target predicates are written regardless of PR[qp].  */
  if (qp_mutex (rs->qp_regno, qp_regno, path)
      && note != 11)
    return 0;

  count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
  while (count-- > 0)
    {
      /* UNAT checking is a bit more specific than other resources.  */
      if (rs->dependency->specifier == IA64_RS_AR_UNAT
          && specs[count].mem_offset.hint
          && rs->mem_offset.hint)
        {
          if (rs->mem_offset.base == specs[count].mem_offset.base)
            {
              if (((rs->mem_offset.offset >> 3) & 0x3F) ==
                  ((specs[count].mem_offset.offset >> 3) & 0x3F))
                return 1;
              else
                continue;
            }
        }

      /* Skip apparent PR write conflicts where both writes are an AND or both
         writes are an OR.  */
      if (rs->dependency->specifier == IA64_RS_PR
          || rs->dependency->specifier == IA64_RS_PRr
          || rs->dependency->specifier == IA64_RS_PR63)
        {
          if (specs[count].cmp_type != CMP_NONE
              && specs[count].cmp_type == rs->cmp_type)
            {
              if (md.debug_dv)
                fprintf (stderr, "  %s on parallel compare allowed (PR%d)\n",
                         dv_mode[rs->dependency->mode],
                         rs->dependency->specifier != IA64_RS_PR63 ?
                         specs[count].index : 63);
              continue;
            }
          if (md.debug_dv)
            fprintf (stderr,
                     "  %s on parallel compare conflict %s vs %s on PR%d\n",
                     dv_mode[rs->dependency->mode],
                     dv_cmp_type[rs->cmp_type],
                     dv_cmp_type[specs[count].cmp_type],
                     rs->dependency->specifier != IA64_RS_PR63 ?
                     specs[count].index : 63);
        }

      /* If either resource is not specific, conservatively assume
         a conflict.  */
      if (!specs[count].specific || !rs->specific)
        return 2;
      else if (specs[count].index == rs->index)
        return 1;
    }

  return 0;
}

/* Indicate an instruction group break; if INSERT_STOP is non-zero, then
   insert a stop to create the break.  Update all resource dependencies
   appropriately.  If QP_REGNO is non-zero, only apply the break to resources
   which use the same QP_REGNO and have the link_to_qp_branch flag set.
   If SAVE_CURRENT is non-zero, don't affect resources marked by the current
   instruction.  */

static void
insn_group_break (int insert_stop, int qp_regno, int save_current)
{
  int i;

  if (insert_stop && md.num_slots_in_use > 0)
    PREV_SLOT.end_of_insn_group = 1;

  if (md.debug_dv)
    {
      fprintf (stderr, "  Insn group break%s",
               (insert_stop ?
" (w/stop)" : "")); 9880 if (qp_regno != 0) 9881 fprintf (stderr, " effective for QP=%d", qp_regno); 9882 fprintf (stderr, "\n"); 9883 } 9884 9885 i = 0; 9886 while (i < regdepslen) 9887 { 9888 const struct ia64_dependency *dep = regdeps[i].dependency; 9889 9890 if (qp_regno != 0 9891 && regdeps[i].qp_regno != qp_regno) 9892 { 9893 ++i; 9894 continue; 9895 } 9896 9897 if (save_current 9898 && CURR_SLOT.src_file == regdeps[i].file 9899 && CURR_SLOT.src_line == regdeps[i].line) 9900 { 9901 ++i; 9902 continue; 9903 } 9904 9905 /* clear dependencies which are automatically cleared by a stop, or 9906 those that have reached the appropriate state of insn serialization */ 9907 if (dep->semantics == IA64_DVS_IMPLIED 9908 || dep->semantics == IA64_DVS_IMPLIEDF 9909 || regdeps[i].insn_srlz == STATE_SRLZ) 9910 { 9911 print_dependency ("Removing", i); 9912 regdeps[i] = regdeps[--regdepslen]; 9913 } 9914 else 9915 { 9916 if (dep->semantics == IA64_DVS_DATA 9917 || dep->semantics == IA64_DVS_INSTR 9918 || dep->semantics == IA64_DVS_SPECIFIC) 9919 { 9920 if (regdeps[i].insn_srlz == STATE_NONE) 9921 regdeps[i].insn_srlz = STATE_STOP; 9922 if (regdeps[i].data_srlz == STATE_NONE) 9923 regdeps[i].data_srlz = STATE_STOP; 9924 } 9925 ++i; 9926 } 9927 } 9928 } 9929 9930 /* Add the given resource usage spec to the list of active dependencies. */ 9931 9932 static void 9933 mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED, 9934 const struct ia64_dependency *dep ATTRIBUTE_UNUSED, 9935 struct rsrc *spec, 9936 int depind, 9937 int path) 9938 { 9939 if (regdepslen == regdepstotlen) 9940 { 9941 regdepstotlen += 20; 9942 regdeps = (struct rsrc *) 9943 xrealloc ((void *) regdeps, 9944 regdepstotlen * sizeof (struct rsrc)); 9945 } 9946 9947 regdeps[regdepslen] = *spec; 9948 regdeps[regdepslen].depind = depind; 9949 regdeps[regdepslen].path = path; 9950 regdeps[regdepslen].file = CURR_SLOT.src_file; 9951 regdeps[regdepslen].line = CURR_SLOT.src_line; 9952 9953 print_dependency ("Adding", regdepslen); 9954 9955 ++regdepslen; 9956 } 9957 9958 static void 9959 print_dependency (const char *action, int depind) 9960 { 9961 if (md.debug_dv) 9962 { 9963 fprintf (stderr, " %s %s '%s'", 9964 action, dv_mode[(regdeps[depind].dependency)->mode], 9965 (regdeps[depind].dependency)->name); 9966 if (regdeps[depind].specific && regdeps[depind].index >= 0) 9967 fprintf (stderr, " (%d)", regdeps[depind].index); 9968 if (regdeps[depind].mem_offset.hint) 9969 { 9970 fputs (" ", stderr); 9971 fprintf_vma (stderr, regdeps[depind].mem_offset.base); 9972 fputs ("+", stderr); 9973 fprintf_vma (stderr, regdeps[depind].mem_offset.offset); 9974 } 9975 fprintf (stderr, "\n"); 9976 } 9977 } 9978 9979 static void 9980 instruction_serialization (void) 9981 { 9982 int i; 9983 if (md.debug_dv) 9984 fprintf (stderr, " Instruction serialization\n"); 9985 for (i = 0; i < regdepslen; i++) 9986 if (regdeps[i].insn_srlz == STATE_STOP) 9987 regdeps[i].insn_srlz = STATE_SRLZ; 9988 } 9989 9990 static void 9991 data_serialization (void) 9992 { 9993 int i = 0; 9994 if (md.debug_dv) 9995 fprintf (stderr, " Data serialization\n"); 9996 while (i < regdepslen) 9997 { 9998 if (regdeps[i].data_srlz == STATE_STOP 9999 /* Note: as of 991210, all "other" dependencies are cleared by a 10000 data serialization. 
This might change with new tables */ 10001 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER) 10002 { 10003 print_dependency ("Removing", i); 10004 regdeps[i] = regdeps[--regdepslen]; 10005 } 10006 else 10007 ++i; 10008 } 10009 } 10010 10011 /* Insert stops and serializations as needed to avoid DVs. */ 10012 10013 static void 10014 remove_marked_resource (struct rsrc *rs) 10015 { 10016 switch (rs->dependency->semantics) 10017 { 10018 case IA64_DVS_SPECIFIC: 10019 if (md.debug_dv) 10020 fprintf (stderr, "Implementation-specific, assume worst case...\n"); 10021 /* ...fall through... */ 10022 case IA64_DVS_INSTR: 10023 if (md.debug_dv) 10024 fprintf (stderr, "Inserting instr serialization\n"); 10025 if (rs->insn_srlz < STATE_STOP) 10026 insn_group_break (1, 0, 0); 10027 if (rs->insn_srlz < STATE_SRLZ) 10028 { 10029 struct slot oldslot = CURR_SLOT; 10030 /* Manually jam a srlz.i insn into the stream */ 10031 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT)); 10032 CURR_SLOT.user_template = -1; 10033 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i"); 10034 instruction_serialization (); 10035 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS; 10036 if (++md.num_slots_in_use >= NUM_SLOTS) 10037 emit_one_bundle (); 10038 CURR_SLOT = oldslot; 10039 } 10040 insn_group_break (1, 0, 0); 10041 break; 10042 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all 10043 "other" types of DV are eliminated 10044 by a data serialization */ 10045 case IA64_DVS_DATA: 10046 if (md.debug_dv) 10047 fprintf (stderr, "Inserting data serialization\n"); 10048 if (rs->data_srlz < STATE_STOP) 10049 insn_group_break (1, 0, 0); 10050 { 10051 struct slot oldslot = CURR_SLOT; 10052 /* Manually jam a srlz.d insn into the stream */ 10053 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT)); 10054 CURR_SLOT.user_template = -1; 10055 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d"); 10056 data_serialization (); 10057 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS; 10058 if (++md.num_slots_in_use >= NUM_SLOTS) 10059 emit_one_bundle (); 10060 CURR_SLOT = oldslot; 10061 } 10062 break; 10063 case IA64_DVS_IMPLIED: 10064 case IA64_DVS_IMPLIEDF: 10065 if (md.debug_dv) 10066 fprintf (stderr, "Inserting stop\n"); 10067 insn_group_break (1, 0, 0); 10068 break; 10069 default: 10070 break; 10071 } 10072 } 10073 10074 /* Check the resources used by the given opcode against the current dependency 10075 list. 10076 10077 The check is run once for each execution path encountered. In this case, 10078 a unique execution path is the sequence of instructions following a code 10079 entry point, e.g. the following has three execution paths, one starting 10080 at L0, one at L1, and one at L2. 10081 10082 L0: nop 10083 L1: add 10084 L2: add 10085 br.ret 10086 */ 10087 10088 static void 10089 check_dependencies (struct ia64_opcode *idesc) 10090 { 10091 const struct ia64_opcode_dependency *opdeps = idesc->dependencies; 10092 int path; 10093 int i; 10094 10095 /* Note that the number of marked resources may change within the 10096 loop if in auto mode. */ 10097 i = 0; 10098 while (i < regdepslen) 10099 { 10100 struct rsrc *rs = ®deps[i]; 10101 const struct ia64_dependency *dep = rs->dependency; 10102 int chkind; 10103 int note; 10104 int start_over = 0; 10105 10106 if (dep->semantics == IA64_DVS_NONE 10107 || (chkind = depends_on (rs->depind, idesc)) == -1) 10108 { 10109 ++i; 10110 continue; 10111 } 10112 10113 note = NOTE (opdeps->chks[chkind]); 10114 10115 /* Check this resource against each execution path seen thus far. 
*/ 10116 for (path = 0; path <= md.path; path++) 10117 { 10118 int matchtype; 10119 10120 /* If the dependency wasn't on the path being checked, ignore it. */ 10121 if (rs->path < path) 10122 continue; 10123 10124 /* If the QP for this insn implies a QP which has branched, don't 10125 bother checking. Ed. NOTE: I don't think this check is terribly 10126 useful; what's the point of generating code which will only be 10127 reached if its QP is zero? 10128 This code was specifically inserted to handle the following code, 10129 based on notes from Intel's DV checking code, where p1 implies p2. 10130 10131 mov r4 = 2 10132 (p2) br.cond L 10133 (p1) mov r4 = 7 10134 */ 10135 if (CURR_SLOT.qp_regno != 0) 10136 { 10137 int skip = 0; 10138 int implies; 10139 for (implies = 0; implies < qp_implieslen; implies++) 10140 { 10141 if (qp_implies[implies].path >= path 10142 && qp_implies[implies].p1 == CURR_SLOT.qp_regno 10143 && qp_implies[implies].p2_branched) 10144 { 10145 skip = 1; 10146 break; 10147 } 10148 } 10149 if (skip) 10150 continue; 10151 } 10152 10153 if ((matchtype = resources_match (rs, idesc, note, 10154 CURR_SLOT.qp_regno, path)) != 0) 10155 { 10156 char msg[1024]; 10157 char pathmsg[256] = ""; 10158 char indexmsg[256] = ""; 10159 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0); 10160 10161 if (path != 0) 10162 snprintf (pathmsg, sizeof (pathmsg), 10163 " when entry is at label '%s'", 10164 md.entry_labels[path - 1]); 10165 if (matchtype == 1 && rs->index >= 0) 10166 snprintf (indexmsg, sizeof (indexmsg), 10167 ", specific resource number is %d", 10168 rs->index); 10169 snprintf (msg, sizeof (msg), 10170 "Use of '%s' %s %s dependency '%s' (%s)%s%s", 10171 idesc->name, 10172 (certain ? "violates" : "may violate"), 10173 dv_mode[dep->mode], dep->name, 10174 dv_sem[dep->semantics], 10175 pathmsg, indexmsg); 10176 10177 if (md.explicit_mode) 10178 { 10179 as_warn ("%s", msg); 10180 if (path < md.path) 10181 as_warn (_("Only the first path encountering the conflict is reported")); 10182 as_warn_where (rs->file, rs->line, 10183 _("This is the location of the conflicting usage")); 10184 /* Don't bother checking other paths, to avoid duplicating 10185 the same warning */ 10186 break; 10187 } 10188 else 10189 { 10190 if (md.debug_dv) 10191 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line); 10192 10193 remove_marked_resource (rs); 10194 10195 /* since the set of dependencies has changed, start over */ 10196 /* FIXME -- since we're removing dvs as we go, we 10197 probably don't really need to start over... */ 10198 start_over = 1; 10199 break; 10200 } 10201 } 10202 } 10203 if (start_over) 10204 i = 0; 10205 else 10206 ++i; 10207 } 10208 } 10209 10210 /* Register new dependencies based on the given opcode. */ 10211 10212 static void 10213 mark_resources (struct ia64_opcode *idesc) 10214 { 10215 int i; 10216 const struct ia64_opcode_dependency *opdeps = idesc->dependencies; 10217 int add_only_qp_reads = 0; 10218 10219 /* A conditional branch only uses its resources if it is taken; if it is 10220 taken, we stop following that path. The other branch types effectively 10221 *always* write their resources. If it's not taken, register only QP 10222 reads. 
*/ 10223 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc)) 10224 { 10225 add_only_qp_reads = 1; 10226 } 10227 10228 if (md.debug_dv) 10229 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name); 10230 10231 for (i = 0; i < opdeps->nregs; i++) 10232 { 10233 const struct ia64_dependency *dep; 10234 struct rsrc specs[MAX_SPECS]; 10235 int note; 10236 int path; 10237 int count; 10238 10239 dep = ia64_find_dependency (opdeps->regs[i]); 10240 note = NOTE (opdeps->regs[i]); 10241 10242 if (add_only_qp_reads 10243 && !(dep->mode == IA64_DV_WAR 10244 && (dep->specifier == IA64_RS_PR 10245 || dep->specifier == IA64_RS_PRr 10246 || dep->specifier == IA64_RS_PR63))) 10247 continue; 10248 10249 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path); 10250 10251 while (count-- > 0) 10252 { 10253 mark_resource (idesc, dep, &specs[count], 10254 DEP (opdeps->regs[i]), md.path); 10255 } 10256 10257 /* The execution path may affect register values, which may in turn 10258 affect which indirect-access resources are accessed. */ 10259 switch (dep->specifier) 10260 { 10261 default: 10262 break; 10263 case IA64_RS_CPUID: 10264 case IA64_RS_DBR: 10265 case IA64_RS_IBR: 10266 case IA64_RS_MSR: 10267 case IA64_RS_PKR: 10268 case IA64_RS_PMC: 10269 case IA64_RS_PMD: 10270 case IA64_RS_RR: 10271 for (path = 0; path < md.path; path++) 10272 { 10273 count = specify_resource (dep, idesc, DV_REG, specs, note, path); 10274 while (count-- > 0) 10275 mark_resource (idesc, dep, &specs[count], 10276 DEP (opdeps->regs[i]), path); 10277 } 10278 break; 10279 } 10280 } 10281 } 10282 10283 /* Remove dependencies when they no longer apply. */ 10284 10285 static void 10286 update_dependencies (struct ia64_opcode *idesc) 10287 { 10288 int i; 10289 10290 if (strcmp (idesc->name, "srlz.i") == 0) 10291 { 10292 instruction_serialization (); 10293 } 10294 else if (strcmp (idesc->name, "srlz.d") == 0) 10295 { 10296 data_serialization (); 10297 } 10298 else if (is_interruption_or_rfi (idesc) 10299 || is_taken_branch (idesc)) 10300 { 10301 /* Although technically the taken branch doesn't clear dependencies 10302 which require a srlz.[id], we don't follow the branch; the next 10303 instruction is assumed to start with a clean slate. */ 10304 regdepslen = 0; 10305 md.path = 0; 10306 } 10307 else if (is_conditional_branch (idesc) 10308 && CURR_SLOT.qp_regno != 0) 10309 { 10310 int is_call = strstr (idesc->name, ".call") != NULL; 10311 10312 for (i = 0; i < qp_implieslen; i++) 10313 { 10314 /* If the conditional branch's predicate is implied by the predicate 10315 in an existing dependency, remove that dependency. */ 10316 if (qp_implies[i].p2 == CURR_SLOT.qp_regno) 10317 { 10318 int depind = 0; 10319 /* Note that this implied predicate takes a branch so that if 10320 a later insn generates a DV but its predicate implies this 10321 one, we can avoid the false DV warning. */ 10322 qp_implies[i].p2_branched = 1; 10323 while (depind < regdepslen) 10324 { 10325 if (regdeps[depind].qp_regno == qp_implies[i].p1) 10326 { 10327 print_dependency ("Removing", depind); 10328 regdeps[depind] = regdeps[--regdepslen]; 10329 } 10330 else 10331 ++depind; 10332 } 10333 } 10334 } 10335 /* Any marked resources which have this same predicate should be 10336 cleared, provided that the QP hasn't been modified between the 10337 marking instruction and the branch. 
*/ 10338 if (is_call) 10339 { 10340 insn_group_break (0, CURR_SLOT.qp_regno, 1); 10341 } 10342 else 10343 { 10344 i = 0; 10345 while (i < regdepslen) 10346 { 10347 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno 10348 && regdeps[i].link_to_qp_branch 10349 && (regdeps[i].file != CURR_SLOT.src_file 10350 || regdeps[i].line != CURR_SLOT.src_line)) 10351 { 10352 /* Treat like a taken branch */ 10353 print_dependency ("Removing", i); 10354 regdeps[i] = regdeps[--regdepslen]; 10355 } 10356 else 10357 ++i; 10358 } 10359 } 10360 } 10361 } 10362 10363 /* Examine the current instruction for dependency violations. */ 10364 10365 static int 10366 check_dv (struct ia64_opcode *idesc) 10367 { 10368 if (md.debug_dv) 10369 { 10370 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n", 10371 idesc->name, CURR_SLOT.src_line, 10372 idesc->dependencies->nchks, 10373 idesc->dependencies->nregs); 10374 } 10375 10376 /* Look through the list of currently marked resources; if the current 10377 instruction has the dependency in its chks list which uses that resource, 10378 check against the specific resources used. */ 10379 check_dependencies (idesc); 10380 10381 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads), 10382 then add them to the list of marked resources. */ 10383 mark_resources (idesc); 10384 10385 /* There are several types of dependency semantics, and each has its own 10386 requirements for being cleared 10387 10388 Instruction serialization (insns separated by interruption, rfi, or 10389 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR. 10390 10391 Data serialization (instruction serialization, or writer + srlz.d + 10392 reader, where writer and srlz.d are in separate groups) clears 10393 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to 10394 always be the case). 10395 10396 Instruction group break (groups separated by stop, taken branch, 10397 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF. 10398 */ 10399 update_dependencies (idesc); 10400 10401 /* Sometimes, knowing a register value allows us to avoid giving a false DV 10402 warning. Keep track of as many as possible that are useful. */ 10403 note_register_values (idesc); 10404 10405 /* We don't need or want this anymore. */ 10406 md.mem_offset.hint = 0; 10407 10408 return 0; 10409 } 10410 10411 /* Translate one line of assembly. Pseudo ops and labels do not show 10412 here. */ 10413 void 10414 md_assemble (char *str) 10415 { 10416 char *saved_input_line_pointer, *mnemonic; 10417 const struct pseudo_opcode *pdesc; 10418 struct ia64_opcode *idesc; 10419 unsigned char qp_regno; 10420 unsigned int flags; 10421 int ch; 10422 10423 saved_input_line_pointer = input_line_pointer; 10424 input_line_pointer = str; 10425 10426 /* extract the opcode (mnemonic): */ 10427 10428 mnemonic = input_line_pointer; 10429 ch = get_symbol_end (); 10430 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic); 10431 if (pdesc) 10432 { 10433 *input_line_pointer = ch; 10434 (*pdesc->handler) (pdesc->arg); 10435 goto done; 10436 } 10437 10438 /* Find the instruction descriptor matching the arguments. 
*/ 10439 10440 idesc = ia64_find_opcode (mnemonic); 10441 *input_line_pointer = ch; 10442 if (!idesc) 10443 { 10444 as_bad (_("Unknown opcode `%s'"), mnemonic); 10445 goto done; 10446 } 10447 10448 idesc = parse_operands (idesc); 10449 if (!idesc) 10450 goto done; 10451 10452 /* Handle the dynamic ops we can handle now: */ 10453 if (idesc->type == IA64_TYPE_DYN) 10454 { 10455 if (strcmp (idesc->name, "add") == 0) 10456 { 10457 if (CURR_SLOT.opnd[2].X_op == O_register 10458 && CURR_SLOT.opnd[2].X_add_number < 4) 10459 mnemonic = "addl"; 10460 else 10461 mnemonic = "adds"; 10462 ia64_free_opcode (idesc); 10463 idesc = ia64_find_opcode (mnemonic); 10464 } 10465 else if (strcmp (idesc->name, "mov") == 0) 10466 { 10467 enum ia64_opnd opnd1, opnd2; 10468 int rop; 10469 10470 opnd1 = idesc->operands[0]; 10471 opnd2 = idesc->operands[1]; 10472 if (opnd1 == IA64_OPND_AR3) 10473 rop = 0; 10474 else if (opnd2 == IA64_OPND_AR3) 10475 rop = 1; 10476 else 10477 abort (); 10478 if (CURR_SLOT.opnd[rop].X_op == O_register) 10479 { 10480 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number)) 10481 mnemonic = "mov.i"; 10482 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number)) 10483 mnemonic = "mov.m"; 10484 else 10485 rop = -1; 10486 } 10487 else 10488 abort (); 10489 if (rop >= 0) 10490 { 10491 ia64_free_opcode (idesc); 10492 idesc = ia64_find_opcode (mnemonic); 10493 while (idesc != NULL 10494 && (idesc->operands[0] != opnd1 10495 || idesc->operands[1] != opnd2)) 10496 idesc = get_next_opcode (idesc); 10497 } 10498 } 10499 } 10500 else if (strcmp (idesc->name, "mov.i") == 0 10501 || strcmp (idesc->name, "mov.m") == 0) 10502 { 10503 enum ia64_opnd opnd1, opnd2; 10504 int rop; 10505 10506 opnd1 = idesc->operands[0]; 10507 opnd2 = idesc->operands[1]; 10508 if (opnd1 == IA64_OPND_AR3) 10509 rop = 0; 10510 else if (opnd2 == IA64_OPND_AR3) 10511 rop = 1; 10512 else 10513 abort (); 10514 if (CURR_SLOT.opnd[rop].X_op == O_register) 10515 { 10516 char unit = 'a'; 10517 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number)) 10518 unit = 'i'; 10519 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number)) 10520 unit = 'm'; 10521 if (unit != 'a' && unit != idesc->name [4]) 10522 as_bad (_("AR %d can only be accessed by %c-unit"), 10523 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR), 10524 TOUPPER (unit)); 10525 } 10526 } 10527 else if (strcmp (idesc->name, "hint.b") == 0) 10528 { 10529 switch (md.hint_b) 10530 { 10531 case hint_b_ok: 10532 break; 10533 case hint_b_warning: 10534 as_warn (_("hint.b may be treated as nop")); 10535 break; 10536 case hint_b_error: 10537 as_bad (_("hint.b shouldn't be used")); 10538 break; 10539 } 10540 } 10541 10542 qp_regno = 0; 10543 if (md.qp.X_op == O_register) 10544 { 10545 qp_regno = md.qp.X_add_number - REG_P; 10546 md.qp.X_op = O_absent; 10547 } 10548 10549 flags = idesc->flags; 10550 10551 if ((flags & IA64_OPCODE_FIRST) != 0) 10552 { 10553 /* The alignment frag has to end with a stop bit only if the 10554 next instruction after the alignment directive has to be 10555 the first instruction in an instruction group. */ 10556 if (align_frag) 10557 { 10558 while (align_frag->fr_type != rs_align_code) 10559 { 10560 align_frag = align_frag->fr_next; 10561 if (!align_frag) 10562 break; 10563 } 10564 /* align_frag can be NULL if there are directives in 10565 between. 
*/ 10566 if (align_frag && align_frag->fr_next == frag_now) 10567 align_frag->tc_frag_data = 1; 10568 } 10569 10570 insn_group_break (1, 0, 0); 10571 } 10572 align_frag = NULL; 10573 10574 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0) 10575 { 10576 as_bad (_("`%s' cannot be predicated"), idesc->name); 10577 goto done; 10578 } 10579 10580 /* Build the instruction. */ 10581 CURR_SLOT.qp_regno = qp_regno; 10582 CURR_SLOT.idesc = idesc; 10583 as_where (&CURR_SLOT.src_file, &CURR_SLOT.src_line); 10584 dwarf2_where (&CURR_SLOT.debug_line); 10585 dwarf2_consume_line_info (); 10586 10587 /* Add unwind entries, if there are any. */ 10588 if (unwind.current_entry) 10589 { 10590 CURR_SLOT.unwind_record = unwind.current_entry; 10591 unwind.current_entry = NULL; 10592 } 10593 if (unwind.pending_saves) 10594 { 10595 if (unwind.pending_saves->next) 10596 { 10597 /* Attach the next pending save to the next slot so that its 10598 slot number will get set correctly. */ 10599 add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR); 10600 unwind.pending_saves = &unwind.pending_saves->next->r.record.p; 10601 } 10602 else 10603 unwind.pending_saves = NULL; 10604 } 10605 if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym)) 10606 unwind.insn = 1; 10607 10608 /* Check for dependency violations. */ 10609 if (md.detect_dv) 10610 check_dv (idesc); 10611 10612 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS; 10613 if (++md.num_slots_in_use >= NUM_SLOTS) 10614 emit_one_bundle (); 10615 10616 if ((flags & IA64_OPCODE_LAST) != 0) 10617 insn_group_break (1, 0, 0); 10618 10619 md.last_text_seg = now_seg; 10620 10621 done: 10622 input_line_pointer = saved_input_line_pointer; 10623 } 10624 10625 /* Called when symbol NAME cannot be found in the symbol table. 10626 Should be used for dynamic valued symbols only. */ 10627 10628 symbolS * 10629 md_undefined_symbol (char *name ATTRIBUTE_UNUSED) 10630 { 10631 return 0; 10632 } 10633 10634 /* Called for any expression that can not be recognized. When the 10635 function is called, `input_line_pointer' will point to the start of 10636 the expression. */ 10637 10638 void 10639 md_operand (expressionS *e) 10640 { 10641 switch (*input_line_pointer) 10642 { 10643 case '[': 10644 ++input_line_pointer; 10645 expression_and_evaluate (e); 10646 if (*input_line_pointer != ']') 10647 { 10648 as_bad (_("Closing bracket missing")); 10649 goto err; 10650 } 10651 else 10652 { 10653 if (e->X_op != O_register 10654 || e->X_add_number < REG_GR 10655 || e->X_add_number > REG_GR + 127) 10656 { 10657 as_bad (_("Index must be a general register")); 10658 e->X_add_number = REG_GR; 10659 } 10660 10661 ++input_line_pointer; 10662 e->X_op = O_index; 10663 } 10664 break; 10665 10666 default: 10667 break; 10668 } 10669 return; 10670 10671 err: 10672 ignore_rest_of_line (); 10673 } 10674 10675 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with 10676 a section symbol plus some offset. For relocs involving @fptr(), 10677 directives we don't want such adjustments since we need to have the 10678 original symbol's name in the reloc. 
*/ 10679 int 10680 ia64_fix_adjustable (fixS *fix) 10681 { 10682 /* Prevent all adjustments to global symbols */ 10683 if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy)) 10684 return 0; 10685 10686 switch (fix->fx_r_type) 10687 { 10688 case BFD_RELOC_IA64_FPTR64I: 10689 case BFD_RELOC_IA64_FPTR32MSB: 10690 case BFD_RELOC_IA64_FPTR32LSB: 10691 case BFD_RELOC_IA64_FPTR64MSB: 10692 case BFD_RELOC_IA64_FPTR64LSB: 10693 case BFD_RELOC_IA64_LTOFF_FPTR22: 10694 case BFD_RELOC_IA64_LTOFF_FPTR64I: 10695 return 0; 10696 default: 10697 break; 10698 } 10699 10700 return 1; 10701 } 10702 10703 int 10704 ia64_force_relocation (fixS *fix) 10705 { 10706 switch (fix->fx_r_type) 10707 { 10708 case BFD_RELOC_IA64_FPTR64I: 10709 case BFD_RELOC_IA64_FPTR32MSB: 10710 case BFD_RELOC_IA64_FPTR32LSB: 10711 case BFD_RELOC_IA64_FPTR64MSB: 10712 case BFD_RELOC_IA64_FPTR64LSB: 10713 10714 case BFD_RELOC_IA64_LTOFF22: 10715 case BFD_RELOC_IA64_LTOFF64I: 10716 case BFD_RELOC_IA64_LTOFF_FPTR22: 10717 case BFD_RELOC_IA64_LTOFF_FPTR64I: 10718 case BFD_RELOC_IA64_PLTOFF22: 10719 case BFD_RELOC_IA64_PLTOFF64I: 10720 case BFD_RELOC_IA64_PLTOFF64MSB: 10721 case BFD_RELOC_IA64_PLTOFF64LSB: 10722 10723 case BFD_RELOC_IA64_LTOFF22X: 10724 case BFD_RELOC_IA64_LDXMOV: 10725 return 1; 10726 10727 default: 10728 break; 10729 } 10730 10731 return generic_force_reloc (fix); 10732 } 10733 10734 /* Decide from what point a pc-relative relocation is relative to, 10735 relative to the pc-relative fixup. Er, relatively speaking. */ 10736 long 10737 ia64_pcrel_from_section (fixS *fix, segT sec) 10738 { 10739 unsigned long off = fix->fx_frag->fr_address + fix->fx_where; 10740 10741 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE) 10742 off &= ~0xfUL; 10743 10744 return off; 10745 } 10746 10747 10748 /* Used to emit section-relative relocs for the dwarf2 debug data. */ 10749 void 10750 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size) 10751 { 10752 expressionS expr; 10753 10754 expr.X_op = O_pseudo_fixup; 10755 expr.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym; 10756 expr.X_add_number = 0; 10757 expr.X_add_symbol = symbol; 10758 emit_expr (&expr, size); 10759 } 10760 10761 /* This is called whenever some data item (not an instruction) needs a 10762 fixup. We pick the right reloc code depending on the byteorder 10763 currently in effect. */ 10764 void 10765 ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp) 10766 { 10767 bfd_reloc_code_real_type code; 10768 fixS *fix; 10769 10770 switch (nbytes) 10771 { 10772 /* There are no reloc for 8 and 16 bit quantities, but we allow 10773 them here since they will work fine as long as the expression 10774 is fully defined at the end of the pass over the source file. */ 10775 case 1: code = BFD_RELOC_8; break; 10776 case 2: code = BFD_RELOC_16; break; 10777 case 4: 10778 if (target_big_endian) 10779 code = BFD_RELOC_IA64_DIR32MSB; 10780 else 10781 code = BFD_RELOC_IA64_DIR32LSB; 10782 break; 10783 10784 case 8: 10785 /* In 32-bit mode, data8 could mean function descriptors too. 
*/ 10786 if (exp->X_op == O_pseudo_fixup 10787 && exp->X_op_symbol 10788 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC 10789 && !(md.flags & EF_IA_64_ABI64)) 10790 { 10791 if (target_big_endian) 10792 code = BFD_RELOC_IA64_IPLTMSB; 10793 else 10794 code = BFD_RELOC_IA64_IPLTLSB; 10795 exp->X_op = O_symbol; 10796 break; 10797 } 10798 else 10799 { 10800 if (target_big_endian) 10801 code = BFD_RELOC_IA64_DIR64MSB; 10802 else 10803 code = BFD_RELOC_IA64_DIR64LSB; 10804 break; 10805 } 10806 10807 case 16: 10808 if (exp->X_op == O_pseudo_fixup 10809 && exp->X_op_symbol 10810 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC) 10811 { 10812 if (target_big_endian) 10813 code = BFD_RELOC_IA64_IPLTMSB; 10814 else 10815 code = BFD_RELOC_IA64_IPLTLSB; 10816 exp->X_op = O_symbol; 10817 break; 10818 } 10819 /* FALLTHRU */ 10820 10821 default: 10822 as_bad (_("Unsupported fixup size %d"), nbytes); 10823 ignore_rest_of_line (); 10824 return; 10825 } 10826 10827 if (exp->X_op == O_pseudo_fixup) 10828 { 10829 exp->X_op = O_symbol; 10830 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code); 10831 /* ??? If code unchanged, unsupported. */ 10832 } 10833 10834 fix = fix_new_exp (f, where, nbytes, exp, 0, code); 10835 /* We need to store the byte order in effect in case we're going 10836 to fix an 8 or 16 bit relocation (for which there no real 10837 relocs available). See md_apply_fix(). */ 10838 fix->tc_fix_data.bigendian = target_big_endian; 10839 } 10840 10841 /* Return the actual relocation we wish to associate with the pseudo 10842 reloc described by SYM and R_TYPE. SYM should be one of the 10843 symbols in the pseudo_func array, or NULL. */ 10844 10845 static bfd_reloc_code_real_type 10846 ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type) 10847 { 10848 bfd_reloc_code_real_type new = 0; 10849 const char *type = NULL, *suffix = ""; 10850 10851 if (sym == NULL) 10852 { 10853 return r_type; 10854 } 10855 10856 switch (S_GET_VALUE (sym)) 10857 { 10858 case FUNC_FPTR_RELATIVE: 10859 switch (r_type) 10860 { 10861 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_FPTR64I; break; 10862 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_FPTR32MSB; break; 10863 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_FPTR32LSB; break; 10864 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_FPTR64MSB; break; 10865 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_FPTR64LSB; break; 10866 default: type = "FPTR"; break; 10867 } 10868 break; 10869 10870 case FUNC_GP_RELATIVE: 10871 switch (r_type) 10872 { 10873 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_GPREL22; break; 10874 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_GPREL64I; break; 10875 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_GPREL32MSB; break; 10876 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_GPREL32LSB; break; 10877 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_GPREL64MSB; break; 10878 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_GPREL64LSB; break; 10879 default: type = "GPREL"; break; 10880 } 10881 break; 10882 10883 case FUNC_LT_RELATIVE: 10884 switch (r_type) 10885 { 10886 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22; break; 10887 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_LTOFF64I; break; 10888 default: type = "LTOFF"; break; 10889 } 10890 break; 10891 10892 case FUNC_LT_RELATIVE_X: 10893 switch (r_type) 10894 { 10895 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22X; break; 10896 default: type = "LTOFF"; suffix = "X"; break; 10897 } 10898 break; 10899 10900 case 
FUNC_PC_RELATIVE: 10901 switch (r_type) 10902 { 10903 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PCREL22; break; 10904 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PCREL64I; break; 10905 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_PCREL32MSB; break; 10906 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_PCREL32LSB; break; 10907 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PCREL64MSB; break; 10908 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PCREL64LSB; break; 10909 default: type = "PCREL"; break; 10910 } 10911 break; 10912 10913 case FUNC_PLT_RELATIVE: 10914 switch (r_type) 10915 { 10916 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PLTOFF22; break; 10917 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PLTOFF64I; break; 10918 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PLTOFF64MSB;break; 10919 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PLTOFF64LSB;break; 10920 default: type = "PLTOFF"; break; 10921 } 10922 break; 10923 10924 case FUNC_SEC_RELATIVE: 10925 switch (r_type) 10926 { 10927 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SECREL32MSB;break; 10928 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SECREL32LSB;break; 10929 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SECREL64MSB;break; 10930 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SECREL64LSB;break; 10931 default: type = "SECREL"; break; 10932 } 10933 break; 10934 10935 case FUNC_SEG_RELATIVE: 10936 switch (r_type) 10937 { 10938 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SEGREL32MSB;break; 10939 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SEGREL32LSB;break; 10940 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SEGREL64MSB;break; 10941 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SEGREL64LSB;break; 10942 default: type = "SEGREL"; break; 10943 } 10944 break; 10945 10946 case FUNC_LTV_RELATIVE: 10947 switch (r_type) 10948 { 10949 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_LTV32MSB; break; 10950 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_LTV32LSB; break; 10951 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_LTV64MSB; break; 10952 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_LTV64LSB; break; 10953 default: type = "LTV"; break; 10954 } 10955 break; 10956 10957 case FUNC_LT_FPTR_RELATIVE: 10958 switch (r_type) 10959 { 10960 case BFD_RELOC_IA64_IMM22: 10961 new = BFD_RELOC_IA64_LTOFF_FPTR22; break; 10962 case BFD_RELOC_IA64_IMM64: 10963 new = BFD_RELOC_IA64_LTOFF_FPTR64I; break; 10964 case BFD_RELOC_IA64_DIR32MSB: 10965 new = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break; 10966 case BFD_RELOC_IA64_DIR32LSB: 10967 new = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break; 10968 case BFD_RELOC_IA64_DIR64MSB: 10969 new = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break; 10970 case BFD_RELOC_IA64_DIR64LSB: 10971 new = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break; 10972 default: 10973 type = "LTOFF_FPTR"; break; 10974 } 10975 break; 10976 10977 case FUNC_TP_RELATIVE: 10978 switch (r_type) 10979 { 10980 case BFD_RELOC_IA64_IMM14: new = BFD_RELOC_IA64_TPREL14; break; 10981 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_TPREL22; break; 10982 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_TPREL64I; break; 10983 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_TPREL64MSB; break; 10984 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_TPREL64LSB; break; 10985 default: type = "TPREL"; break; 10986 } 10987 break; 10988 10989 case FUNC_LT_TP_RELATIVE: 10990 switch (r_type) 10991 { 10992 case BFD_RELOC_IA64_IMM22: 10993 new = BFD_RELOC_IA64_LTOFF_TPREL22; break; 10994 default: 
10995 type = "LTOFF_TPREL"; break; 10996 } 10997 break; 10998 10999 case FUNC_DTP_MODULE: 11000 switch (r_type) 11001 { 11002 case BFD_RELOC_IA64_DIR64MSB: 11003 new = BFD_RELOC_IA64_DTPMOD64MSB; break; 11004 case BFD_RELOC_IA64_DIR64LSB: 11005 new = BFD_RELOC_IA64_DTPMOD64LSB; break; 11006 default: 11007 type = "DTPMOD"; break; 11008 } 11009 break; 11010 11011 case FUNC_LT_DTP_MODULE: 11012 switch (r_type) 11013 { 11014 case BFD_RELOC_IA64_IMM22: 11015 new = BFD_RELOC_IA64_LTOFF_DTPMOD22; break; 11016 default: 11017 type = "LTOFF_DTPMOD"; break; 11018 } 11019 break; 11020 11021 case FUNC_DTP_RELATIVE: 11022 switch (r_type) 11023 { 11024 case BFD_RELOC_IA64_DIR32MSB: 11025 new = BFD_RELOC_IA64_DTPREL32MSB; break; 11026 case BFD_RELOC_IA64_DIR32LSB: 11027 new = BFD_RELOC_IA64_DTPREL32LSB; break; 11028 case BFD_RELOC_IA64_DIR64MSB: 11029 new = BFD_RELOC_IA64_DTPREL64MSB; break; 11030 case BFD_RELOC_IA64_DIR64LSB: 11031 new = BFD_RELOC_IA64_DTPREL64LSB; break; 11032 case BFD_RELOC_IA64_IMM14: 11033 new = BFD_RELOC_IA64_DTPREL14; break; 11034 case BFD_RELOC_IA64_IMM22: 11035 new = BFD_RELOC_IA64_DTPREL22; break; 11036 case BFD_RELOC_IA64_IMM64: 11037 new = BFD_RELOC_IA64_DTPREL64I; break; 11038 default: 11039 type = "DTPREL"; break; 11040 } 11041 break; 11042 11043 case FUNC_LT_DTP_RELATIVE: 11044 switch (r_type) 11045 { 11046 case BFD_RELOC_IA64_IMM22: 11047 new = BFD_RELOC_IA64_LTOFF_DTPREL22; break; 11048 default: 11049 type = "LTOFF_DTPREL"; break; 11050 } 11051 break; 11052 11053 case FUNC_IPLT_RELOC: 11054 switch (r_type) 11055 { 11056 case BFD_RELOC_IA64_IPLTMSB: return r_type; 11057 case BFD_RELOC_IA64_IPLTLSB: return r_type; 11058 default: type = "IPLT"; break; 11059 } 11060 break; 11061 11062 default: 11063 abort (); 11064 } 11065 11066 if (new) 11067 return new; 11068 else 11069 { 11070 int width; 11071 11072 if (!type) 11073 abort (); 11074 switch (r_type) 11075 { 11076 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break; 11077 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break; 11078 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break; 11079 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break; 11080 case BFD_RELOC_UNUSED: width = 13; break; 11081 case BFD_RELOC_IA64_IMM14: width = 14; break; 11082 case BFD_RELOC_IA64_IMM22: width = 22; break; 11083 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break; 11084 default: abort (); 11085 } 11086 11087 /* This should be an error, but since previously there wasn't any 11088 diagnostic here, don't make it fail because of this for now. */ 11089 as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix); 11090 return r_type; 11091 } 11092 } 11093 11094 /* Here is where generate the appropriate reloc for pseudo relocation 11095 functions. 
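   As an illustrative consequence, an @fptr() operand that ends up with
   a non-zero addend, e.g. something written along the lines of

	data8	@fptr(foo) + 8

   is rejected by ia64_validate_fix below, because the @fptr() family
   of relocations does not allow an addend.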
*/ 11096 void 11097 ia64_validate_fix (fixS *fix) 11098 { 11099 switch (fix->fx_r_type) 11100 { 11101 case BFD_RELOC_IA64_FPTR64I: 11102 case BFD_RELOC_IA64_FPTR32MSB: 11103 case BFD_RELOC_IA64_FPTR64LSB: 11104 case BFD_RELOC_IA64_LTOFF_FPTR22: 11105 case BFD_RELOC_IA64_LTOFF_FPTR64I: 11106 if (fix->fx_offset != 0) 11107 as_bad_where (fix->fx_file, fix->fx_line, 11108 _("No addend allowed in @fptr() relocation")); 11109 break; 11110 default: 11111 break; 11112 } 11113 } 11114 11115 static void 11116 fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value) 11117 { 11118 bfd_vma insn[3], t0, t1, control_bits; 11119 const char *err; 11120 char *fixpos; 11121 long slot; 11122 11123 slot = fix->fx_where & 0x3; 11124 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot); 11125 11126 /* Bundles are always in little-endian byte order */ 11127 t0 = bfd_getl64 (fixpos); 11128 t1 = bfd_getl64 (fixpos + 8); 11129 control_bits = t0 & 0x1f; 11130 insn[0] = (t0 >> 5) & 0x1ffffffffffLL; 11131 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18); 11132 insn[2] = (t1 >> 23) & 0x1ffffffffffLL; 11133 11134 err = NULL; 11135 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64) 11136 { 11137 insn[1] = (value >> 22) & 0x1ffffffffffLL; 11138 insn[2] |= (((value & 0x7f) << 13) 11139 | (((value >> 7) & 0x1ff) << 27) 11140 | (((value >> 16) & 0x1f) << 22) 11141 | (((value >> 21) & 0x1) << 21) 11142 | (((value >> 63) & 0x1) << 36)); 11143 } 11144 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62) 11145 { 11146 if (value & ~0x3fffffffffffffffULL) 11147 err = "integer operand out of range"; 11148 insn[1] = (value >> 21) & 0x1ffffffffffLL; 11149 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36)); 11150 } 11151 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64) 11152 { 11153 value >>= 4; 11154 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2; 11155 insn[2] |= ((((value >> 59) & 0x1) << 36) 11156 | (((value >> 0) & 0xfffff) << 13)); 11157 } 11158 else 11159 err = (*odesc->insert) (odesc, value, insn + slot); 11160 11161 if (err) 11162 as_bad_where (fix->fx_file, fix->fx_line, err); 11163 11164 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46); 11165 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23); 11166 number_to_chars_littleendian (fixpos + 0, t0, 8); 11167 number_to_chars_littleendian (fixpos + 8, t1, 8); 11168 } 11169 11170 /* Attempt to simplify or even eliminate a fixup. The return value is 11171 ignored; perhaps it was once meaningful, but now it is historical. 11172 To indicate that a fixup has been eliminated, set FIXP->FX_DONE. 11173 11174 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry 11175 (if possible). 
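   In outline: pc-relative fixups that do not already use a PCREL reloc
   are first remapped through the @pcrel pseudo-function; fixups that
   still reference a symbol either get diagnosed (TAG13/TAG13b operands
   have no external relocs), have that symbol marked thread-local for
   the TPREL/DTPREL family, or are simply left for tc_gen_reloc; fully
   resolved values are patched into the frag directly, as raw data bytes
   or via fix_insn() for instruction operands, and fx_done is set.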
*/ 11176 11177 void 11178 md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED) 11179 { 11180 char *fixpos; 11181 valueT value = *valP; 11182 11183 fixpos = fix->fx_frag->fr_literal + fix->fx_where; 11184 11185 if (fix->fx_pcrel) 11186 { 11187 switch (fix->fx_r_type) 11188 { 11189 case BFD_RELOC_IA64_PCREL21B: break; 11190 case BFD_RELOC_IA64_PCREL21BI: break; 11191 case BFD_RELOC_IA64_PCREL21F: break; 11192 case BFD_RELOC_IA64_PCREL21M: break; 11193 case BFD_RELOC_IA64_PCREL60B: break; 11194 case BFD_RELOC_IA64_PCREL22: break; 11195 case BFD_RELOC_IA64_PCREL64I: break; 11196 case BFD_RELOC_IA64_PCREL32MSB: break; 11197 case BFD_RELOC_IA64_PCREL32LSB: break; 11198 case BFD_RELOC_IA64_PCREL64MSB: break; 11199 case BFD_RELOC_IA64_PCREL64LSB: break; 11200 default: 11201 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym, 11202 fix->fx_r_type); 11203 break; 11204 } 11205 } 11206 if (fix->fx_addsy) 11207 { 11208 switch (fix->fx_r_type) 11209 { 11210 case BFD_RELOC_UNUSED: 11211 /* This must be a TAG13 or TAG13b operand. There are no external 11212 relocs defined for them, so we must give an error. */ 11213 as_bad_where (fix->fx_file, fix->fx_line, 11214 _("%s must have a constant value"), 11215 elf64_ia64_operands[fix->tc_fix_data.opnd].desc); 11216 fix->fx_done = 1; 11217 return; 11218 11219 case BFD_RELOC_IA64_TPREL14: 11220 case BFD_RELOC_IA64_TPREL22: 11221 case BFD_RELOC_IA64_TPREL64I: 11222 case BFD_RELOC_IA64_LTOFF_TPREL22: 11223 case BFD_RELOC_IA64_LTOFF_DTPMOD22: 11224 case BFD_RELOC_IA64_DTPREL14: 11225 case BFD_RELOC_IA64_DTPREL22: 11226 case BFD_RELOC_IA64_DTPREL64I: 11227 case BFD_RELOC_IA64_LTOFF_DTPREL22: 11228 S_SET_THREAD_LOCAL (fix->fx_addsy); 11229 break; 11230 11231 default: 11232 break; 11233 } 11234 } 11235 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL) 11236 { 11237 if (fix->tc_fix_data.bigendian) 11238 number_to_chars_bigendian (fixpos, value, fix->fx_size); 11239 else 11240 number_to_chars_littleendian (fixpos, value, fix->fx_size); 11241 fix->fx_done = 1; 11242 } 11243 else 11244 { 11245 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value); 11246 fix->fx_done = 1; 11247 } 11248 } 11249 11250 /* Generate the BFD reloc to be stuck in the object file from the 11251 fixup used internally in the assembler. */ 11252 11253 arelent * 11254 tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp) 11255 { 11256 arelent *reloc; 11257 11258 reloc = xmalloc (sizeof (*reloc)); 11259 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *)); 11260 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy); 11261 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where; 11262 reloc->addend = fixp->fx_offset; 11263 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type); 11264 11265 if (!reloc->howto) 11266 { 11267 as_bad_where (fixp->fx_file, fixp->fx_line, 11268 _("Cannot represent %s relocation in object file"), 11269 bfd_get_reloc_code_name (fixp->fx_r_type)); 11270 free (reloc); 11271 return NULL; 11272 } 11273 return reloc; 11274 } 11275 11276 /* Turn a string in input_line_pointer into a floating point constant 11277 of type TYPE, and store the appropriate bytes in *LIT. The number 11278 of LITTLENUMS emitted is stored in *SIZE. An error message is 11279 returned, or NULL on OK. 
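   The precision follows the type character: `f'/`s' produce two
   LITTLENUMs (single precision), `d'/`r' four (double), and `x'/`p'
   five (80-bit extended); for uppercase `X' the 10-byte value is
   additionally zero-padded out to 16 bytes.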
*/ 11280 11281 #define MAX_LITTLENUMS 5 11282 11283 char * 11284 md_atof (int type, char *lit, int *size) 11285 { 11286 LITTLENUM_TYPE words[MAX_LITTLENUMS]; 11287 char *t; 11288 int prec; 11289 11290 switch (type) 11291 { 11292 /* IEEE floats */ 11293 case 'f': 11294 case 'F': 11295 case 's': 11296 case 'S': 11297 prec = 2; 11298 break; 11299 11300 case 'd': 11301 case 'D': 11302 case 'r': 11303 case 'R': 11304 prec = 4; 11305 break; 11306 11307 case 'x': 11308 case 'X': 11309 case 'p': 11310 case 'P': 11311 prec = 5; 11312 break; 11313 11314 default: 11315 *size = 0; 11316 return _("Unrecognized or unsupported floating point constant"); 11317 } 11318 t = atof_ieee (input_line_pointer, type, words); 11319 if (t) 11320 input_line_pointer = t; 11321 11322 (*ia64_float_to_chars) (lit, words, prec); 11323 11324 if (type == 'X') 11325 { 11326 /* It is 10 byte floating point with 6 byte padding. */ 11327 memset (&lit [10], 0, 6); 11328 *size = 8 * sizeof (LITTLENUM_TYPE); 11329 } 11330 else 11331 *size = prec * sizeof (LITTLENUM_TYPE); 11332 11333 return NULL; 11334 } 11335 11336 /* Handle ia64 specific semantics of the align directive. */ 11337 11338 void 11339 ia64_md_do_align (int n ATTRIBUTE_UNUSED, 11340 const char *fill ATTRIBUTE_UNUSED, 11341 int len ATTRIBUTE_UNUSED, 11342 int max ATTRIBUTE_UNUSED) 11343 { 11344 if (subseg_text_p (now_seg)) 11345 ia64_flush_insns (); 11346 } 11347 11348 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents 11349 of an rs_align_code fragment. */ 11350 11351 void 11352 ia64_handle_align (fragS *fragp) 11353 { 11354 int bytes; 11355 char *p; 11356 const unsigned char *nop; 11357 11358 if (fragp->fr_type != rs_align_code) 11359 return; 11360 11361 /* Check if this frag has to end with a stop bit. */ 11362 nop = fragp->tc_frag_data ? le_nop_stop : le_nop; 11363 11364 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix; 11365 p = fragp->fr_literal + fragp->fr_fix; 11366 11367 /* If no paddings are needed, we check if we need a stop bit. */ 11368 if (!bytes && fragp->tc_frag_data) 11369 { 11370 if (fragp->fr_fix < 16) 11371 #if 1 11372 /* FIXME: It won't work with 11373 .align 16 11374 alloc r32=ar.pfs,1,2,4,0 11375 */ 11376 ; 11377 #else 11378 as_bad_where (fragp->fr_file, fragp->fr_line, 11379 _("Can't add stop bit to mark end of instruction group")); 11380 #endif 11381 else 11382 /* Bundles are always in little-endian byte order. Make sure 11383 the previous bundle has the stop bit. */ 11384 *(p - 16) |= 1; 11385 } 11386 11387 /* Make sure we are on a 16-byte boundary, in case someone has been 11388 putting data into a text section. */ 11389 if (bytes & 15) 11390 { 11391 int fix = bytes & 15; 11392 memset (p, 0, fix); 11393 p += fix; 11394 bytes -= fix; 11395 fragp->fr_fix += fix; 11396 } 11397 11398 /* Instruction bundles are always little-endian. 
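     The fill pattern written below is a full 16-byte bundle of nops
     (le_nop, or le_nop_stop when a trailing stop bit is required), and
     fr_var is set to 16 so that the generic alignment code repeats the
     pattern once per 16 bytes of padding.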
*/ 11399 memcpy (p, nop, 16); 11400 fragp->fr_var = 16; 11401 } 11402 11403 static void 11404 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words, 11405 int prec) 11406 { 11407 while (prec--) 11408 { 11409 number_to_chars_bigendian (lit, (long) (*words++), 11410 sizeof (LITTLENUM_TYPE)); 11411 lit += sizeof (LITTLENUM_TYPE); 11412 } 11413 } 11414 11415 static void 11416 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words, 11417 int prec) 11418 { 11419 while (prec--) 11420 { 11421 number_to_chars_littleendian (lit, (long) (words[prec]), 11422 sizeof (LITTLENUM_TYPE)); 11423 lit += sizeof (LITTLENUM_TYPE); 11424 } 11425 } 11426 11427 void 11428 ia64_elf_section_change_hook (void) 11429 { 11430 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND 11431 && elf_linked_to_section (now_seg) == NULL) 11432 elf_linked_to_section (now_seg) = text_section; 11433 dot_byteorder (-1); 11434 } 11435 11436 /* Check if a label should be made global. */ 11437 void 11438 ia64_check_label (symbolS *label) 11439 { 11440 if (*input_line_pointer == ':') 11441 { 11442 S_SET_EXTERNAL (label); 11443 input_line_pointer++; 11444 } 11445 } 11446 11447 /* Used to remember where .alias and .secalias directives are seen. We 11448 will rename symbol and section names when we are about to output 11449 the relocatable file. */ 11450 struct alias 11451 { 11452 char *file; /* The file where the directive is seen. */ 11453 unsigned int line; /* The line number the directive is at. */ 11454 const char *name; /* The original name of the symbol. */ 11455 }; 11456 11457 /* Called for .alias and .secalias directives. If SECTION is 1, it is 11458 .secalias. Otherwise, it is .alias. */ 11459 static void 11460 dot_alias (int section) 11461 { 11462 char *name, *alias; 11463 char delim; 11464 char *end_name; 11465 int len; 11466 const char *error_string; 11467 struct alias *h; 11468 const char *a; 11469 struct hash_control *ahash, *nhash; 11470 const char *kind; 11471 11472 name = input_line_pointer; 11473 delim = get_symbol_end (); 11474 end_name = input_line_pointer; 11475 *end_name = delim; 11476 11477 if (name == end_name) 11478 { 11479 as_bad (_("expected symbol name")); 11480 ignore_rest_of_line (); 11481 return; 11482 } 11483 11484 SKIP_WHITESPACE (); 11485 11486 if (*input_line_pointer != ',') 11487 { 11488 *end_name = 0; 11489 as_bad (_("expected comma after \"%s\""), name); 11490 *end_name = delim; 11491 ignore_rest_of_line (); 11492 return; 11493 } 11494 11495 input_line_pointer++; 11496 *end_name = 0; 11497 ia64_canonicalize_symbol_name (name); 11498 11499 /* We call demand_copy_C_string to check if alias string is valid. 11500 There should be a closing `"' and no `\0' in the string. */ 11501 alias = demand_copy_C_string (&len); 11502 if (alias == NULL) 11503 { 11504 ignore_rest_of_line (); 11505 return; 11506 } 11507 11508 /* Make a copy of name string. */ 11509 len = strlen (name) + 1; 11510 obstack_grow (¬es, name, len); 11511 name = obstack_finish (¬es); 11512 11513 if (section) 11514 { 11515 kind = "section"; 11516 ahash = secalias_hash; 11517 nhash = secalias_name_hash; 11518 } 11519 else 11520 { 11521 kind = "symbol"; 11522 ahash = alias_hash; 11523 nhash = alias_name_hash; 11524 } 11525 11526 /* Check if alias has been used before. 
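     (Re-declaring the exact same pair is silently accepted; only
     binding the same quoted alias to a different symbol or section is
     diagnosed below.)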
*/ 11527 h = (struct alias *) hash_find (ahash, alias); 11528 if (h) 11529 { 11530 if (strcmp (h->name, name)) 11531 as_bad (_("`%s' is already the alias of %s `%s'"), 11532 alias, kind, h->name); 11533 goto out; 11534 } 11535 11536 /* Check if name already has an alias. */ 11537 a = (const char *) hash_find (nhash, name); 11538 if (a) 11539 { 11540 if (strcmp (a, alias)) 11541 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a); 11542 goto out; 11543 } 11544 11545 h = (struct alias *) xmalloc (sizeof (struct alias)); 11546 as_where (&h->file, &h->line); 11547 h->name = name; 11548 11549 error_string = hash_jam (ahash, alias, (void *) h); 11550 if (error_string) 11551 { 11552 as_fatal (_("inserting \"%s\" into %s alias hash table failed: %s"), 11553 alias, kind, error_string); 11554 goto out; 11555 } 11556 11557 error_string = hash_jam (nhash, name, (void *) alias); 11558 if (error_string) 11559 { 11560 as_fatal (_("inserting \"%s\" into %s name hash table failed: %s"), 11561 alias, kind, error_string); 11562 out: 11563 obstack_free (¬es, name); 11564 obstack_free (¬es, alias); 11565 } 11566 11567 demand_empty_rest_of_line (); 11568 } 11569 11570 /* It renames the original symbol name to its alias. */ 11571 static void 11572 do_alias (const char *alias, void *value) 11573 { 11574 struct alias *h = (struct alias *) value; 11575 symbolS *sym = symbol_find (h->name); 11576 11577 if (sym == NULL) 11578 as_warn_where (h->file, h->line, 11579 _("symbol `%s' aliased to `%s' is not used"), 11580 h->name, alias); 11581 else 11582 S_SET_NAME (sym, (char *) alias); 11583 } 11584 11585 /* Called from write_object_file. */ 11586 void 11587 ia64_adjust_symtab (void) 11588 { 11589 hash_traverse (alias_hash, do_alias); 11590 } 11591 11592 /* It renames the original section name to its alias. */ 11593 static void 11594 do_secalias (const char *alias, void *value) 11595 { 11596 struct alias *h = (struct alias *) value; 11597 segT sec = bfd_get_section_by_name (stdoutput, h->name); 11598 11599 if (sec == NULL) 11600 as_warn_where (h->file, h->line, 11601 _("section `%s' aliased to `%s' is not used"), 11602 h->name, alias); 11603 else 11604 sec->name = alias; 11605 } 11606 11607 /* Called from write_object_file. */ 11608 void 11609 ia64_frob_file (void) 11610 { 11611 hash_traverse (secalias_hash, do_secalias); 11612 } 11613
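
/* An illustrative use of the machinery above (assumed directive syntax,
   shown only as an example):

	.alias		foo, "foo?special"
	.secalias	.mydata, "my$data$section"

   dot_alias () merely records each pair; the actual renaming happens
   from write_object_file via ia64_adjust_symtab (symbol aliases) and
   ia64_frob_file (section aliases), and a pair whose symbol or section
   never materializes only draws a warning.  */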