/* Definitions of target machine for GNU compiler, for IBM S/390
   Copyright (C) 1999-2022 Free Software Foundation, Inc.
   Contributed by Hartmut Penner (hpenner@de.ibm.com) and
                  Ulrich Weigand (uweigand@de.ibm.com).
                  Andreas Krebbel (Andreas.Krebbel@de.ibm.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef _S390_H
#define _S390_H

/* Optional architectural facilities supported by the processor.  */

enum processor_flags
{
  PF_IEEE_FLOAT = 1,
  PF_ZARCH = 2,
  PF_LONG_DISPLACEMENT = 4,
  PF_EXTIMM = 8,
  PF_DFP = 16,
  PF_Z10 = 32,
  PF_Z196 = 64,
  PF_ZEC12 = 128,
  PF_TX = 256,
  PF_Z13 = 512,
  PF_VX = 1024,
  PF_Z14 = 2048,
  PF_VXE = 4096,
  PF_VXE2 = 8192,
  PF_Z15 = 16384,
  PF_NNPA = 32768,
  PF_Z16 = 65536
};

/* This is necessary to avoid a warning about comparing different enum
   types.  */
#define s390_tune_attr \
  ((enum attr_cpu)(s390_tune > PROCESSOR_3931_Z16 ? PROCESSOR_3931_Z16 : s390_tune))

/* These flags indicate that the generated code should run on a cpu
   providing the respective hardware facility regardless of the
   current cpu mode (ESA or z/Architecture).  */
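/* Illustrative sketch only (not part of the interface): s390_arch_flags
   is simply an OR of the PF_* bits above, so a facility check such as
   the TARGET_CPU_VX macro below boils down to a bit test, roughly

     if (s390_arch_flags & PF_VX)
       ... the vector facility is available on the selected -march ...

   The macros that follow wrap exactly this kind of test.  */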
#define TARGET_CPU_IEEE_FLOAT \
  (s390_arch_flags & PF_IEEE_FLOAT)
#define TARGET_CPU_IEEE_FLOAT_P(opts) \
  (opts->x_s390_arch_flags & PF_IEEE_FLOAT)
#define TARGET_CPU_LONG_DISPLACEMENT \
  (s390_arch_flags & PF_LONG_DISPLACEMENT)
#define TARGET_CPU_LONG_DISPLACEMENT_P(opts) \
  (opts->x_s390_arch_flags & PF_LONG_DISPLACEMENT)
#define TARGET_CPU_EXTIMM \
  (s390_arch_flags & PF_EXTIMM)
#define TARGET_CPU_EXTIMM_P(opts) \
  (opts->x_s390_arch_flags & PF_EXTIMM)
#define TARGET_CPU_DFP \
  (s390_arch_flags & PF_DFP)
#define TARGET_CPU_DFP_P(opts) \
  (opts->x_s390_arch_flags & PF_DFP)
#define TARGET_CPU_Z10 \
  (s390_arch_flags & PF_Z10)
#define TARGET_CPU_Z10_P(opts) \
  (opts->x_s390_arch_flags & PF_Z10)
#define TARGET_CPU_Z196 \
  (s390_arch_flags & PF_Z196)
#define TARGET_CPU_Z196_P(opts) \
  (opts->x_s390_arch_flags & PF_Z196)
#define TARGET_CPU_ZEC12 \
  (s390_arch_flags & PF_ZEC12)
#define TARGET_CPU_ZEC12_P(opts) \
  (opts->x_s390_arch_flags & PF_ZEC12)
#define TARGET_CPU_HTM \
  (s390_arch_flags & PF_TX)
#define TARGET_CPU_HTM_P(opts) \
  (opts->x_s390_arch_flags & PF_TX)
#define TARGET_CPU_Z13 \
  (s390_arch_flags & PF_Z13)
#define TARGET_CPU_Z13_P(opts) \
  (opts->x_s390_arch_flags & PF_Z13)
#define TARGET_CPU_VX \
  (s390_arch_flags & PF_VX)
#define TARGET_CPU_VX_P(opts) \
  (opts->x_s390_arch_flags & PF_VX)
#define TARGET_CPU_Z14 \
  (s390_arch_flags & PF_Z14)
#define TARGET_CPU_Z14_P(opts) \
  (opts->x_s390_arch_flags & PF_Z14)
#define TARGET_CPU_VXE \
  (s390_arch_flags & PF_VXE)
#define TARGET_CPU_VXE_P(opts) \
  (opts->x_s390_arch_flags & PF_VXE)
#define TARGET_CPU_Z15 \
  (s390_arch_flags & PF_Z15)
#define TARGET_CPU_Z15_P(opts) \
  (opts->x_s390_arch_flags & PF_Z15)
#define TARGET_CPU_VXE2 \
  (s390_arch_flags & PF_VXE2)
#define TARGET_CPU_VXE2_P(opts) \
  (opts->x_s390_arch_flags & PF_VXE2)
#define TARGET_CPU_Z16 \
  (s390_arch_flags & PF_Z16)
#define TARGET_CPU_Z16_P(opts) \
  (opts->x_s390_arch_flags & PF_Z16)
#define TARGET_CPU_NNPA \
  (s390_arch_flags & PF_NNPA)
#define TARGET_CPU_NNPA_P(opts) \
  (opts->x_s390_arch_flags & PF_NNPA)

#define TARGET_HARD_FLOAT_P(opts) (!TARGET_SOFT_FLOAT_P(opts))
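/* Minimal usage sketch (illustration only): the _P variants take a
   pointer to a struct gcc_options, so the same facility test can be
   applied to an option set other than the global one, e.g.

     if (TARGET_CPU_VX_P (&global_options))
       ...

   is equivalent to testing TARGET_CPU_VX, while a different options
   struct (such as the one built while validating a target attribute)
   can be passed instead.  */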
/* These flags indicate that the generated code should run on a cpu
   providing the respective hardware facility when run in
   z/Architecture mode.  */

#define TARGET_LONG_DISPLACEMENT \
  (TARGET_ZARCH && TARGET_CPU_LONG_DISPLACEMENT)
#define TARGET_LONG_DISPLACEMENT_P(opts) \
  (TARGET_ZARCH_P (opts->x_target_flags) \
   && TARGET_CPU_LONG_DISPLACEMENT_P (opts))
#define TARGET_EXTIMM \
  (TARGET_ZARCH && TARGET_CPU_EXTIMM)
#define TARGET_EXTIMM_P(opts) \
  (TARGET_ZARCH_P (opts->x_target_flags) && TARGET_CPU_EXTIMM_P (opts))
#define TARGET_DFP \
  (TARGET_ZARCH && TARGET_CPU_DFP && TARGET_HARD_FLOAT)
#define TARGET_DFP_P(opts) \
  (TARGET_ZARCH_P (opts->x_target_flags) && TARGET_CPU_DFP_P (opts) \
   && TARGET_HARD_FLOAT_P (opts->x_target_flags))
#define TARGET_Z10 \
  (TARGET_ZARCH && TARGET_CPU_Z10)
#define TARGET_Z10_P(opts) \
  (TARGET_ZARCH_P (opts->x_target_flags) && TARGET_CPU_Z10_P (opts))
#define TARGET_Z196 \
  (TARGET_ZARCH && TARGET_CPU_Z196)
#define TARGET_Z196_P(opts) \
  (TARGET_ZARCH_P (opts->x_target_flags) && TARGET_CPU_Z196_P (opts))
#define TARGET_ZEC12 \
  (TARGET_ZARCH && TARGET_CPU_ZEC12)
#define TARGET_ZEC12_P(opts) \
  (TARGET_ZARCH_P (opts->x_target_flags) && TARGET_CPU_ZEC12_P (opts))
#define TARGET_HTM (TARGET_OPT_HTM)
#define TARGET_HTM_P(opts) (TARGET_OPT_HTM_P (opts->x_target_flags))
#define TARGET_Z13 \
  (TARGET_ZARCH && TARGET_CPU_Z13)
#define TARGET_Z13_P(opts) \
  (TARGET_ZARCH_P (opts->x_target_flags) && TARGET_CPU_Z13_P (opts))
#define TARGET_VX \
  (TARGET_ZARCH && TARGET_CPU_VX && TARGET_OPT_VX && TARGET_HARD_FLOAT)
#define TARGET_VX_P(opts) \
  (TARGET_ZARCH_P (opts->x_target_flags) && TARGET_CPU_VX_P (opts) \
   && TARGET_OPT_VX_P (opts->x_target_flags) \
   && TARGET_HARD_FLOAT_P (opts->x_target_flags))
#define TARGET_Z14 (TARGET_ZARCH && TARGET_CPU_Z14)
#define TARGET_Z14_P(opts) \
  (TARGET_ZARCH_P (opts->x_target_flags) && TARGET_CPU_Z14_P (opts))
#define TARGET_VXE \
  (TARGET_VX && TARGET_CPU_VXE)
#define TARGET_VXE_P(opts) \
  (TARGET_VX_P (opts) && TARGET_CPU_VXE_P (opts))
#define TARGET_Z15 (TARGET_ZARCH && TARGET_CPU_Z15)
#define TARGET_Z15_P(opts) \
  (TARGET_ZARCH_P (opts->x_target_flags) && TARGET_CPU_Z15_P (opts))
#define TARGET_VXE2 \
  (TARGET_VX && TARGET_CPU_VXE2)
#define TARGET_VXE2_P(opts) \
  (TARGET_VX_P (opts) && TARGET_CPU_VXE2_P (opts))
#define TARGET_Z16 (TARGET_ZARCH && TARGET_CPU_Z16)
#define TARGET_Z16_P(opts) \
  (TARGET_ZARCH_P (opts->x_target_flags) && TARGET_CPU_Z16_P (opts))
#define TARGET_NNPA \
  (TARGET_ZARCH && TARGET_CPU_NNPA)
#define TARGET_NNPA_P(opts) \
  (TARGET_ZARCH_P (opts->x_target_flags) && TARGET_CPU_NNPA_P (opts))

#if defined(HAVE_AS_VECTOR_LOADSTORE_ALIGNMENT_HINTS_ON_Z13)
#define TARGET_VECTOR_LOADSTORE_ALIGNMENT_HINTS TARGET_Z13
#elif defined(HAVE_AS_VECTOR_LOADSTORE_ALIGNMENT_HINTS)
#define TARGET_VECTOR_LOADSTORE_ALIGNMENT_HINTS TARGET_Z14
#else
#define TARGET_VECTOR_LOADSTORE_ALIGNMENT_HINTS 0
#endif
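/* Worked example (illustrative only): TARGET_DFP requires the DFP
   facility *and* z/Architecture mode *and* hardware floating point.
   So even when the selected CPU sets PF_DFP (TARGET_CPU_DFP is true),
   compiling with -mesa or -msoft-float leaves TARGET_DFP false and no
   decimal floating point hardware instructions are used.  */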
/* Evaluates to true if it is OK to emit a non-signaling vector
   comparison.  */
#define TARGET_NONSIGNALING_VECTOR_COMPARE_OK \
  (TARGET_VX && !TARGET_VXE && (flag_finite_math_only || !flag_trapping_math))

#ifdef HAVE_AS_MACHINE_MACHINEMODE
#define S390_USE_TARGET_ATTRIBUTE 1
#else
#define S390_USE_TARGET_ATTRIBUTE 0
#endif

#ifdef HAVE_AS_ARCHITECTURE_MODIFIERS
#define S390_USE_ARCHITECTURE_MODIFIERS 1
#else
#define S390_USE_ARCHITECTURE_MODIFIERS 0
#endif

#if S390_USE_TARGET_ATTRIBUTE
/* For switching between functions with different target attributes.  */
#define SWITCHABLE_TARGET 1
#endif

#define TARGET_SUPPORTS_WIDE_INT 1

/* Use the ABI introduced with IBM z13:
   - pass vector arguments <= 16 bytes in VRs
   - align *all* vector types to 8 bytes  */
#define TARGET_VX_ABI TARGET_VX

#define TARGET_AVOID_CMP_AND_BRANCH (s390_tune == PROCESSOR_2817_Z196)

/* Issue a write prefetch for the +4 cache line.  */
#define TARGET_SETMEM_PREFETCH_DISTANCE 1024

/* Expands to a C expression evaluating to true if a setmem to VAL of
   length LEN should be emitted using prefetch instructions.  */
#define TARGET_SETMEM_PFD(VAL,LEN) \
  (TARGET_Z10 \
   && (s390_tune < PROCESSOR_2964_Z13 || (VAL) != const0_rtx) \
   && (!CONST_INT_P (LEN) || INTVAL ((LEN)) > TARGET_SETMEM_PREFETCH_DISTANCE))

/* Run-time target specification.  */

/* Defaults for option flags defined only on some subtargets.  */
#ifndef TARGET_TPF_PROFILING
#define TARGET_TPF_PROFILING 0
#endif

/* This will be overridden by OS headers.  */
#define TARGET_TPF 0

/* Target CPU builtins.  */
#define TARGET_CPU_CPP_BUILTINS() s390_cpu_cpp_builtins (pfile)

#ifdef DEFAULT_TARGET_64BIT
#define TARGET_DEFAULT (MASK_64BIT | MASK_ZARCH | MASK_HARD_DFP \
                        | MASK_OPT_HTM | MASK_OPT_VX)
#else
#define TARGET_DEFAULT 0
#endif

/* Support for configure-time defaults.
   The order here is important so that -march doesn't squash the
   tune values.  */
#define OPTION_DEFAULT_SPECS \
  { "mode", "%{!mesa:%{!mzarch:-m%(VALUE)}}" }, \
  { "tune", "%{!mtune=*:%{!march=*:-mtune=%(VALUE)}}" }, \
  { "arch", "%{!march=*:-march=%(VALUE)}" }

#ifdef __s390__
extern const char *s390_host_detect_local_cpu (int argc, const char **argv);
# define EXTRA_SPEC_FUNCTIONS \
  { "local_cpu_detect", s390_host_detect_local_cpu },

#define MARCH_MTUNE_NATIVE_SPECS \
  "%{mtune=native:%<mtune=native %:local_cpu_detect(tune)} " \
  "%{march=native:%<march=native" \
  " %:local_cpu_detect(arch %{mesa|mzarch:mesa_mzarch})}"
#else
# define MARCH_MTUNE_NATIVE_SPECS ""
#endif

#ifdef DEFAULT_TARGET_64BIT
#define S390_TARGET_BITS_STRING "64"
#else
#define S390_TARGET_BITS_STRING "31"
#endif

/* Defaulting rules.  */
#define DRIVER_SELF_SPECS \
  MARCH_MTUNE_NATIVE_SPECS, \
  "%{!m31:%{!m64:-m" S390_TARGET_BITS_STRING "}}", \
  "%{!mesa:%{!mzarch:%{m31:-mesa}%{m64:-mzarch}}}", \
  "%{!march=*:-march=z900}"
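/* Worked example of the defaulting rules above (illustrative only):
   on a DEFAULT_TARGET_64BIT configuration without configure-time
   --with-arch/--with-tune defaults, invoking the driver with no
   machine options behaves roughly like

     gcc -m64 -mzarch -march=z900 ...

   The bit width comes from S390_TARGET_BITS_STRING, the architecture
   mode follows the chosen bit width, and -march falls back to z900.
   Explicit -m31/-m64, -mesa/-mzarch or -march=... options suppress the
   corresponding default.  */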
/* Constants needed to control the TEST DATA CLASS (TDC) instruction.  */
#define S390_TDC_POSITIVE_ZERO                    (1 << 11)
#define S390_TDC_NEGATIVE_ZERO                    (1 << 10)
#define S390_TDC_POSITIVE_NORMALIZED_BFP_NUMBER   (1 << 9)
#define S390_TDC_NEGATIVE_NORMALIZED_BFP_NUMBER   (1 << 8)
#define S390_TDC_POSITIVE_DENORMALIZED_BFP_NUMBER (1 << 7)
#define S390_TDC_NEGATIVE_DENORMALIZED_BFP_NUMBER (1 << 6)
#define S390_TDC_POSITIVE_INFINITY                (1 << 5)
#define S390_TDC_NEGATIVE_INFINITY                (1 << 4)
#define S390_TDC_POSITIVE_QUIET_NAN               (1 << 3)
#define S390_TDC_NEGATIVE_QUIET_NAN               (1 << 2)
#define S390_TDC_POSITIVE_SIGNALING_NAN           (1 << 1)
#define S390_TDC_NEGATIVE_SIGNALING_NAN           (1 << 0)

/* The following values are different for DFP.  */
#define S390_TDC_POSITIVE_DENORMALIZED_DFP_NUMBER (1 << 9)
#define S390_TDC_NEGATIVE_DENORMALIZED_DFP_NUMBER (1 << 8)
#define S390_TDC_POSITIVE_NORMALIZED_DFP_NUMBER   (1 << 7)
#define S390_TDC_NEGATIVE_NORMALIZED_DFP_NUMBER   (1 << 6)

/* For signbit, the BFP-DFP-difference makes no difference.  */
#define S390_TDC_SIGNBIT_SET (S390_TDC_NEGATIVE_ZERO \
                              | S390_TDC_NEGATIVE_NORMALIZED_BFP_NUMBER \
                              | S390_TDC_NEGATIVE_DENORMALIZED_BFP_NUMBER \
                              | S390_TDC_NEGATIVE_INFINITY \
                              | S390_TDC_NEGATIVE_QUIET_NAN \
                              | S390_TDC_NEGATIVE_SIGNALING_NAN)

#define S390_TDC_INFINITY (S390_TDC_POSITIVE_INFINITY \
                           | S390_TDC_NEGATIVE_INFINITY)

/* Target machine storage layout.  */

/* Everything is big-endian.  */
#define BITS_BIG_ENDIAN 1
#define BYTES_BIG_ENDIAN 1
#define WORDS_BIG_ENDIAN 1

#define STACK_SIZE_MODE (Pmode)

/* Make the stack pointer be moved downwards while issuing stack probes
   with -fstack-check.  We need this to prevent memory below the stack
   pointer from being accessed.  */
#define STACK_CHECK_MOVING_SP 1

#ifndef IN_LIBGCC2

/* Width of a word, in units (bytes).  */
#define UNITS_PER_WORD (TARGET_ZARCH ? 8 : 4)

/* Width of a pointer.  To be used instead of UNITS_PER_WORD in
   ABI-relevant contexts.  This always matches
   GET_MODE_SIZE (Pmode).  */
#define UNITS_PER_LONG (TARGET_64BIT ? 8 : 4)
#define MIN_UNITS_PER_WORD 4
#define MAX_BITS_PER_WORD 64
#else

/* In libgcc, UNITS_PER_WORD has ABI-relevant effects, e.g. whether
   the library should export TImode functions or not.  Thus, we have
   to redefine UNITS_PER_WORD depending on __s390x__ for libgcc.  */
#ifdef __s390x__
#define UNITS_PER_WORD 8
#else
#define UNITS_PER_WORD 4
#endif
#endif

/* Width of a pointer, in bits.  */
#define POINTER_SIZE (TARGET_64BIT ? 64 : 32)

/* Allocation boundary (in *bits*) for storing arguments in argument list.  */
#define PARM_BOUNDARY (TARGET_64BIT ? 64 : 32)

/* Boundary (in *bits*) on which the stack pointer should be aligned.  */
#define STACK_BOUNDARY 64

/* Allocation boundary (in *bits*) for the code of a function.  */
#define FUNCTION_BOUNDARY 64

/* There is no point aligning anything to a rounder boundary than this.  */
#define BIGGEST_ALIGNMENT 64

/* Alignment of field after `int : 0' in a structure.  */
#define EMPTY_FIELD_BOUNDARY 32

/* Alignment on even addresses for LARL instruction.  */
#define DATA_ABI_ALIGNMENT(TYPE, ALIGN) ((ALIGN) < 16 ? 16 : (ALIGN))

/* Alignment is not required by the hardware.  */
#define STRICT_ALIGNMENT 0
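/* Illustrative note: UNITS_PER_WORD follows the architecture mode
   while UNITS_PER_LONG follows the ABI pointer size, so when compiling
   31-bit code in z/Architecture mode the two differ:

     -m31 -mzarch   =>  UNITS_PER_WORD == 8, UNITS_PER_LONG == 4

   which is why ABI-relevant places must use UNITS_PER_LONG.  */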
/* Mode of stack savearea.
   FUNCTION is VOIDmode because calling convention maintains SP.
   BLOCK needs Pmode for SP.
   NONLOCAL needs twice Pmode to maintain both backchain and SP.  */
#define STACK_SAVEAREA_MODE(LEVEL) \
  ((LEVEL) == SAVE_FUNCTION ? VOIDmode \
   : (LEVEL) == SAVE_NONLOCAL ? (TARGET_64BIT ? OImode : TImode) : Pmode)


/* Type layout.  */

/* Sizes in bits of the source language data types.  */
#define SHORT_TYPE_SIZE 16
#define INT_TYPE_SIZE 32
#define LONG_TYPE_SIZE (TARGET_64BIT ? 64 : 32)
#define LONG_LONG_TYPE_SIZE 64
#define FLOAT_TYPE_SIZE 32
#define DOUBLE_TYPE_SIZE 64
#define LONG_DOUBLE_TYPE_SIZE (TARGET_LONG_DOUBLE_128 ? 128 : 64)

/* Work around target_flags dependency in ada/targtyps.cc.  */
#define WIDEST_HARDWARE_FP_SIZE 64

/* We use "unsigned char" as default.  */
#define DEFAULT_SIGNED_CHAR 0


/* Register usage.  */

/* We have 16 general purpose registers (registers 0-15),
   and 16 floating point registers (registers 16-31).
   (On non-IEEE machines, we have only 4 fp registers.)

   Amongst the general purpose registers, some are used
   for specific purposes:
   GPR 11: Hard frame pointer (if needed)
   GPR 12: Global offset table pointer (if needed)
   GPR 13: Literal pool base register
   GPR 14: Return address register
   GPR 15: Stack pointer

   Registers 32-35 are 'fake' hard registers that do not
   correspond to actual hardware:
   Reg 32: Argument pointer
   Reg 33: Condition code
   Reg 34: Frame pointer
   Reg 35: Return address pointer

   Registers 36 and 37 are mapped to access registers
   0 and 1, used to implement thread-local storage.

   Regs 38-53: Vector registers v16-v31  */

#define FIRST_PSEUDO_REGISTER 54

/* Standard register usage.  */
#define GENERAL_REGNO_P(N)      ((int)(N) >= 0 && (N) < 16)
#define ADDR_REGNO_P(N)         ((N) >= 1 && (N) < 16)
#define FP_REGNO_P(N)           ((N) >= 16 && (N) < 32)
#define CC_REGNO_P(N)           ((N) == 33)
#define FRAME_REGNO_P(N)        ((N) == 32 || (N) == 34 || (N) == 35)
#define ACCESS_REGNO_P(N)       ((N) == 36 || (N) == 37)
#define VECTOR_NOFP_REGNO_P(N)  ((N) >= 38 && (N) <= 53)
#define VECTOR_REGNO_P(N)       (FP_REGNO_P (N) || VECTOR_NOFP_REGNO_P (N))

#define GENERAL_REG_P(X)        (REG_P (X) && GENERAL_REGNO_P (REGNO (X)))
#define ADDR_REG_P(X)           (REG_P (X) && ADDR_REGNO_P (REGNO (X)))
#define FP_REG_P(X)             (REG_P (X) && FP_REGNO_P (REGNO (X)))
#define CC_REG_P(X)             (REG_P (X) && CC_REGNO_P (REGNO (X)))
#define FRAME_REG_P(X)          (REG_P (X) && FRAME_REGNO_P (REGNO (X)))
#define ACCESS_REG_P(X)         (REG_P (X) && ACCESS_REGNO_P (REGNO (X)))
#define VECTOR_NOFP_REG_P(X)    (REG_P (X) && VECTOR_NOFP_REGNO_P (REGNO (X)))
#define VECTOR_REG_P(X)         (REG_P (X) && VECTOR_REGNO_P (REGNO (X)))
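/* Illustrative note: the vector registers v0-v15 overlay the floating
   point registers, so they share hard register numbers 16-31; only
   v16-v31 get hard register numbers of their own (38-53).  Hence
   VECTOR_REGNO_P accepts both ranges, e.g.

     VECTOR_REGNO_P (16)   is true   (f0, which overlays v0)
     VECTOR_REGNO_P (38)   is true   (v16)
     VECTOR_REGNO_P (33)   is false  (the condition code register)

   The mapping to names is given by REGISTER_NAMES further down.  */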
/* Set up fixed registers and calling convention:

   GPRs 0-5 are always call-clobbered,
   GPRs 6-15 are always call-saved.
   GPR 12 is fixed if used as GOT pointer.
   GPR 13 is always fixed (as literal pool pointer).
   GPR 14 is always fixed on S/390 machines (as return address).
   GPR 15 is always fixed (as stack pointer).
   The 'fake' hard registers are call-clobbered and fixed.
   The access registers are call-saved and fixed.

   On 31-bit, FPRs 18-19 are call-clobbered;
   on 64-bit, FPRs 24-31 are call-clobbered.
   The remaining FPRs are call-saved.

   All non-FP vector registers (v16-v31) are call-clobbered.  */

#define FIXED_REGISTERS    \
{ 0, 0, 0, 0,              \
  0, 0, 0, 0,              \
  0, 0, 0, 0,              \
  0, 1, 1, 1,              \
  0, 0, 0, 0,              \
  0, 0, 0, 0,              \
  0, 0, 0, 0,              \
  0, 0, 0, 0,              \
  1, 1, 1, 1,              \
  1, 1,                    \
  0, 0, 0, 0,              \
  0, 0, 0, 0,              \
  0, 0, 0, 0,              \
  0, 0, 0, 0 }

#define CALL_REALLY_USED_REGISTERS              \
{ 1, 1, 1, 1,   /* r0 - r15 */                  \
  1, 1, 0, 0,                                   \
  0, 0, 0, 0,                                   \
  0, 0, 0, 0,                                   \
  1, 1, 1, 1,   /* f0 (16) - f15 (31) */        \
  1, 1, 1, 1,                                   \
  1, 1, 1, 1,                                   \
  1, 1, 1, 1,                                   \
  1, 1, 1, 1,   /* arg, cc, fp, ret addr */     \
  0, 0,         /* a0 (36), a1 (37) */          \
  1, 1, 1, 1,   /* v16 (38) - v23 (45) */       \
  1, 1, 1, 1,                                   \
  1, 1, 1, 1,   /* v24 (46) - v31 (53) */       \
  1, 1, 1, 1 }

/* Preferred register allocation order.  */
#define REG_ALLOC_ORDER \
{  1, 2, 3, 4, 5, 0, 12, 11, 10, 9, 8, 7, 6, 14, 13,                   \
   16, 17, 18, 19, 20, 21, 22, 23,                                     \
   24, 25, 26, 27, 28, 29, 30, 31,                                     \
   38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,     \
   15, 32, 33, 34, 35, 36, 37 }


#define HARD_REGNO_RENAME_OK(FROM, TO) \
  s390_hard_regno_rename_ok ((FROM), (TO))

/* Maximum number of registers to represent a value of mode MODE
   in a register of class CLASS.  */
#define CLASS_MAX_NREGS(CLASS, MODE) \
  s390_class_max_nregs ((CLASS), (MODE))

/* We can reverse a CC mode safely if we know whether it comes from a
   floating point compare or not.  With the vector modes it is encoded
   as part of the mode.
   FIXME: It might make sense to do this for other cc modes as well.  */
#define REVERSIBLE_CC_MODE(MODE)                        \
  ((MODE) == CCVIALLmode || (MODE) == CCVIANYmode       \
   || (MODE) == CCVFALLmode || (MODE) == CCVFANYmode)

/* Given a condition code and a mode, return the inverse condition.  */
#define REVERSE_CONDITION(CODE, MODE) s390_reverse_condition (MODE, CODE)


/* Register classes.  */

/* We use the following register classes:
   GENERAL_REGS     All general purpose registers
   ADDR_REGS        All general purpose registers except %r0
                    (These registers can be used in address generation)
   FP_REGS          All floating point registers
   CC_REGS          The condition code register
   ACCESS_REGS      The access registers

   GENERAL_FP_REGS  Union of GENERAL_REGS and FP_REGS
   ADDR_FP_REGS     Union of ADDR_REGS and FP_REGS
   GENERAL_CC_REGS  Union of GENERAL_REGS and CC_REGS
   ADDR_CC_REGS     Union of ADDR_REGS and CC_REGS

   NO_REGS          No registers
   ALL_REGS         All registers

   Note that the 'fake' frame pointer and argument pointer registers
   are included amongst the address registers here.  */

enum reg_class
{
  NO_REGS, CC_REGS, ADDR_REGS, GENERAL_REGS, ACCESS_REGS,
  ADDR_CC_REGS, GENERAL_CC_REGS,
  FP_REGS, ADDR_FP_REGS, GENERAL_FP_REGS,
  VEC_REGS, ADDR_VEC_REGS, GENERAL_VEC_REGS,
  ALL_REGS, LIM_REG_CLASSES
};
#define N_REG_CLASSES (int) LIM_REG_CLASSES

#define REG_CLASS_NAMES                                                 \
{ "NO_REGS", "CC_REGS", "ADDR_REGS", "GENERAL_REGS", "ACCESS_REGS",    \
  "ADDR_CC_REGS", "GENERAL_CC_REGS",                                   \
  "FP_REGS", "ADDR_FP_REGS", "GENERAL_FP_REGS",                        \
  "VEC_REGS", "ADDR_VEC_REGS", "GENERAL_VEC_REGS",                     \
  "ALL_REGS" }
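/* Reading aid for the REG_CLASS_CONTENTS table below (illustration
   only): each class is described by two 32-bit words; bit N of the
   first word stands for hard register N, bit N of the second word for
   hard register 32 + N.  For example the ADDR_REGS entry

     { 0x0000fffe, 0x0000000d }

   covers %r1-%r15 (bits 1-15 of the first word) plus the fake argument
   pointer (reg 32), frame pointer (reg 34) and return address pointer
   (reg 35) from the second word, matching the note above that the fake
   pointer registers count as address registers.  */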
/* Class -> register mapping.  */
#define REG_CLASS_CONTENTS \
{                                                       \
  { 0x00000000, 0x00000000 },   /* NO_REGS */           \
  { 0x00000000, 0x00000002 },   /* CC_REGS */           \
  { 0x0000fffe, 0x0000000d },   /* ADDR_REGS */         \
  { 0x0000ffff, 0x0000000d },   /* GENERAL_REGS */      \
  { 0x00000000, 0x00000030 },   /* ACCESS_REGS */       \
  { 0x0000fffe, 0x0000000f },   /* ADDR_CC_REGS */      \
  { 0x0000ffff, 0x0000000f },   /* GENERAL_CC_REGS */   \
  { 0xffff0000, 0x00000000 },   /* FP_REGS */           \
  { 0xfffffffe, 0x0000000d },   /* ADDR_FP_REGS */      \
  { 0xffffffff, 0x0000000d },   /* GENERAL_FP_REGS */   \
  { 0xffff0000, 0x003fffc0 },   /* VEC_REGS */          \
  { 0xfffffffe, 0x003fffcd },   /* ADDR_VEC_REGS */     \
  { 0xffffffff, 0x003fffcd },   /* GENERAL_VEC_REGS */  \
  { 0xffffffff, 0x003fffff },   /* ALL_REGS */          \
}

/* In some cases the register allocation order is not enough for IRA to
   generate good code.  The following macro (if defined) increases the
   cost of REGNO for a pseudo approximately by the pseudo's usage
   frequency multiplied by the macro value.

   We avoid usage of BASE_REGNUM by a nonzero macro value because
   reload can decide not to use the hard register because some
   constant was forced to be in memory.  */
#define IRA_HARD_REGNO_ADD_COST_MULTIPLIER(regno)       \
  ((regno) != BASE_REGNUM ? 0.0 : 0.5)

/* Register -> class mapping.  */
extern const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER];
#define REGNO_REG_CLASS(REGNO) (regclass_map[REGNO])

/* ADDR_REGS can be used as base or index register.  */
#define INDEX_REG_CLASS ADDR_REGS
#define BASE_REG_CLASS ADDR_REGS

/* Check whether REGNO is a hard register of the suitable class
   or a pseudo register currently allocated to one such.  */
#define REGNO_OK_FOR_INDEX_P(REGNO)                     \
  (((REGNO) < FIRST_PSEUDO_REGISTER                     \
    && REGNO_REG_CLASS ((REGNO)) == ADDR_REGS)          \
   || ADDR_REGNO_P (reg_renumber[REGNO]))
#define REGNO_OK_FOR_BASE_P(REGNO) REGNO_OK_FOR_INDEX_P (REGNO)


/* Stack layout and calling conventions.  */

/* Our stack grows from higher to lower addresses.  However, local
   variables are accessed by positive offsets, and function arguments
   are stored at increasing addresses.  */
#define STACK_GROWS_DOWNWARD 1
#define FRAME_GROWS_DOWNWARD 1
/* #undef ARGS_GROW_DOWNWARD */

/* The basic stack layout looks like this: the stack pointer points
   to the register save area for called functions.  Above that area
   is the location to place outgoing arguments.  Above those follow
   dynamic allocations (alloca), and finally the local variables.  */

/* Offset from stack-pointer to first location of outgoing args.  */
#define STACK_POINTER_OFFSET (TARGET_64BIT ? 160 : 96)

/* Offset from the stack pointer register to an item dynamically
   allocated on the stack, e.g., by `alloca'.  */
#define STACK_DYNAMIC_OFFSET(FUNDECL) \
  (STACK_POINTER_OFFSET + crtl->outgoing_args_size)

/* Offset of first parameter from the argument pointer register value.
   We have a fake argument pointer register that points directly to
   the argument area.  */
#define FIRST_PARM_OFFSET(FNDECL) 0

/* Defining this macro makes __builtin_frame_address(0) and
   __builtin_return_address(0) work with -fomit-frame-pointer.  */
#define INITIAL_FRAME_ADDRESS_RTX \
  (plus_constant (Pmode, arg_pointer_rtx, -STACK_POINTER_OFFSET))
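/* Worked example of the layout constants above (illustrative only):
   in the default (non-packed) 64-bit layout, bytes 0-159 above the
   stack pointer form the register save area for called functions, so

     STACK_POINTER_OFFSET            == 160
     STACK_DYNAMIC_OFFSET (fundecl)  == 160 + crtl->outgoing_args_size

   i.e. outgoing arguments start right after the save area and alloca
   storage starts after the outgoing argument block.  For -m31 the save
   area is 96 bytes and the numbers shrink accordingly.  */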
/* The return address of the current frame is retrieved
   from the initial value of register RETURN_REGNUM.
   For frames farther back, we use the stack slot where
   the corresponding RETURN_REGNUM register was saved.  */
#define DYNAMIC_CHAIN_ADDRESS(FRAME)                            \
  (TARGET_PACKED_STACK ?                                        \
   plus_constant (Pmode, (FRAME),                               \
                  STACK_POINTER_OFFSET - UNITS_PER_LONG) : (FRAME))

/* For -mpacked-stack this adds 160 - 8 (96 - 4) to the output of
   builtin_frame_address.  Otherwise arg pointer -
   STACK_POINTER_OFFSET would be returned for
   __builtin_frame_address(0), which might result in an address pointing
   somewhere into the middle of the local variables since the packed
   stack layout generally does not need all the bytes in the register
   save area.  */
#define FRAME_ADDR_RTX(FRAME) \
  DYNAMIC_CHAIN_ADDRESS ((FRAME))

#define RETURN_ADDR_RTX(COUNT, FRAME) \
  s390_return_addr_rtx ((COUNT), DYNAMIC_CHAIN_ADDRESS ((FRAME)))

/* In 31-bit mode, we need to mask off the high bit of return addresses.  */
#define MASK_RETURN_ADDR (TARGET_64BIT ? constm1_rtx : GEN_INT (0x7fffffff))


/* Exception handling.  */

/* Describe calling conventions for DWARF-2 exception handling.  */
#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, RETURN_REGNUM)
#define INCOMING_FRAME_SP_OFFSET STACK_POINTER_OFFSET
#define DWARF_FRAME_RETURN_COLUMN 14

/* Describe how we implement __builtin_eh_return.  */
#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N) + 6 : INVALID_REGNUM)
#define EH_RETURN_HANDLER_RTX gen_rtx_MEM (Pmode, return_address_pointer_rtx)

/* Select a format to encode pointers in exception handling data.  */
#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL)                          \
  (flag_pic                                                                 \
   ? ((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4  \
   : DW_EH_PE_absptr)

/* Register save slot alignment.  */
#define DWARF_CIE_DATA_ALIGNMENT (-UNITS_PER_LONG)

/* Let the assembler generate debug line info.  */
#define DWARF2_ASM_LINE_DEBUG_INFO 1

/* Define the dwarf register mapping.
   v16-v31 -> 68-83
   rX -> X  otherwise  */
#define DBX_REGISTER_NUMBER(regno) \
  (((regno) >= 38 && (regno) <= 53) ? (regno) + 30 : (regno))

/* Frame registers.  */

#define STACK_POINTER_REGNUM 15
#define FRAME_POINTER_REGNUM 34
#define HARD_FRAME_POINTER_REGNUM 11
#define ARG_POINTER_REGNUM 32
#define RETURN_ADDRESS_POINTER_REGNUM 35

/* The static chain must be call-clobbered, but not used for
   function argument passing.  As register 1 is clobbered by
   the trampoline code, we only have one option.  */
#define STATIC_CHAIN_REGNUM 0

/* Number of hardware registers that go into the DWARF-2 unwind info.
   To avoid ABI incompatibility, this number must not change even as
   'fake' hard registers are added or removed.  */
#define DWARF_FRAME_REGISTERS 34


/* Frame pointer and argument pointer elimination.  */

#define ELIMINABLE_REGS                                           \
  {{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM },                \
   { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM },           \
   { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM },                  \
   { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM },             \
   { RETURN_ADDRESS_POINTER_REGNUM, STACK_POINTER_REGNUM },       \
   { RETURN_ADDRESS_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM },  \
   { BASE_REGNUM, BASE_REGNUM }}

#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
  (OFFSET) = s390_initial_elimination_offset ((FROM), (TO))
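/* Worked example for DBX_REGISTER_NUMBER above (illustration only):
   the GCC-internal vector registers v16-v31 occupy hard register
   numbers 38-53 while the DWARF numbering reserves 68-83 for them,
   hence the "+ 30" offset:

     DBX_REGISTER_NUMBER (38) == 68   (v16)
     DBX_REGISTER_NUMBER (53) == 83   (v31)
     DBX_REGISTER_NUMBER (15) == 15   (%r15, unchanged)
*/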
/* Stack arguments.  */

/* We need current_function_outgoing_args to be valid.  */
#define ACCUMULATE_OUTGOING_ARGS 1


/* Register arguments.  */

typedef struct s390_arg_structure
{
  int gprs;                     /* gpr so far */
  int fprs;                     /* fpr so far */
  int vrs;                      /* vr so far */
}
CUMULATIVE_ARGS;

#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, NN, N_NAMED_ARGS) \
  ((CUM).gprs = 0, (CUM).fprs = 0, (CUM).vrs = 0)

#define FIRST_VEC_ARG_REGNO 46
#define LAST_VEC_ARG_REGNO 53

/* Arguments can be placed in general registers 2 to 6, or in floating
   point registers 0 and 2 for 31 bit and fprs 0, 2, 4 and 6 for 64
   bit.  */
#define FUNCTION_ARG_REGNO_P(N)                                         \
  (((N) >= 2 && (N) < 7) || (N) == 16 || (N) == 17                      \
   || (TARGET_64BIT && ((N) == 18 || (N) == 19))                        \
   || (TARGET_VX && ((N) >= FIRST_VEC_ARG_REGNO && (N) <= LAST_VEC_ARG_REGNO)))


/* Only gpr 2, fpr 0, and v24 are ever used as return registers.  */
#define FUNCTION_VALUE_REGNO_P(N)               \
  ((N) == 2 || (N) == 16                        \
   || (TARGET_VX && (N) == FIRST_VEC_ARG_REGNO))


/* Function entry and exit.  */

/* When returning from a function, the stack pointer does not matter.  */
#define EXIT_IGNORE_STACK 1


/* Profiling.  */

#define FUNCTION_PROFILER(FILE, LABELNO) \
  s390_function_profiler ((FILE), ((LABELNO)))

#define PROFILE_BEFORE_PROLOGUE 1

#define NO_PROFILE_COUNTERS 1


/* Trampolines for nested functions.  */

#define TRAMPOLINE_SIZE      (TARGET_64BIT ? 32 : 16)
#define TRAMPOLINE_ALIGNMENT BITS_PER_WORD

/* Addressing modes, and classification of registers for them.  */

/* Recognize any constant value that is a valid address.  */
#define CONSTANT_ADDRESS_P(X) 0

/* Maximum number of registers that can appear in a valid memory address.  */
#define MAX_REGS_PER_ADDRESS 2

/* This definition replaces the formerly used 'm' constraint with a
   different constraint letter in order to avoid changing semantics of
   the 'm' constraint when accepting new address formats in
   TARGET_LEGITIMATE_ADDRESS_P.  The constraint letter defined here
   must not be used in insn definitions or inline assemblies.  */
#define TARGET_MEM_CONSTRAINT 'e'

/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and jump to WIN.  This
   macro is used in only one place: `find_reloads_address' in reload.cc.  */
#define LEGITIMIZE_RELOAD_ADDRESS(AD, MODE, OPNUM, TYPE, IND, WIN)      \
do {                                                                    \
  rtx new_rtx = legitimize_reload_address ((AD), (MODE),                \
                                           (OPNUM), (int)(TYPE));       \
  if (new_rtx)                                                          \
    {                                                                   \
      (AD) = new_rtx;                                                   \
      goto WIN;                                                         \
    }                                                                   \
} while (0)

/* Helper macro for s390.cc and s390.md to check for symbolic constants.  */
#define SYMBOLIC_CONST(X)                                               \
  (GET_CODE (X) == SYMBOL_REF                                           \
   || GET_CODE (X) == LABEL_REF                                         \
   || (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X)))

#define TLS_SYMBOLIC_CONST(X)                                           \
  ((GET_CODE (X) == SYMBOL_REF && tls_symbolic_operand (X))             \
   || (GET_CODE (X) == CONST && tls_symbolic_reference_mentioned_p (X)))
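/* Illustration (not exhaustive) of RTL shapes matched by the helpers
   above: SYMBOLIC_CONST accepts a plain (symbol_ref "x"), a
   (label_ref ...), or a wrapped expression such as

     (const (plus (symbol_ref "x") (const_int 8)))

   since symbolic_reference_mentioned_p finds the symbol_ref inside the
   CONST.  TLS_SYMBOLIC_CONST matches the analogous forms for symbols
   that carry a TLS model.  */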
/* Condition codes.  */

/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
   return the mode to be used for the comparison.  */
#define SELECT_CC_MODE(OP, X, Y) s390_select_ccmode ((OP), (X), (Y))

/* Relative costs of operations.  */

/* A C expression for the cost of a branch instruction.  A value of 1
   is the default; other values are interpreted relative to that.  */
#define BRANCH_COST(speed_p, predictable_p) s390_branch_cost

/* Nonzero if access to memory by bytes is slow and undesirable.  */
#define SLOW_BYTE_ACCESS 1

/* An integer expression for the size in bits of the largest integer machine
   mode that should actually be used.  We allow pairs of registers.  */
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TARGET_64BIT ? TImode : DImode)

/* The maximum number of bytes that a single instruction can move quickly
   between memory and registers or between two memory locations.  */
#define MOVE_MAX (TARGET_ZARCH ? 16 : 8)
#define MOVE_MAX_PIECES (TARGET_ZARCH ? 8 : 4)
#define MAX_MOVE_MAX 16

/* Don't perform CSE on function addresses.  */
#define NO_FUNCTION_CSE 1

/* This value is used in tree-sra to decide whether it might be beneficial
   to split a struct move into several word-size moves.  For S/390
   only small values make sense here since struct moves are relatively
   cheap thanks to mvc, so the small default value chosen for archs
   with memmove patterns should be ok.  But this value is multiplied
   in tree-sra with UNITS_PER_WORD to make a decision, so we adjust it
   here to compensate for that factor since mvc costs exactly the same
   on 31 and 64 bit.  */
#define MOVE_RATIO(speed) (TARGET_64BIT ? 2 : 4)


/* Sections.  */

/* Output before read-only data.  */
#define TEXT_SECTION_ASM_OP ".text"

/* Output before writable (initialized) data.  */
#define DATA_SECTION_ASM_OP ".data"

/* Output before writable (uninitialized) data.  */
#define BSS_SECTION_ASM_OP ".bss"

/* The S/390 constant pool breaks the scheme used in crtstuff.c to
   control the section in which code resides.  We have to write it as
   asm code.  */
#ifndef __s390x__
#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
    asm (SECTION_OP "\n\
	bras\t%r2,1f\n\
0:	.long\t" USER_LABEL_PREFIX #FUNC " - 0b\n\
1:	l\t%r3,0(%r2)\n\
	bas\t%r14,0(%r3,%r2)\n\
	.previous");
#endif


/* Position independent code.  */

#define PIC_OFFSET_TABLE_REGNUM (flag_pic ? 12 : INVALID_REGNUM)

#define LEGITIMATE_PIC_OPERAND_P(X) legitimate_pic_operand_p (X)

#ifndef TARGET_DEFAULT_PIC_DATA_IS_TEXT_RELATIVE
#define TARGET_DEFAULT_PIC_DATA_IS_TEXT_RELATIVE 1
#endif


/* Assembler file format.  */

/* Character to start a comment.  */
#define ASM_COMMENT_START "#"

/* Declare an uninitialized external linkage data object.  */
#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
  asm_output_aligned_bss ((FILE), (DECL), (NAME), (SIZE), (ALIGN))

/* Globalizing directive for a label.  */
#define GLOBAL_ASM_OP ".globl "

/* Advance the location counter to a multiple of 2**LOG bytes.  */
#define ASM_OUTPUT_ALIGN(FILE, LOG) \
  if ((LOG)) fprintf ((FILE), "\t.align\t%d\n", 1 << (LOG))

/* Advance the location counter by SIZE bytes.  */
#define ASM_OUTPUT_SKIP(FILE, SIZE) \
  fprintf ((FILE), "\t.set\t.,.+" HOST_WIDE_INT_PRINT_UNSIGNED"\n", (SIZE))

/* The LOCAL_LABEL_PREFIX variable is used by dbxelf.h.  */
#define LOCAL_LABEL_PREFIX "."

#define LABEL_ALIGN(LABEL) \
  s390_label_align ((LABEL))
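/* Output example (illustrative): ASM_OUTPUT_ALIGN emits a byte-count
   .align directive, so a request for 2**3 alignment produces

	.align	8

   and ASM_OUTPUT_SKIP advances the location counter with a .set on
   ".", e.g. ".set .,.+16" for a 16-byte gap.  */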
/* How to refer to registers in assembler output.  This sequence is
   indexed by the compiler's hard register number (see above).  */
#define REGISTER_NAMES                                                  \
  { "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",      \
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",     \
    "%f0",  "%f2",  "%f4",  "%f6",  "%f1",  "%f3",  "%f5",  "%f7",      \
    "%f8",  "%f10", "%f12", "%f14", "%f9",  "%f11", "%f13", "%f15",     \
    "%ap",  "%cc",  "%fp",  "%rp",  "%a0",  "%a1",                      \
    "%v16", "%v18", "%v20", "%v22", "%v17", "%v19", "%v21", "%v23",     \
    "%v24", "%v26", "%v28", "%v30", "%v25", "%v27", "%v29", "%v31"      \
  }

#define ADDITIONAL_REGISTER_NAMES                                       \
  { { "v0", 16 }, { "v2", 17 }, { "v4", 18 }, { "v6", 19 },             \
    { "v1", 20 }, { "v3", 21 }, { "v5", 22 }, { "v7", 23 },             \
    { "v8", 24 }, { "v10", 25 }, { "v12", 26 }, { "v14", 27 },          \
    { "v9", 28 }, { "v11", 29 }, { "v13", 30 }, { "v15", 31 } };
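/* Reading aid (illustration only): hard registers 16-31 are printed
   with their FPR names even though they double as the low vector
   registers, which is why ADDITIONAL_REGISTER_NAMES lets inline asm
   refer to the same registers by their vector names, e.g.

     hard reg 16  ->  "%f0",  also accepted as "v0"
     hard reg 17  ->  "%f2",  also accepted as "v2"
*/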
/* Print operand X (an rtx) in assembler syntax to file FILE.  */
#define PRINT_OPERAND(FILE, X, CODE) print_operand ((FILE), (X), (CODE))
#define PRINT_OPERAND_ADDRESS(FILE, ADDR) print_operand_address ((FILE), (ADDR))

/* Output an element of a case-vector that is absolute.  */
#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE)                    \
do {                                                            \
  char buf[32];                                                 \
  fputs (integer_asm_op (UNITS_PER_LONG, TRUE), (FILE));        \
  ASM_GENERATE_INTERNAL_LABEL (buf, "L", (VALUE));              \
  assemble_name ((FILE), buf);                                  \
  fputc ('\n', (FILE));                                         \
} while (0)

/* Output an element of a case-vector that is relative.  */
#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL)        \
do {                                                            \
  char buf[32];                                                 \
  fputs (integer_asm_op (UNITS_PER_LONG, TRUE), (FILE));        \
  ASM_GENERATE_INTERNAL_LABEL (buf, "L", (VALUE));              \
  assemble_name ((FILE), buf);                                  \
  fputc ('-', (FILE));                                          \
  ASM_GENERATE_INTERNAL_LABEL (buf, "L", (REL));                \
  assemble_name ((FILE), buf);                                  \
  fputc ('\n', (FILE));                                         \
} while (0)

/* Mark the return register as used by the epilogue so that we can
   use it in unadorned (return) and (simple_return) instructions.  */
#define EPILOGUE_USES(REGNO) ((REGNO) == RETURN_REGNUM)

#undef ASM_OUTPUT_FUNCTION_LABEL
#define ASM_OUTPUT_FUNCTION_LABEL(FILE, NAME, DECL) \
  s390_asm_output_function_label ((FILE), (NAME), (DECL))

#if S390_USE_TARGET_ATTRIBUTE
/* Hook to output .machine and .machinemode at start of function.  */
#undef ASM_OUTPUT_FUNCTION_PREFIX
#define ASM_OUTPUT_FUNCTION_PREFIX s390_asm_output_function_prefix

/* Hook to output .machine and .machinemode at end of function.  */
#undef ASM_DECLARE_FUNCTION_SIZE
#define ASM_DECLARE_FUNCTION_SIZE s390_asm_declare_function_size
#endif

/* Miscellaneous parameters.  */

/* Specify the machine mode that this machine uses for the index in the
   tablejump instruction.  */
#define CASE_VECTOR_MODE (TARGET_64BIT ? DImode : SImode)

/* Specify the machine mode that pointers have.
   After generation of rtl, the compiler makes no further distinction
   between pointers and any other objects of this machine mode.  */
#define Pmode (TARGET_64BIT ? DImode : SImode)

/* This is -1 for "pointer mode" extend.  See ptr_extend in s390.md.  */
#define POINTERS_EXTEND_UNSIGNED -1

/* A function address in a call instruction is a byte address (for
   indexing purposes) so give the MEM rtx a byte's mode.  */
#define FUNCTION_MODE QImode

/* Specify the value which is used when the clz operand is zero.  */
#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 64, 1)

/* Machine-specific symbol_ref flags.  */
#define SYMBOL_FLAG_ALIGN_SHIFT SYMBOL_FLAG_MACH_DEP_SHIFT
#define SYMBOL_FLAG_ALIGN_MASK \
  ((SYMBOL_FLAG_MACH_DEP << 0) | (SYMBOL_FLAG_MACH_DEP << 1))

#define SYMBOL_FLAG_SET_ALIGN(X, A) \
  (SYMBOL_REF_FLAGS (X) = (SYMBOL_REF_FLAGS (X) & ~SYMBOL_FLAG_ALIGN_MASK) \
                          | (A << SYMBOL_FLAG_ALIGN_SHIFT))

#define SYMBOL_FLAG_GET_ALIGN(X) \
  ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_ALIGN_MASK) >> SYMBOL_FLAG_ALIGN_SHIFT)

/* Helpers to access symbol_ref flags.  They are used in
   check_symref_alignment() and larl_operand to detect if the
   available alignment matches the required one.  We do not use
   a positive check like _ALIGN2 because in that case we would have
   to annotate every symbol_ref.  However, we only want to touch
   the symbol_refs that can be misaligned and assume that the others
   are correctly aligned.  Hence, if a symbol_ref does not have
   a _NOTALIGN flag it is supposed to be correctly aligned.  */
#define SYMBOL_FLAG_SET_NOTALIGN2(X) SYMBOL_FLAG_SET_ALIGN((X), 1)
#define SYMBOL_FLAG_SET_NOTALIGN4(X) SYMBOL_FLAG_SET_ALIGN((X), 2)
#define SYMBOL_FLAG_SET_NOTALIGN8(X) SYMBOL_FLAG_SET_ALIGN((X), 3)

#define SYMBOL_FLAG_NOTALIGN2_P(X) (SYMBOL_FLAG_GET_ALIGN(X) == 1)
#define SYMBOL_FLAG_NOTALIGN4_P(X) (SYMBOL_FLAG_GET_ALIGN(X) == 2 \
                                    || SYMBOL_FLAG_GET_ALIGN(X) == 1)
#define SYMBOL_FLAG_NOTALIGN8_P(X) (SYMBOL_FLAG_GET_ALIGN(X) == 3 \
                                    || SYMBOL_FLAG_GET_ALIGN(X) == 2 \
                                    || SYMBOL_FLAG_GET_ALIGN(X) == 1)

/* Check whether integer displacement is in range for a short displacement.  */
#define SHORT_DISP_IN_RANGE(d) ((d) >= 0 && (d) <= 4095)

/* Check whether integer displacement is in range.  */
#define DISP_IN_RANGE(d) \
  (TARGET_LONG_DISPLACEMENT \
   ? ((d) >= -524288 && (d) <= 524287) \
   : SHORT_DISP_IN_RANGE(d))
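/* Worked example (illustration only): the classic instruction formats
   have an unsigned 12-bit displacement field and the long-displacement
   formats a signed 20-bit field, which is exactly what the two macros
   above encode:

     SHORT_DISP_IN_RANGE (4095)   -> true
     SHORT_DISP_IN_RANGE (-8)     -> false
     DISP_IN_RANGE (-8)           -> true only with TARGET_LONG_DISPLACEMENT
*/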
/* Reads can reuse write prefetches, used by tree-ssa-prefetch-loops.c.  */
#define READ_CAN_USE_WRITE_PREFETCH 1

extern const int processor_flags_table[];

struct s390_processor
{
  /* The preferred name to be used in user visible output.  */
  const char *const name;

  /* CPU name as it should be passed to Binutils via .machine  */
  const char *const binutils_name;
  const enum processor_type processor;
  const struct processor_costs *cost;
  int arch_level;
};

extern const struct s390_processor processor_table[];

/* The truth element value for vector comparisons.  Our instructions
   always generate -1 in that case.  */
#define VECTOR_STORE_FLAG_VALUE(MODE) CONSTM1_RTX (GET_MODE_INNER (MODE))

/* Target pragma.  */

/* resolve_overloaded_builtin cannot be defined the normal way since
   it is defined in code which technically belongs to the
   front-end.  */
#define REGISTER_TARGET_PRAGMAS()               \
  do {                                          \
    s390_register_target_pragmas ();            \
  } while (0)

#ifndef USED_FOR_TARGET
/* The following structure is embedded in the machine
   specific part of struct function.  */

struct GTY (()) s390_frame_layout
{
  /* Offsets within the stack frame.  */
  HOST_WIDE_INT gprs_offset;
  HOST_WIDE_INT f0_offset;
  HOST_WIDE_INT f4_offset;
  HOST_WIDE_INT f8_offset;
  HOST_WIDE_INT backchain_offset;

  /* Number of first and last gpr for which slots in the register
     save area are reserved.  */
  int first_save_gpr_slot;
  int last_save_gpr_slot;

  /* Location (FP register number) where GPRs (r0-r15) should
     be saved to.
      0 - does not need to be saved at all
     -1 - stack slot  */
#define SAVE_SLOT_NONE   0
#define SAVE_SLOT_STACK -1
  signed char gpr_save_slots[16];

  /* Number of first and last gpr to be saved, restored.  */
  int first_save_gpr;
  int first_restore_gpr;
  int last_save_gpr;
  int last_restore_gpr;

  /* Bits standing for floating point registers.  Set, if the
     respective register has to be saved.  Starting with reg 16 (f0)
     at the rightmost bit.
     Bit 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
     fpr 15 13 11  9 14 12 10  8  7  5  3  1  6  4  2  0
     reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16  */
  unsigned int fpr_bitmap;

  /* Number of floating point registers f8-f15 which must be saved.  */
  int high_fprs;

  /* Set if the return address needs to be saved.
     This flag is set by s390_return_addr_rtx if it could not use
     the initial value of r14 and therefore depends on r14 being
     saved to the stack.  */
  bool save_return_addr_p;

  /* Size of stack frame.  */
  HOST_WIDE_INT frame_size;
};
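/* Worked example for fpr_bitmap above (illustration only): the bit
   index is the hard register number minus 16, so if only %f8 (hard
   reg 24) needs to be saved we get

     fpr_bitmap == 1 << (24 - 16)   i.e. bit 8 set
     high_fprs  == 1

   because %f8 lies in the f8-f15 range counted by high_fprs.  */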
/* Define the structure for the machine field in struct function.  */

struct GTY(()) machine_function
{
  struct s390_frame_layout frame_layout;

  /* Literal pool base register.  */
  rtx base_reg;

  bool has_landing_pad_p;

  /* True if the current function may contain a tbegin clobbering
     FPRs.  */
  bool tbegin_p;

  /* For -fsplit-stack support: A stack local which holds a pointer to
     the stack arguments for a function with a variable number of
     arguments.  This is set at the start of the function and is used
     to initialize the overflow_arg_area field of the va_list
     structure.  */
  rtx split_stack_varargs_pointer;

  enum indirect_branch indirect_branch_jump;
  enum indirect_branch indirect_branch_call;

  enum indirect_branch function_return_mem;
  enum indirect_branch function_return_reg;
};
#endif

#define TARGET_INDIRECT_BRANCH_NOBP_RET_OPTION                          \
  (cfun->machine->function_return_reg != indirect_branch_keep           \
   || cfun->machine->function_return_mem != indirect_branch_keep)

#define TARGET_INDIRECT_BRANCH_NOBP_RET                                 \
  ((cfun->machine->function_return_reg != indirect_branch_keep          \
    && !s390_return_addr_from_memory ())                                \
   || (cfun->machine->function_return_mem != indirect_branch_keep       \
       && s390_return_addr_from_memory ()))

#define TARGET_INDIRECT_BRANCH_NOBP_JUMP                                \
  (cfun->machine->indirect_branch_jump != indirect_branch_keep)

#define TARGET_INDIRECT_BRANCH_NOBP_JUMP_THUNK                          \
  (cfun->machine->indirect_branch_jump == indirect_branch_thunk         \
   || cfun->machine->indirect_branch_jump == indirect_branch_thunk_extern)

#define TARGET_INDIRECT_BRANCH_NOBP_JUMP_INLINE_THUNK                   \
  (cfun->machine->indirect_branch_jump == indirect_branch_thunk_inline)

#define TARGET_INDIRECT_BRANCH_NOBP_CALL                                \
  (cfun->machine->indirect_branch_call != indirect_branch_keep)

#ifndef TARGET_DEFAULT_INDIRECT_BRANCH_TABLE
#define TARGET_DEFAULT_INDIRECT_BRANCH_TABLE 0
#endif

#define TARGET_INDIRECT_BRANCH_THUNK_NAME_EXRL "__s390_indirect_jump_r%d"
#define TARGET_INDIRECT_BRANCH_THUNK_NAME_EX   "__s390_indirect_jump_r%duse_r%d"

#define TARGET_INDIRECT_BRANCH_TABLE s390_indirect_branch_table

#ifdef GENERATOR_FILE
/* gencondmd.cc is built before insn-flags.h.  Use an arbitrary opaque value
   that cannot be optimized away by gen_insn.  */
#define HAVE_TF(icode) TARGET_HARD_FLOAT
#else
#define HAVE_TF(icode) (HAVE_##icode##_fpr || HAVE_##icode##_vr)
#endif

/* Dispatcher for movtf.  */
#define EXPAND_MOVTF(icode)                                             \
  do                                                                    \
    {                                                                   \
      if (TARGET_VXE)                                                   \
        emit_insn (gen_##icode##_vr (operands[0], operands[1]));        \
      else                                                              \
        emit_insn (gen_##icode##_fpr (operands[0], operands[1]));       \
      DONE;                                                             \
    }                                                                   \
  while (false)

/* Like EXPAND_MOVTF, but also legitimizes operands.  */
#define EXPAND_TF(icode, nops)                                          \
  do                                                                    \
    {                                                                   \
      const size_t __nops = (nops);                                     \
      expand_operand ops[__nops];                                       \
      create_output_operand (&ops[0], operands[0],                      \
                             GET_MODE (operands[0]));                   \
      for (size_t i = 1; i < __nops; i++)                               \
        create_input_operand (&ops[i], operands[i],                     \
                              GET_MODE (operands[i]));                  \
      if (TARGET_VXE)                                                   \
        expand_insn (CODE_FOR_##icode##_vr, __nops, ops);               \
      else                                                              \
        expand_insn (CODE_FOR_##icode##_fpr, __nops, ops);              \
      DONE;                                                             \
    }                                                                   \
  while (false)

#endif /* _S390_H */