//===- PPCCallingConv.td - Calling Conventions for PowerPC -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the PowerPC 32- and 64-bit
// architectures.
//
//===----------------------------------------------------------------------===//

/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
    : CCIf<!strconcat("State.getMachineFunction().getSubtarget<PPCSubtarget>().",
                      F),
           A>;
class CCIfNotSubtarget<string F, CCAction A>
    : CCIf<!strconcat("!State.getMachineFunction().getSubtarget<PPCSubtarget>().",
                      F),
           A>;
class CCIfOrigArgWasNotPPCF128<CCAction A>
    : CCIf<"!static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
           A>;
class CCIfOrigArgWasPPCF128<CCAction A>
    : CCIf<"static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
           A>;

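// How these records are consumed: the CallingConvEmitter TableGen backend
// lowers each CallingConv definition below into a C++ function in the
// generated PPCGenCallingConv.inc, and `let Entry = 1` marks the records that
// are emitted with external linkage so the PPC lowering and fast-isel code can
// call them directly. The generated functions follow the CCAssignFn shape from
// llvm/CodeGen/CallingConvLower.h, roughly:
//
//   bool RetCC_PPC(unsigned ValNo, MVT ValVT, MVT LocVT,
//                  CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
//                  CCState &State);
//
// The string conditions in CCIfSubtarget/CCIfNotSubtarget and the PPCCCState
// casts above are pasted verbatim into those generated functions, which is why
// they are written as C++ expressions over `State` and `ValNo`.
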
//===----------------------------------------------------------------------===//
// Return Value Calling Convention
//===----------------------------------------------------------------------===//

// PPC64 AnyReg return-value convention. No explicit register is specified for
// the return value. The register allocator is allowed and expected to choose
// any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fall back to the PPC C calling convention.
def RetCC_PPC64_AnyReg : CallingConv<[
  CCCustom<"CC_PPC_AnyReg_Error">
]>;

// Return-value convention for PowerPC coldcc.
let Entry = 1 in
def RetCC_PPC_Cold : CallingConv<[
  // Use the same return registers as RetCC_PPC, but limited to only
  // one return value. The remaining return values will be saved to
  // the stack.
  CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
  CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,

  CCIfType<[i32], CCAssignToReg<[R3]>>,
  CCIfType<[i64], CCAssignToReg<[X3]>>,
  CCIfType<[i128], CCAssignToReg<[X3]>>,

  CCIfType<[f32], CCAssignToReg<[F1]>>,
  CCIfType<[f64], CCAssignToReg<[F1]>>,
  CCIfType<[f128], CCIfSubtarget<"hasAltivec()", CCAssignToReg<[V2]>>>,

  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2]>>>
]>;

// Return-value convention for PowerPC.
let Entry = 1 in
def RetCC_PPC : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,

  // On PPC64, integer return values are always promoted to i64.
  CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
  CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,

  CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,

  // Floating point types returned as "direct" go into F1 .. F8; note that
  // only the ELFv2 ABI fully utilizes all these registers.
  CCIfNotSubtarget<"hasSPE()",
                   CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  CCIfNotSubtarget<"hasSPE()",
                   CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  CCIfSubtarget<"hasSPE()",
                CCIfType<[f32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,
  CCIfSubtarget<"hasSPE()",
                CCIfType<[f64], CCCustom<"CC_PPC32_SPE_RetF64">>>,

  // For P9, f128 values are returned in vector registers.
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,

  // Vector types returned as "direct" go into V2 .. V9; note that only the
  // ELFv2 ABI fully utilizes all these registers.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>
]>;

// No explicit register is specified for the AnyReg calling convention. The
// register allocator may assign the arguments to any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fall back to the PPC C calling convention.
def CC_PPC64_AnyReg : CallingConv<[
  CCCustom<"CC_PPC_AnyReg_Error">
]>;

// Calling convention corresponding to the 64-bit PowerPC ELFv2 ABI.
// This calling convention currently only handles integers, floats and
// vectors within registers, as well as the shadowing of GPRs when
// floating-point and vector arguments are used (see the illustrative
// example after this definition).
// FIXME: This calling convention needs to be extended to handle all types and
// complexities of the ABI.
let Entry = 1 in
def CC_PPC64_ELF : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_PPC64_AnyReg>>,

  CCIfType<[i1], CCPromoteToType<i64>>,
  CCIfType<[i8], CCPromoteToType<i64>>,
  CCIfType<[i16], CCPromoteToType<i64>>,
  CCIfType<[i32], CCPromoteToType<i64>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6, X7, X8, X9, X10]>>,

  // Handle fp types and shadow the corresponding registers as necessary.
  CCIfType<[f32, f64], CCIfNotVarArg<CCCustom<"CC_PPC64_ELF_Shadow_GPR_Regs">>>,
  CCIfType<[f32, f64],
           CCIfNotVarArg<CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8, F9, F10,
                                        F11, F12, F13]>>>,

  // f128 is handled through vector registers instead of fp registers.
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()",
           CCIfNotVarArg<CCCustom<"CC_PPC64_ELF_Shadow_GPR_Regs">>>>,
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()",
           CCIfNotVarArg<CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9, V10,
                                        V11, V12, V13]>>>>,

  // Handle support for vector types, and shadow GPRs as necessary.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v1i128],
           CCIfSubtarget<"hasAltivec()",
           CCIfNotVarArg<CCCustom<"CC_PPC64_ELF_Shadow_GPR_Regs">>>>,
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v1i128],
           CCIfSubtarget<"hasAltivec()",
           CCIfNotVarArg<CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9, V10,
                                        V11, V12, V13]>>>>
]>;

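// Illustrative sketch (a hypothetical C prototype, not generated from this
// file) of the GPR shadowing performed by CC_PPC64_ELF_Shadow_GPR_Regs above:
// under ELFv2, parameters are assigned positionally against the parameter save
// area, so an argument that lands in an FPR or VR still consumes ("shadows")
// the GPR(s) backing its doubleword(s). For
//
//   void callee(double d, int i);
//
// `d` is passed in F1 and X3 is shadowed, so `i` is assigned to X4 rather than
// X3. Vector and f128 arguments occupy 16 bytes and shadow two GPRs each.
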
// Simple calling convention for 64-bit ELF PowerPC fast isel.
// Only handle ints and floats. All ints are promoted to i64.
// Vector types and quadword ints are not handled.
let Entry = 1 in
def CC_PPC64_ELF_FIS : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_PPC64_AnyReg>>,

  CCIfType<[i1], CCPromoteToType<i64>>,
  CCIfType<[i8], CCPromoteToType<i64>>,
  CCIfType<[i16], CCPromoteToType<i64>>,
  CCIfType<[i32], CCPromoteToType<i64>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6, X7, X8, X9, X10]>>,
  CCIfType<[f32, f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>
]>;

// Simple return-value convention for 64-bit ELF PowerPC fast isel.
// All small ints are promoted to i64. Vector types, quadword ints,
// and multiple register returns are "supported" to avoid compile
// errors, but none are handled by the fast selector.
let Entry = 1 in
def RetCC_PPC64_ELF_FIS : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,

  CCIfType<[i1], CCPromoteToType<i64>>,
  CCIfType<[i8], CCPromoteToType<i64>>,
  CCIfType<[i16], CCPromoteToType<i64>>,
  CCIfType<[i32], CCPromoteToType<i64>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>
]>;

//===----------------------------------------------------------------------===//
// PowerPC System V Release 4 32-bit ABI
//===----------------------------------------------------------------------===//

def CC_PPC32_SVR4_Common : CallingConv<[
  CCIfType<[i1], CCPromoteToType<i32>>,

  // The ABI requires i64 to be passed in two adjacent registers with the first
  // register having an odd register number (see the worked example after this
  // definition).
  CCIfType<[i32],
           CCIfSplit<CCIfSubtarget<"useSoftFloat()",
                     CCIfOrigArgWasNotPPCF128<
                       CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>>,

  CCIfType<[i32],
           CCIfSplit<CCIfNotSubtarget<"useSoftFloat()",
                     CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>,
  CCIfType<[f64],
           CCIfSubtarget<"hasSPE()",
                         CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>,
  CCIfSplit<CCIfSubtarget<"useSoftFloat()",
            CCIfOrigArgWasPPCF128<CCCustom<
              "CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128">>>>,

  // The 'nest' parameter, if any, is passed in R11.
  CCIfNest<CCAssignToReg<[R11]>>,

  // The first 8 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,

  // Make sure the i64 words from a long double are either both passed in
  // registers or both passed on the stack.
  CCIfType<[f64], CCIfSplit<CCCustom<"CC_PPC32_SVR4_Custom_AlignFPArgRegs">>>,

  // FP values are passed in F1 - F8.
  CCIfType<[f32, f64],
           CCIfNotSubtarget<"hasSPE()",
                            CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  CCIfType<[f64],
           CCIfSubtarget<"hasSPE()",
                         CCCustom<"CC_PPC32_SPE_CustomSplitFP64">>>,
  CCIfType<[f32],
           CCIfSubtarget<"hasSPE()",
                         CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,

  // Split arguments have an alignment of 8 bytes on the stack.
  CCIfType<[i32], CCIfSplit<CCAssignToStack<4, 8>>>,

  CCIfType<[i32], CCAssignToStack<4, 4>>,

  CCIfType<[f32], CCAssignToStack<4, 4>>,
  CCIfType<[f64], CCAssignToStack<8, 8>>,

  // Vectors and float128 get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToStack<16, 16>>,
  CCIfType<[f128], CCIfSubtarget<"hasAltivec()", CCAssignToStack<16, 16>>>
]>;

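// Worked example (a hypothetical C prototype, not taken from the ABI text) of
// the register-pair alignment enforced by CC_PPC32_SVR4_Custom_AlignArgRegs
// above. For
//
//   void callee(int a, long long b, int c);
//
// `a` goes in R3. The split halves of `b` must start in an odd-numbered GPR,
// so R4 is skipped and `b` occupies R5/R6; `c` then takes R7. When no aligned
// register pair is left in R3-R10, both halves of an i64 go to the stack, kept
// 8-byte aligned by the CCIfSplit/CCAssignToStack<4, 8> rule above.
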
// This calling convention always puts vector arguments on the stack. It is
// used to assign vector arguments that belong to the variable portion of the
// parameter list of a variadic function.
let Entry = 1 in
def CC_PPC32_SVR4_VarArg : CallingConv<[
  CCDelegateTo<CC_PPC32_SVR4_Common>
]>;

// In contrast to CC_PPC32_SVR4_VarArg, this calling convention first tries to
// put vector arguments in vector registers before putting them on the stack.
let Entry = 1 in
def CC_PPC32_SVR4 : CallingConv<[
  // The first 12 vector arguments are passed in AltiVec registers.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7,
                                        V8, V9, V10, V11, V12, V13]>>>,

  // Float128 types are treated as vector arguments.
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7,
                                        V8, V9, V10, V11, V12, V13]>>>,

  CCDelegateTo<CC_PPC32_SVR4_Common>
]>;

// Helper "calling convention" to handle aggregate by-value arguments.
// Aggregate by-value arguments are always placed in the local variable space
// of the caller. This calling convention is only used to assign those stack
// offsets in the caller's stack frame.
//
// Still, the address of the aggregate copy in the caller's stack frame is
// passed in a GPR (or in the parameter list area if all GPRs are allocated)
// from the caller to the callee. The location for the address argument is
// assigned by the CC_PPC32_SVR4 calling convention.
//
// The only purpose of CC_PPC32_SVR4_Custom_Dummy is to skip arguments that are
// not passed by value.

let Entry = 1 in
def CC_PPC32_SVR4_ByVal : CallingConv<[
  CCIfByVal<CCPassByVal<4, 4>>,

  CCCustom<"CC_PPC32_SVR4_Custom_Dummy">
]>;

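// The CalleeSavedRegs records below are not calling conventions. The
// RegisterInfo TableGen backend turns each one into a save list and a register
// mask in PPCGenRegisterInfo.inc (named after the record, e.g.
// CSR_SVR432_SaveList and CSR_SVR432_RegMask), and PPCRegisterInfo picks among
// them based on the ABI, the calling convention of the function, and subtarget
// features such as SPE, AltiVec and VSX, roughly via
//
//   const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
//
// The selection logic itself lives in PPCRegisterInfo.cpp, not in this file.
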
def CSR_Altivec : CalleeSavedRegs<(add V20, V21, V22, V23, V24, V25, V26, V27,
                                       V28, V29, V30, V31)>;

// SPE does not use FPRs, so break out the common register set as the base.
def CSR_SVR432_COMM : CalleeSavedRegs<(add R14, R15, R16, R17, R18, R19, R20,
                                           R21, R22, R23, R24, R25, R26, R27,
                                           R28, R29, R30, R31, CR2, CR3, CR4
                                      )>;
def CSR_SVR432 : CalleeSavedRegs<(add CSR_SVR432_COMM, F14, F15, F16, F17, F18,
                                      F19, F20, F21, F22, F23, F24, F25, F26,
                                      F27, F28, F29, F30, F31
                                 )>;
def CSR_SPE : CalleeSavedRegs<(add S14, S15, S16, S17, S18, S19, S20, S21, S22,
                                   S23, S24, S25, S26, S27, S28, S29, S30
                              )>;

def CSR_SPE_NO_S30_31 : CalleeSavedRegs<(add S14, S15, S16, S17, S18, S19, S20,
                                             S21, S22, S23, S24, S25, S26, S27,
                                             S28, S29
                                        )>;

def CSR_SVR432_Altivec : CalleeSavedRegs<(add CSR_SVR432, CSR_Altivec)>;

def CSR_SVR432_SPE : CalleeSavedRegs<(add CSR_SVR432_COMM, CSR_SPE)>;

def CSR_SVR432_SPE_NO_S30_31 : CalleeSavedRegs<(add CSR_SVR432_COMM,
                                                    CSR_SPE_NO_S30_31)>;

def CSR_AIX32 : CalleeSavedRegs<(add R13, R14, R15, R16, R17, R18, R19, R20,
                                     R21, R22, R23, R24, R25, R26, R27, R28,
                                     R29, R30, R31, F14, F15, F16, F17, F18,
                                     F19, F20, F21, F22, F23, F24, F25, F26,
                                     F27, F28, F29, F30, F31, CR2, CR3, CR4
                                )>;

def CSR_AIX32_Altivec : CalleeSavedRegs<(add CSR_AIX32, CSR_Altivec)>;

// Common CalleeSavedRegs for SVR4 and AIX.
def CSR_PPC64 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20,
                                     X21, X22, X23, X24, X25, X26, X27, X28,
                                     X29, X30, X31, F14, F15, F16, F17, F18,
                                     F19, F20, F21, F22, F23, F24, F25, F26,
                                     F27, F28, F29, F30, F31, CR2, CR3, CR4
                                )>;

def CSR_PPC64_Altivec : CalleeSavedRegs<(add CSR_PPC64, CSR_Altivec)>;

def CSR_PPC64_R2 : CalleeSavedRegs<(add CSR_PPC64, X2)>;

def CSR_PPC64_R2_Altivec : CalleeSavedRegs<(add CSR_PPC64_Altivec, X2)>;

def CSR_NoRegs : CalleeSavedRegs<(add)>;

// The coldcc calling convention marks most registers as non-volatile.
// Do not include r1 since the stack pointer is never considered a CSR.
// Do not include r2, since it is the TOC register and is added depending
// on whether or not the function uses the TOC and is a non-leaf.
// Do not include r0, r11, r13 as they are optional in function linkage
// and their values may be altered by inter-library calls.
// Do not include r12 as it is used as a scratch register.
// Do not include return registers r3, f1, v2.
def CSR_SVR32_ColdCC_Common : CalleeSavedRegs<(add (sequence "R%u", 4, 10),
                                                   (sequence "R%u", 14, 31),
                                                   (sequence "CR%u", 0, 7))>;

def CSR_SVR32_ColdCC : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Common,
                                            F0, (sequence "F%u", 2, 31))>;

def CSR_SVR32_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR32_ColdCC,
                                                    (sequence "V%u", 0, 1),
                                                    (sequence "V%u", 3, 31))>;

def CSR_SVR32_ColdCC_SPE : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Common,
                                                (sequence "S%u", 4, 10),
                                                (sequence "S%u", 14, 31))>;

def CSR_SVR64_ColdCC : CalleeSavedRegs<(add (sequence "X%u", 4, 10),
                                            (sequence "X%u", 14, 31),
                                            F0, (sequence "F%u", 2, 31),
                                            (sequence "CR%u", 0, 7))>;

def CSR_SVR64_ColdCC_R2 : CalleeSavedRegs<(add CSR_SVR64_ColdCC, X2)>;

def CSR_SVR64_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC,
                                                    (sequence "V%u", 0, 1),
                                                    (sequence "V%u", 3, 31))>;

def CSR_SVR64_ColdCC_R2_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC_Altivec, X2)>;

def CSR_64_AllRegs : CalleeSavedRegs<(add X0, (sequence "X%u", 3, 10),
                                          (sequence "X%u", 14, 31),
                                          (sequence "F%u", 0, 31),
                                          (sequence "CR%u", 0, 7))>;

def CSR_64_AllRegs_Altivec : CalleeSavedRegs<(add CSR_64_AllRegs,
                                                  (sequence "V%u", 0, 31))>;

def CSR_64_AllRegs_AIX_Dflt_Altivec : CalleeSavedRegs<(add CSR_64_AllRegs,
                                                           (sequence "V%u", 0, 19))>;

def CSR_64_AllRegs_VSX : CalleeSavedRegs<(add CSR_64_AllRegs_Altivec,
                                              (sequence "VSL%u", 0, 31))>;

def CSR_64_AllRegs_AIX_Dflt_VSX : CalleeSavedRegs<(add CSR_64_AllRegs_Altivec,
                                                       (sequence "VSL%u", 0, 19))>;

def CSR_ALL_VSRP : CalleeSavedRegs<(sequence "VSRp%u", 0, 31)>;

def CSR_VSRP :
  CalleeSavedRegs<(add VSRp26, VSRp27, VSRp28, VSRp29, VSRp30, VSRp31)>;

def CSR_SVR432_VSRP : CalleeSavedRegs<(add CSR_SVR432_Altivec, CSR_VSRP)>;

def CSR_SVR464_VSRP : CalleeSavedRegs<(add CSR_PPC64_Altivec, CSR_VSRP)>;

def CSR_SVR464_R2_VSRP : CalleeSavedRegs<(add CSR_SVR464_VSRP, X2)>;

def CSR_SVR32_ColdCC_VSRP : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Altivec,
                                                 (sub CSR_ALL_VSRP, VSRp17))>;

def CSR_SVR64_ColdCC_VSRP : CalleeSavedRegs<(add CSR_SVR64_ColdCC,
                                                 (sub CSR_ALL_VSRP, VSRp17))>;

def CSR_SVR64_ColdCC_R2_VSRP : CalleeSavedRegs<(add CSR_SVR64_ColdCC_VSRP, X2)>;

def CSR_64_AllRegs_VSRP :
  CalleeSavedRegs<(add CSR_64_AllRegs_VSX, CSR_ALL_VSRP)>;

def CSR_AIX64_VSRP : CalleeSavedRegs<(add CSR_PPC64_Altivec, CSR_VSRP)>;

def CSR_AIX64_R2_VSRP : CalleeSavedRegs<(add CSR_AIX64_VSRP, X2)>;

def CSR_AIX32_VSRP : CalleeSavedRegs<(add CSR_AIX32_Altivec, CSR_VSRP)>;