//===-- RISCVCallingConv.cpp - RISC-V Custom CC Routines ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the custom routines for the RISC-V Calling Convention.
//
//===----------------------------------------------------------------------===//

#include "RISCVCallingConv.h"
#include "RISCVSubtarget.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCRegister.h"

using namespace llvm;

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
//   passed in a pair of registers (fp+fp, int+fp), and both registers are
//   available, then pass as two separate arguments. If either the GPRs or FPRs
//   are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
//   slot (as it is larger than 2*XLEN and the floating point rules don't
//   apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
//   word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
//   not based on its size and fields. If it will be returned by reference, the
//   frontend must modify the prototype so a pointer with the sret annotation is
//   passed as the first argument. This is not necessary for large scalar
//   returns.
// * Struct return values and varargs should be coerced to structs containing
//   register-size fields in the same situations they would be for fixed
//   arguments.

static const MCPhysReg ArgFPR16s[] = {RISCV::F10_H, RISCV::F11_H, RISCV::F12_H,
                                      RISCV::F13_H, RISCV::F14_H, RISCV::F15_H,
                                      RISCV::F16_H, RISCV::F17_H};
static const MCPhysReg ArgFPR32s[] = {RISCV::F10_F, RISCV::F11_F, RISCV::F12_F,
                                      RISCV::F13_F, RISCV::F14_F, RISCV::F15_F,
                                      RISCV::F16_F, RISCV::F17_F};
static const MCPhysReg ArgFPR64s[] = {RISCV::F10_D, RISCV::F11_D, RISCV::F12_D,
                                      RISCV::F13_D, RISCV::F14_D, RISCV::F15_D,
                                      RISCV::F16_D, RISCV::F17_D};
// This is an interim calling convention and it may be changed in the future.
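// Illustrative summary of the tables below: a single LMUL=1 vector argument
// is allocated from v8-v23, an LMUL=2 argument from the aligned v8m2, v10m2,
// ... register groups, an LMUL=4 argument from v8m4/v12m4/v16m4/v20m4, and an
// LMUL=8 argument from v8m8 or v16m8. The first i1 mask argument is instead
// assigned to v0 (see allocateRVVReg below).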
static const MCPhysReg ArgVRs[] = {
    RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
    RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
    RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
                                     RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
                                     RISCV::V20M2, RISCV::V22M2};
static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
                                     RISCV::V20M4};
static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
static const MCPhysReg ArgVRN2M1s[] = {
    RISCV::V8_V9,   RISCV::V9_V10,  RISCV::V10_V11, RISCV::V11_V12,
    RISCV::V12_V13, RISCV::V13_V14, RISCV::V14_V15, RISCV::V15_V16,
    RISCV::V16_V17, RISCV::V17_V18, RISCV::V18_V19, RISCV::V19_V20,
    RISCV::V20_V21, RISCV::V21_V22, RISCV::V22_V23};
static const MCPhysReg ArgVRN3M1s[] = {
    RISCV::V8_V9_V10,   RISCV::V9_V10_V11,  RISCV::V10_V11_V12,
    RISCV::V11_V12_V13, RISCV::V12_V13_V14, RISCV::V13_V14_V15,
    RISCV::V14_V15_V16, RISCV::V15_V16_V17, RISCV::V16_V17_V18,
    RISCV::V17_V18_V19, RISCV::V18_V19_V20, RISCV::V19_V20_V21,
    RISCV::V20_V21_V22, RISCV::V21_V22_V23};
static const MCPhysReg ArgVRN4M1s[] = {
    RISCV::V8_V9_V10_V11,   RISCV::V9_V10_V11_V12,  RISCV::V10_V11_V12_V13,
    RISCV::V11_V12_V13_V14, RISCV::V12_V13_V14_V15, RISCV::V13_V14_V15_V16,
    RISCV::V14_V15_V16_V17, RISCV::V15_V16_V17_V18, RISCV::V16_V17_V18_V19,
    RISCV::V17_V18_V19_V20, RISCV::V18_V19_V20_V21, RISCV::V19_V20_V21_V22,
    RISCV::V20_V21_V22_V23};
static const MCPhysReg ArgVRN5M1s[] = {
    RISCV::V8_V9_V10_V11_V12,   RISCV::V9_V10_V11_V12_V13,
    RISCV::V10_V11_V12_V13_V14, RISCV::V11_V12_V13_V14_V15,
    RISCV::V12_V13_V14_V15_V16, RISCV::V13_V14_V15_V16_V17,
    RISCV::V14_V15_V16_V17_V18, RISCV::V15_V16_V17_V18_V19,
    RISCV::V16_V17_V18_V19_V20, RISCV::V17_V18_V19_V20_V21,
    RISCV::V18_V19_V20_V21_V22, RISCV::V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN6M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13,   RISCV::V9_V10_V11_V12_V13_V14,
    RISCV::V10_V11_V12_V13_V14_V15, RISCV::V11_V12_V13_V14_V15_V16,
    RISCV::V12_V13_V14_V15_V16_V17, RISCV::V13_V14_V15_V16_V17_V18,
    RISCV::V14_V15_V16_V17_V18_V19, RISCV::V15_V16_V17_V18_V19_V20,
    RISCV::V16_V17_V18_V19_V20_V21, RISCV::V17_V18_V19_V20_V21_V22,
    RISCV::V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN7M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13_V14,   RISCV::V9_V10_V11_V12_V13_V14_V15,
    RISCV::V10_V11_V12_V13_V14_V15_V16, RISCV::V11_V12_V13_V14_V15_V16_V17,
    RISCV::V12_V13_V14_V15_V16_V17_V18, RISCV::V13_V14_V15_V16_V17_V18_V19,
    RISCV::V14_V15_V16_V17_V18_V19_V20, RISCV::V15_V16_V17_V18_V19_V20_V21,
    RISCV::V16_V17_V18_V19_V20_V21_V22, RISCV::V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN8M1s[] = {RISCV::V8_V9_V10_V11_V12_V13_V14_V15,
                                       RISCV::V9_V10_V11_V12_V13_V14_V15_V16,
                                       RISCV::V10_V11_V12_V13_V14_V15_V16_V17,
                                       RISCV::V11_V12_V13_V14_V15_V16_V17_V18,
                                       RISCV::V12_V13_V14_V15_V16_V17_V18_V19,
                                       RISCV::V13_V14_V15_V16_V17_V18_V19_V20,
                                       RISCV::V14_V15_V16_V17_V18_V19_V20_V21,
                                       RISCV::V15_V16_V17_V18_V19_V20_V21_V22,
                                       RISCV::V16_V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN2M2s[] = {RISCV::V8M2_V10M2,  RISCV::V10M2_V12M2,
                                       RISCV::V12M2_V14M2, RISCV::V14M2_V16M2,
                                       RISCV::V16M2_V18M2, RISCV::V18M2_V20M2,
                                       RISCV::V20M2_V22M2};
static const MCPhysReg ArgVRN3M2s[] = {
    RISCV::V8M2_V10M2_V12M2, RISCV::V10M2_V12M2_V14M2,
    RISCV::V12M2_V14M2_V16M2,
    RISCV::V14M2_V16M2_V18M2, RISCV::V16M2_V18M2_V20M2,
    RISCV::V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN4M2s[] = {
    RISCV::V8M2_V10M2_V12M2_V14M2, RISCV::V10M2_V12M2_V14M2_V16M2,
    RISCV::V12M2_V14M2_V16M2_V18M2, RISCV::V14M2_V16M2_V18M2_V20M2,
    RISCV::V16M2_V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN2M4s[] = {RISCV::V8M4_V12M4, RISCV::V12M4_V16M4,
                                       RISCV::V16M4_V20M4};

ArrayRef<MCPhysReg> RISCV::getArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15,
                                       RISCV::X16, RISCV::X17};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in FastCC. X5 and X6 might be used for
  // the save-restore libcall, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, RISCV::X15,
      RISCV::X16, RISCV::X17, RISCV::X28, RISCV::X29, RISCV::X30, RISCV::X31};

  // The GPRs used for passing arguments in FastCC when using the ILP32E/LP64E
  // ABIs.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                          RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2, bool EABI) {
  unsigned XLenInBytes = XLen / 8;
  const RISCVSubtarget &STI =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();
  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(STI.getTargetABI());

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    // TODO: To be compatible with GCC's behaviors, we force them to have
    // 4-byte alignment. This behavior may be changed when RV32E/ILP32E is
    // ratified.
    Align StackAlign(XLenInBytes);
    if (!EABI || XLen != 32)
      StackAlign = std::max(StackAlign, ArgFlags1.getNonZeroOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
    return false;
  }

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
  }

  return false;
}

static MCRegister allocateRVVReg(MVT ValVT, unsigned ValNo, CCState &State,
                                 const RISCVTargetLowering &TLI) {
  const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
  if (RC == &RISCV::VRRegClass) {
    // Assign the first mask argument to V0.
    // This is an interim calling convention and it may be changed in the
    // future.
    if (ValVT.getVectorElementType() == MVT::i1)
      if (MCRegister Reg = State.AllocateReg(RISCV::V0))
        return Reg;
    return State.AllocateReg(ArgVRs);
  }
  if (RC == &RISCV::VRM2RegClass)
    return State.AllocateReg(ArgVRM2s);
  if (RC == &RISCV::VRM4RegClass)
    return State.AllocateReg(ArgVRM4s);
  if (RC == &RISCV::VRM8RegClass)
    return State.AllocateReg(ArgVRM8s);
  if (RC == &RISCV::VRN2M1RegClass)
    return State.AllocateReg(ArgVRN2M1s);
  if (RC == &RISCV::VRN3M1RegClass)
    return State.AllocateReg(ArgVRN3M1s);
  if (RC == &RISCV::VRN4M1RegClass)
    return State.AllocateReg(ArgVRN4M1s);
  if (RC == &RISCV::VRN5M1RegClass)
    return State.AllocateReg(ArgVRN5M1s);
  if (RC == &RISCV::VRN6M1RegClass)
    return State.AllocateReg(ArgVRN6M1s);
  if (RC == &RISCV::VRN7M1RegClass)
    return State.AllocateReg(ArgVRN7M1s);
  if (RC == &RISCV::VRN8M1RegClass)
    return State.AllocateReg(ArgVRN8M1s);
  if (RC == &RISCV::VRN2M2RegClass)
    return State.AllocateReg(ArgVRN2M2s);
  if (RC == &RISCV::VRN3M2RegClass)
    return State.AllocateReg(ArgVRN3M2s);
  if (RC == &RISCV::VRN4M2RegClass)
    return State.AllocateReg(ArgVRN4M2s);
  if (RC == &RISCV::VRN2M4RegClass)
    return State.AllocateReg(ArgVRN2M4s);
  llvm_unreachable("Unhandled register class for ValueType");
}

// Implements the RISC-V calling convention. Returns true upon failure.
bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const DataLayout &DL = MF.getDataLayout();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();

  unsigned XLen = Subtarget.getXLen();
  MVT XLenVT = Subtarget.getXLenVT();

  // The static chain parameter must not be passed in normal argument
  // registers, so we assign t2 (x7) to it, as done in GCC's
  // __builtin_call_with_static_chain.
  if (ArgFlags.isNest()) {
    if (MCRegister Reg = State.AllocateReg(RISCV::X7)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
  if (!LocVT.isVector() && IsRet && ValNo > 1)
    return true;

  // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
  // passing a variadic argument, or if no F16/F32 argument registers are
  // available.
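  // For example, under the lp64d ABI a fixed f32 argument is assigned to
  // fa0-fa7 below, while a variadic f32 argument (IsFixed == false) falls
  // through to the GPR/stack rules instead.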
  bool UseGPRForF16_F32 = true;
  // UseGPRForF64 is true if targeting soft-float ABIs or an FLEN=32 ABI, if
  // passing a variadic argument, or if no F64 argument registers are
  // available.
  bool UseGPRForF64 = true;

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  switch (ABI) {
  default:
    llvm_unreachable("Unexpected ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64E:
    break;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    UseGPRForF16_F32 = !IsFixed;
    break;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    UseGPRForF16_F32 = !IsFixed;
    UseGPRForF64 = !IsFixed;
    break;
  }

  if ((LocVT == MVT::f16 || LocVT == MVT::bf16) && !UseGPRForF16_F32) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR16s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && !UseGPRForF16_F32) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR32s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && !UseGPRForF64) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR64s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);

  // Zfinx/Zdinx use GPRs without a bitcast when possible.
  if ((LocVT == MVT::f32 && XLen == 32 && Subtarget.hasStdExtZfinx()) ||
      (LocVT == MVT::f64 && XLen == 64 && Subtarget.hasStdExtZdinx())) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // FP values smaller than XLen are passed in a GPR using a custom assignment.
  if (LocVT == MVT::f16 || LocVT == MVT::bf16 ||
      (LocVT == MVT::f32 && XLen == 64)) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      LocVT = XLenVT;
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Bitcast FP to GPR if we can use a GPR register.
  if ((XLen == 32 && LocVT == MVT::f32) || (XLen == 64 && LocVT == MVT::f64)) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      LocVT = XLenVT;
      LocInfo = CCValAssign::BCvt;
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  // TODO: To be compatible with GCC's behaviors, we don't currently align
  // registers if we are using the ILP32E calling convention. This behavior may
  // be changed when RV32E/ILP32E is ratified.
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes &&
      ABI != RISCVABI::ABI_ILP32E) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
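    // For example, on RV32 with a0-a2 already allocated, a variadic int64_t
    // or double skips a3 and is passed in the aligned pair a4/a5.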
    if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle passing f64 on RV32D with a soft float ABI or when floating point
  // registers are exhausted.
  if (XLen == 32 && LocVT == MVT::f64) {
    assert(PendingLocs.empty() && "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
    // GPRs, split between a GPR and the stack, or passed completely on the
    // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
    // cases.
    MCRegister Reg = State.AllocateReg(ArgGPRs);
    if (!Reg) {
      int64_t StackOffset = State.AllocateStack(8, Align(8));
      State.addLoc(
          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      return false;
    }
    LocVT = MVT::i32;
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    MCRegister HiReg = State.AllocateReg(ArgGPRs);
    if (HiReg) {
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
    } else {
      int64_t StackOffset = State.AllocateStack(4, Align(4));
      State.addLoc(
          CCValAssign::getCustomMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
    }
    return false;
  }

  // Split arguments might be passed indirectly, so keep track of the pending
  // values. Split vectors are passed via a mix of registers and indirectly, so
  // treat them as we would any other argument.
  if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::Indirect;
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
      PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(
        XLen, State, VA, AF, ValNo, ValVT, LocVT, ArgFlags,
        ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E);
  }

  // Allocate to a register if possible, or else a stack slot.
  MCRegister Reg;
  unsigned StoreSizeBytes = XLen / 8;
  Align StackAlign = Align(XLen / 8);

  if (ValVT.isVector() || ValVT.isRISCVVectorTuple()) {
    Reg = allocateRVVReg(ValVT, ValNo, State, TLI);
    if (Reg) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (ValVT.isFixedLengthVector()) {
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
    } else {
      // For return values, the vector must be passed fully via registers or
      // via the stack.
      // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
      // but we're using all of them.
      if (IsRet)
        return true;
      // Try using a GPR to pass the address.
      if ((Reg = State.AllocateReg(ArgGPRs))) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else if (ValVT.isScalableVector()) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else {
        StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element sizes, being careful for vXi1
        // vectors.
        StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
      }
    }
  } else {
    Reg = State.AllocateReg(ArgGPRs);
  }

  int64_t StackOffset =
      Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        It.convertToReg(Reg);
      else
        It.convertToMem(StackOffset);
      State.addLoc(It);
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert(((ValVT.isFloatingPoint() && !ValVT.isVector()) || LocVT == XLenVT ||
          (TLI.getSubtarget().hasVInstructions() &&
           (ValVT.isVector() || ValVT.isRISCVVectorTuple()))) &&
         "Expected an XLenVT or vector types at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  return false;
}

// FastCC yields less than a 1% performance improvement on particular
// benchmarks, but it may theoretically help in some cases.
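// Compared with the standard convention, FastCC also draws on temporary
// registers: t3-t6 for integers (see getFastCCArgGPRs) and ft0-ft7 plus
// ft8-ft11 for floating point (see the FPR lists below), while still avoiding
// t0/t1 (save-restore libcalls) and t2 (the Zicfilp label register).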
bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State,
                           bool IsFixed, bool IsRet, Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();
  RISCVABI::ABI ABI = Subtarget.getTargetABI();

  if ((LocVT == MVT::f16 && Subtarget.hasStdExtZfhmin()) ||
      (LocVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin())) {
    static const MCPhysReg FPR16List[] = {
        RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
        RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
        RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
        RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
    if (MCRegister Reg = State.AllocateReg(FPR16List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    static const MCPhysReg FPR32List[] = {
        RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
        RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
        RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
        RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    static const MCPhysReg FPR64List[] = {
        RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
        RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
        RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
        RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Check if there is an available GPR before hitting the stack.
  if ((LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) ||
      (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
      (LocVT == MVT::f64 && Subtarget.is64Bit() &&
       Subtarget.hasStdExtZdinx())) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      if (LocVT.getSizeInBits() != Subtarget.getXLen()) {
        LocVT = XLenVT;
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  ArrayRef<MCPhysReg> ArgGPRs = getFastCCArgGPRs(ABI);

  if (LocVT.isVector()) {
    if (MCRegister Reg = allocateRVVReg(ValVT, ValNo, State, TLI)) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (LocVT.isFixedLengthVector()) {
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }

    // Pass scalable vectors indirectly. Pass fixed vectors indirectly if we
    // have a free GPR.
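    // Fixed-length vectors with no free GPR keep their vector LocVT and are
    // passed by value on the stack via the final case at the end of this
    // function.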
    if (LocVT.isScalableVector() ||
        State.getFirstUnallocated(ArgGPRs) != ArgGPRs.size()) {
      LocInfo = CCValAssign::Indirect;
      LocVT = XLenVT;
    }
  }

  if (LocVT == XLenVT) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == XLenVT || LocVT == MVT::f16 || LocVT == MVT::bf16 ||
      LocVT == MVT::f32 || LocVT == MVT::f64 || LocVT.isFixedLengthVector()) {
    Align StackAlign =
        MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
    int64_t Offset = State.AllocateStack(LocVT.getStoreSize(), StackAlign);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }

  return true; // CC didn't match.
}

bool llvm::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                        CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                        CCState &State) {
  if (ArgFlags.isNest()) {
    report_fatal_error(
        "Attribute 'nest' is not supported in GHC calling convention");
  }

  static const MCPhysReg GPRList[] = {
      RISCV::X9,  RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
      RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
    //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  const RISCVSubtarget &Subtarget =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    // Pass in STG registers: F1, ..., F6
    //                        fs0 ... fs5
    static const MCPhysReg FPR32List[] = {RISCV::F8_F,  RISCV::F9_F,
                                          RISCV::F18_F, RISCV::F19_F,
                                          RISCV::F20_F, RISCV::F21_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    // Pass in STG registers: D1, ..., D6
    //                        fs6 ... fs11
    static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
                                          RISCV::F24_D, RISCV::F25_D,
                                          RISCV::F26_D, RISCV::F27_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if ((LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
      (LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() &&
       Subtarget.is64Bit())) {
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  report_fatal_error("No registers left in GHC calling convention");
  return true;
}