//===-- RISCVCallingConv.cpp - RISC-V Custom CC Routines ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the custom routines for the RISC-V Calling Convention.
//
//===----------------------------------------------------------------------===//

#include "RISCVCallingConv.h"
#include "RISCVSubtarget.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCRegister.h"

using namespace llvm;

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
//   passed in a pair of registers (fp+fp, int+fp), and both registers are
//   available, then pass as two separate arguments. If either the GPRs or
//   FPRs are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
//   slot (as it is larger than 2*XLEN and the floating point rules don't
//   apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
//   word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
//   not based on its size and fields. If it will be returned by reference,
//   the frontend must modify the prototype so a pointer with the sret
//   annotation is passed as the first argument. This is not necessary for
//   large scalar returns.
// * Struct return values and varargs should be coerced to structs containing
//   register-size fields in the same situations they would be for fixed
//   arguments.
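//
// For example (illustrative only): on ILP32F, a struct such as
// { float f; int i; } is passed as two separate arguments (one FPR, one GPR)
// while both register classes still have free argument registers; once either
// class is exhausted, it is instead coerced to a two-element word-sized array,
// per the rules above.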

static const MCPhysReg ArgFPR16s[] = {RISCV::F10_H, RISCV::F11_H, RISCV::F12_H,
                                      RISCV::F13_H, RISCV::F14_H, RISCV::F15_H,
                                      RISCV::F16_H, RISCV::F17_H};
static const MCPhysReg ArgFPR32s[] = {RISCV::F10_F, RISCV::F11_F, RISCV::F12_F,
                                      RISCV::F13_F, RISCV::F14_F, RISCV::F15_F,
                                      RISCV::F16_F, RISCV::F17_F};
static const MCPhysReg ArgFPR64s[] = {RISCV::F10_D, RISCV::F11_D, RISCV::F12_D,
                                      RISCV::F13_D, RISCV::F14_D, RISCV::F15_D,
                                      RISCV::F16_D, RISCV::F17_D};
// This is an interim calling convention and it may be changed in the future.
static const MCPhysReg ArgVRs[] = {
    RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
    RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
    RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
                                     RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
                                     RISCV::V20M2, RISCV::V22M2};
static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
                                     RISCV::V20M4};
static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
static const MCPhysReg ArgVRN2M1s[] = {
    RISCV::V8_V9,   RISCV::V9_V10,  RISCV::V10_V11, RISCV::V11_V12,
    RISCV::V12_V13, RISCV::V13_V14, RISCV::V14_V15, RISCV::V15_V16,
    RISCV::V16_V17, RISCV::V17_V18, RISCV::V18_V19, RISCV::V19_V20,
    RISCV::V20_V21, RISCV::V21_V22, RISCV::V22_V23};
static const MCPhysReg ArgVRN3M1s[] = {
    RISCV::V8_V9_V10,   RISCV::V9_V10_V11,  RISCV::V10_V11_V12,
    RISCV::V11_V12_V13, RISCV::V12_V13_V14, RISCV::V13_V14_V15,
    RISCV::V14_V15_V16, RISCV::V15_V16_V17, RISCV::V16_V17_V18,
    RISCV::V17_V18_V19, RISCV::V18_V19_V20, RISCV::V19_V20_V21,
    RISCV::V20_V21_V22, RISCV::V21_V22_V23};
static const MCPhysReg ArgVRN4M1s[] = {
    RISCV::V8_V9_V10_V11,   RISCV::V9_V10_V11_V12,  RISCV::V10_V11_V12_V13,
    RISCV::V11_V12_V13_V14, RISCV::V12_V13_V14_V15, RISCV::V13_V14_V15_V16,
    RISCV::V14_V15_V16_V17, RISCV::V15_V16_V17_V18, RISCV::V16_V17_V18_V19,
    RISCV::V17_V18_V19_V20, RISCV::V18_V19_V20_V21, RISCV::V19_V20_V21_V22,
    RISCV::V20_V21_V22_V23};
static const MCPhysReg ArgVRN5M1s[] = {
    RISCV::V8_V9_V10_V11_V12,   RISCV::V9_V10_V11_V12_V13,
    RISCV::V10_V11_V12_V13_V14, RISCV::V11_V12_V13_V14_V15,
    RISCV::V12_V13_V14_V15_V16, RISCV::V13_V14_V15_V16_V17,
    RISCV::V14_V15_V16_V17_V18, RISCV::V15_V16_V17_V18_V19,
    RISCV::V16_V17_V18_V19_V20, RISCV::V17_V18_V19_V20_V21,
    RISCV::V18_V19_V20_V21_V22, RISCV::V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN6M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13,   RISCV::V9_V10_V11_V12_V13_V14,
    RISCV::V10_V11_V12_V13_V14_V15, RISCV::V11_V12_V13_V14_V15_V16,
    RISCV::V12_V13_V14_V15_V16_V17, RISCV::V13_V14_V15_V16_V17_V18,
    RISCV::V14_V15_V16_V17_V18_V19, RISCV::V15_V16_V17_V18_V19_V20,
    RISCV::V16_V17_V18_V19_V20_V21, RISCV::V17_V18_V19_V20_V21_V22,
    RISCV::V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN7M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13_V14,   RISCV::V9_V10_V11_V12_V13_V14_V15,
    RISCV::V10_V11_V12_V13_V14_V15_V16, RISCV::V11_V12_V13_V14_V15_V16_V17,
    RISCV::V12_V13_V14_V15_V16_V17_V18, RISCV::V13_V14_V15_V16_V17_V18_V19,
    RISCV::V14_V15_V16_V17_V18_V19_V20, RISCV::V15_V16_V17_V18_V19_V20_V21,
    RISCV::V16_V17_V18_V19_V20_V21_V22, RISCV::V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN8M1s[] = {RISCV::V8_V9_V10_V11_V12_V13_V14_V15,
                                       RISCV::V9_V10_V11_V12_V13_V14_V15_V16,
                                       RISCV::V10_V11_V12_V13_V14_V15_V16_V17,
                                       RISCV::V11_V12_V13_V14_V15_V16_V17_V18,
                                       RISCV::V12_V13_V14_V15_V16_V17_V18_V19,
                                       RISCV::V13_V14_V15_V16_V17_V18_V19_V20,
                                       RISCV::V14_V15_V16_V17_V18_V19_V20_V21,
                                       RISCV::V15_V16_V17_V18_V19_V20_V21_V22,
                                       RISCV::V16_V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN2M2s[] = {RISCV::V8M2_V10M2,  RISCV::V10M2_V12M2,
                                       RISCV::V12M2_V14M2, RISCV::V14M2_V16M2,
                                       RISCV::V16M2_V18M2, RISCV::V18M2_V20M2,
                                       RISCV::V20M2_V22M2};
static const MCPhysReg ArgVRN3M2s[] = {
    RISCV::V8M2_V10M2_V12M2,  RISCV::V10M2_V12M2_V14M2,
    RISCV::V12M2_V14M2_V16M2, RISCV::V14M2_V16M2_V18M2,
    RISCV::V16M2_V18M2_V20M2, RISCV::V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN4M2s[] = {
    RISCV::V8M2_V10M2_V12M2_V14M2,  RISCV::V10M2_V12M2_V14M2_V16M2,
    RISCV::V12M2_V14M2_V16M2_V18M2, RISCV::V14M2_V16M2_V18M2_V20M2,
    RISCV::V16M2_V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN2M4s[] = {RISCV::V8M4_V12M4, RISCV::V12M4_V16M4,
                                       RISCV::V16M4_V20M4};
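
// Note: the ArgVR* lists above enumerate the v8-v23 argument registers grouped
// by LMUL (and, for tuples, by NF) so that AllocateReg() hands out correctly
// aligned register groups; e.g. an LMUL=4 value can only start at v8, v12, v16
// or v20.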

ArrayRef<MCPhysReg> RISCV::getArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15,
                                       RISCV::X16, RISCV::X17};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be
  // used for the save-restore libcall, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, RISCV::X15,
      RISCV::X16, RISCV::X17, RISCV::X28, RISCV::X29, RISCV::X30, RISCV::X31};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                          RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2, bool EABI) {
  unsigned XLenInBytes = XLen / 8;
  const RISCVSubtarget &STI =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();
  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(STI.getTargetABI());

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    // TODO: To be compatible with GCC's behaviors, we force them to have
    // 4-byte alignment. This behavior may be changed when RV32E/ILP32E is
    // ratified.
    Align StackAlign(XLenInBytes);
    if (!EABI || XLen != 32)
      StackAlign = std::max(StackAlign, ArgFlags1.getNonZeroOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
    return false;
  }

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
  }

  return false;
}
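
// CC_RISCVAssign2XLen is reached, for example, for an i64 argument on RV32:
// if two GPRs are free it occupies a pair such as a0/a1; if only a7 is free,
// the low half goes in a7 and the high half on the stack.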

static MCRegister allocateRVVReg(MVT ValVT, unsigned ValNo, CCState &State,
                                 const RISCVTargetLowering &TLI) {
  const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
  if (RC == &RISCV::VRRegClass) {
    // Assign the first mask argument to V0.
    // This is an interim calling convention and it may be changed in the
    // future.
    if (ValVT.getVectorElementType() == MVT::i1)
      if (MCRegister Reg = State.AllocateReg(RISCV::V0))
        return Reg;
    return State.AllocateReg(ArgVRs);
  }
  if (RC == &RISCV::VRM2RegClass)
    return State.AllocateReg(ArgVRM2s);
  if (RC == &RISCV::VRM4RegClass)
    return State.AllocateReg(ArgVRM4s);
  if (RC == &RISCV::VRM8RegClass)
    return State.AllocateReg(ArgVRM8s);
  if (RC == &RISCV::VRN2M1RegClass)
    return State.AllocateReg(ArgVRN2M1s);
  if (RC == &RISCV::VRN3M1RegClass)
    return State.AllocateReg(ArgVRN3M1s);
  if (RC == &RISCV::VRN4M1RegClass)
    return State.AllocateReg(ArgVRN4M1s);
  if (RC == &RISCV::VRN5M1RegClass)
    return State.AllocateReg(ArgVRN5M1s);
  if (RC == &RISCV::VRN6M1RegClass)
    return State.AllocateReg(ArgVRN6M1s);
  if (RC == &RISCV::VRN7M1RegClass)
    return State.AllocateReg(ArgVRN7M1s);
  if (RC == &RISCV::VRN8M1RegClass)
    return State.AllocateReg(ArgVRN8M1s);
  if (RC == &RISCV::VRN2M2RegClass)
    return State.AllocateReg(ArgVRN2M2s);
  if (RC == &RISCV::VRN3M2RegClass)
    return State.AllocateReg(ArgVRN3M2s);
  if (RC == &RISCV::VRN4M2RegClass)
    return State.AllocateReg(ArgVRN4M2s);
  if (RC == &RISCV::VRN2M4RegClass)
    return State.AllocateReg(ArgVRN2M4s);
  llvm_unreachable("Unhandled register class for ValueType");
}
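
// allocateRVVReg mirrors the interim vector calling convention: mask arguments
// prefer v0, and everything else is drawn from the LMUL-aligned v8-v23 groups
// above; an invalid MCRegister is returned when the matching group is
// exhausted.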

// Implements the RISC-V calling convention. Returns true upon failure.
bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const DataLayout &DL = MF.getDataLayout();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();

  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
  assert(XLen == 32 || XLen == 64);
  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;

  // The static chain parameter must not be passed in normal argument
  // registers, so we assign t2 to it, as done in GCC's
  // __builtin_call_with_static_chain.
  if (ArgFlags.isNest()) {
    if (MCRegister Reg = State.AllocateReg(RISCV::X7)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
  if (!LocVT.isVector() && IsRet && ValNo > 1)
    return true;

  // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
  // passing a variadic argument, or if no F16/F32 argument registers are
  // available.
  bool UseGPRForF16_F32 = true;
  // UseGPRForF64 is true if targeting a soft-float ABI or an FLEN=32 ABI, if
  // passing a variadic argument, or if no F64 argument registers are
  // available.
  bool UseGPRForF64 = true;

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  switch (ABI) {
  default:
    llvm_unreachable("Unexpected ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64E:
    break;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    UseGPRForF16_F32 = !IsFixed;
    break;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    UseGPRForF16_F32 = !IsFixed;
    UseGPRForF64 = !IsFixed;
    break;
  }

  // FPR16, FPR32, and FPR64 alias each other.
  if (State.getFirstUnallocated(ArgFPR32s) == std::size(ArgFPR32s)) {
    UseGPRForF16_F32 = true;
    UseGPRForF64 = true;
  }

  // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
  // similar local variables rather than directly checking against the target
  // ABI.

  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);

  if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::bf16 ||
                           (ValVT == MVT::f32 && XLen == 64))) {
    MCRegister Reg = State.AllocateReg(ArgGPRs);
    if (Reg) {
      LocVT = XLenVT;
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (UseGPRForF16_F32 &&
      (ValVT == MVT::f16 || ValVT == MVT::bf16 || ValVT == MVT::f32)) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::BCvt;
  } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
    LocVT = MVT::i64;
    LocInfo = CCValAssign::BCvt;
  }

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  // TODO: To be compatible with GCC's behaviors, we don't currently align
  // registers when using the ILP32E calling convention. This behavior may be
  // changed when RV32E/ILP32E is ratified.
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes &&
      ABI != RISCVABI::ABI_ILP32E) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }
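
  // For example: a variadic i64 on RV32 has 8-byte alignment, so if the next
  // free GPR is a1 it is skipped and the value is passed in the aligned pair
  // a2/a3, matching the psABI rule described above.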

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle passing f64 on RV32D with a soft float ABI or when floating point
  // registers are exhausted.
  if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
    assert(PendingLocs.empty() && "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
    // GPRs, split between a GPR and the stack, or passed completely on the
    // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
    // cases.
    MCRegister Reg = State.AllocateReg(ArgGPRs);
    if (!Reg) {
      unsigned StackOffset = State.AllocateStack(8, Align(8));
      State.addLoc(
          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      return false;
    }
    LocVT = MVT::i32;
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    MCRegister HiReg = State.AllocateReg(ArgGPRs);
    if (HiReg) {
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
    } else {
      unsigned StackOffset = State.AllocateStack(4, Align(4));
      State.addLoc(
          CCValAssign::getCustomMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
    }
    return false;
  }
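
  // For example: on ILP32 with a0-a6 already allocated, an f64 argument gets
  // its low word in a7 (a custom reg location) and its high word in a 4-byte
  // stack slot (a custom mem location); the lowering code reassembles the two
  // halves.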

  // Fixed-length vectors are located in the corresponding scalable-vector
  // container types.
  if (ValVT.isFixedLengthVector())
    LocVT = TLI.getContainerForFixedLengthVector(LocVT);

  // Split arguments might be passed indirectly, so keep track of the pending
  // values. Split vectors are passed via a mix of registers and indirectly, so
  // treat them as we would any other argument.
  if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::Indirect;
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
      PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(
        XLen, State, VA, AF, ValNo, ValVT, LocVT, ArgFlags,
        ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E);
  }

  // Allocate to a register if possible, or else a stack slot.
  MCRegister Reg;
  unsigned StoreSizeBytes = XLen / 8;
  Align StackAlign = Align(XLen / 8);

  if ((ValVT == MVT::f16 || ValVT == MVT::bf16) && !UseGPRForF16_F32)
    Reg = State.AllocateReg(ArgFPR16s);
  else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
    Reg = State.AllocateReg(ArgFPR32s);
  else if (ValVT == MVT::f64 && !UseGPRForF64)
    Reg = State.AllocateReg(ArgFPR64s);
  else if (ValVT.isVector() || ValVT.isRISCVVectorTuple()) {
    Reg = allocateRVVReg(ValVT, ValNo, State, TLI);
    if (!Reg) {
      // For return values, the vector must be passed fully via registers or
      // via the stack.
      // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
      // but we're using all of them.
      if (IsRet)
        return true;
      // Try using a GPR to pass the address.
      if ((Reg = State.AllocateReg(ArgGPRs))) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else if (ValVT.isScalableVector()) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else {
        // Pass fixed-length vectors on the stack.
        LocVT = ValVT;
        StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element sizes, being careful for vXi1
        // vectors.
        StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
      }
    }
  } else {
    Reg = State.AllocateReg(ArgGPRs);
  }

  unsigned StackOffset =
      Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        It.convertToReg(Reg);
      else
        It.convertToMem(StackOffset);
      State.addLoc(It);
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
          (TLI.getSubtarget().hasVInstructions() &&
           (ValVT.isVector() || ValVT.isRISCVVectorTuple()))) &&
         "Expected an XLenVT or vector types at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  // When a scalar floating-point value is passed on the stack, no
  // bit-conversion is needed.
  if (ValVT.isFloatingPoint() && LocInfo != CCValAssign::Indirect) {
    assert(!ValVT.isVector());
    LocVT = ValVT;
    LocInfo = CCValAssign::Full;
  }
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  return false;
}
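
// Relative to CC_RISCV, the FastCC below additionally hands out the
// temporaries t3-t6 (X28-X31) and the temporary FPRs, since fastcc functions
// do not need to stay ABI-compatible.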

// FastCC yields less than a 1% performance improvement on particular
// benchmarks, but it may theoretically benefit some cases.
bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State,
                           bool IsFixed, bool IsRet, Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();
  RISCVABI::ABI ABI = Subtarget.getTargetABI();

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f16 && Subtarget.hasStdExtZfhmin()) {
    static const MCPhysReg FPR16List[] = {
        RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
        RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
        RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
        RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
    if (MCRegister Reg = State.AllocateReg(FPR16List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    static const MCPhysReg FPR32List[] = {
        RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
        RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
        RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
        RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    static const MCPhysReg FPR64List[] = {
        RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
        RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
        RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
        RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPR before hitting the stack.
  if ((LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) ||
      (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
      (LocVT == MVT::f64 && Subtarget.is64Bit() &&
       Subtarget.hasStdExtZdinx())) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      if (LocVT.getSizeInBits() != Subtarget.getXLen()) {
        LocVT = Subtarget.getXLenVT();
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      LocVT = Subtarget.getXLenVT();
      LocInfo = CCValAssign::BCvt;
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f16) {
    unsigned Offset2 = State.AllocateStack(2, Align(2));
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
    return false;
  }

  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    unsigned Offset4 = State.AllocateStack(4, Align(4));
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
    return false;
  }

  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    unsigned Offset5 = State.AllocateStack(8, Align(8));
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
    return false;
  }

  if (LocVT.isVector()) {
    if (MCRegister Reg = allocateRVVReg(ValVT, ValNo, State, TLI)) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (ValVT.isFixedLengthVector())
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    } else {
      // Try and pass the address via a "fast" GPR.
      if (MCRegister GPRReg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
        LocInfo = CCValAssign::Indirect;
        LocVT = Subtarget.getXLenVT();
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
      } else if (ValVT.isFixedLengthVector()) {
        auto StackAlign =
            MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
        unsigned StackOffset =
            State.AllocateStack(ValVT.getStoreSize(), StackAlign);
        State.addLoc(
            CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      } else {
        // Can't pass scalable vectors on the stack.
        return true;
      }
    }

    return false;
  }

  return true; // CC didn't match.
}
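
// CC_RISCV_GHC below implements the GHC (Glasgow Haskell Compiler) calling
// convention, which pins the STG virtual registers to fixed callee-saved
// registers and never passes arguments on the stack; running out of registers
// is therefore a fatal error.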

bool llvm::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                        CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                        CCState &State) {
  if (ArgFlags.isNest()) {
    report_fatal_error(
        "Attribute 'nest' is not supported in GHC calling convention");
  }

  static const MCPhysReg GPRList[] = {
      RISCV::X9,  RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
      RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
    //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  const RISCVSubtarget &Subtarget =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    // Pass in STG registers: F1, ..., F6
    //                        fs0, ..., fs5
    static const MCPhysReg FPR32List[] = {RISCV::F8_F,  RISCV::F9_F,
                                          RISCV::F18_F, RISCV::F19_F,
                                          RISCV::F20_F, RISCV::F21_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    // Pass in STG registers: D1, ..., D6
    //                        fs6, ..., fs11
    static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
                                          RISCV::F24_D, RISCV::F25_D,
                                          RISCV::F26_D, RISCV::F27_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if ((LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
      (LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() &&
       Subtarget.is64Bit())) {
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  report_fatal_error("No registers left in GHC calling convention");
  return true;
}