//===-- RISCVCallingConv.cpp - RISC-V Custom CC Routines ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the custom routines for the RISC-V Calling Convention.
//
//===----------------------------------------------------------------------===//

#include "RISCVCallingConv.h"
#include "RISCVSubtarget.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCRegister.h"

using namespace llvm;

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
//   passed in a pair of registers (fp+fp, int+fp), and both registers are
//   available, then pass as two separate arguments. If either the GPRs or FPRs
//   are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
//   slot (as it is larger than 2*XLEN and the floating point rules don't
//   apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
//   word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
//   not based on its size and fields. If it will be returned by reference, the
//   frontend must modify the prototype so a pointer with the sret annotation
//   is passed as the first argument. This is not necessary for large scalar
//   returns.
// * Struct return values and varargs should be coerced to structs containing
//   register-size fields in the same situations they would be for fixed
//   arguments.

static const MCPhysReg ArgFPR16s[] = {RISCV::F10_H, RISCV::F11_H, RISCV::F12_H,
                                      RISCV::F13_H, RISCV::F14_H, RISCV::F15_H,
                                      RISCV::F16_H, RISCV::F17_H};
static const MCPhysReg ArgFPR32s[] = {RISCV::F10_F, RISCV::F11_F, RISCV::F12_F,
                                      RISCV::F13_F, RISCV::F14_F, RISCV::F15_F,
                                      RISCV::F16_F, RISCV::F17_F};
static const MCPhysReg ArgFPR64s[] = {RISCV::F10_D, RISCV::F11_D, RISCV::F12_D,
                                      RISCV::F13_D, RISCV::F14_D, RISCV::F15_D,
                                      RISCV::F16_D, RISCV::F17_D};
// This is an interim calling convention and it may be changed in the future.
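// Vector arguments and return values are allocated from v8-v23; the grouped
// and tuple register lists below cover LMUL=2/4/8 values and segment
// load/store tuple types. The first vector mask argument is special-cased to
// v0 in allocateRVVReg below.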
static const MCPhysReg ArgVRs[] = {
    RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
    RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
    RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
                                     RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
                                     RISCV::V20M2, RISCV::V22M2};
static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
                                     RISCV::V20M4};
static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
static const MCPhysReg ArgVRN2M1s[] = {
    RISCV::V8_V9,   RISCV::V9_V10,  RISCV::V10_V11, RISCV::V11_V12,
    RISCV::V12_V13, RISCV::V13_V14, RISCV::V14_V15, RISCV::V15_V16,
    RISCV::V16_V17, RISCV::V17_V18, RISCV::V18_V19, RISCV::V19_V20,
    RISCV::V20_V21, RISCV::V21_V22, RISCV::V22_V23};
static const MCPhysReg ArgVRN3M1s[] = {
    RISCV::V8_V9_V10,   RISCV::V9_V10_V11,  RISCV::V10_V11_V12,
    RISCV::V11_V12_V13, RISCV::V12_V13_V14, RISCV::V13_V14_V15,
    RISCV::V14_V15_V16, RISCV::V15_V16_V17, RISCV::V16_V17_V18,
    RISCV::V17_V18_V19, RISCV::V18_V19_V20, RISCV::V19_V20_V21,
    RISCV::V20_V21_V22, RISCV::V21_V22_V23};
static const MCPhysReg ArgVRN4M1s[] = {
    RISCV::V8_V9_V10_V11,   RISCV::V9_V10_V11_V12,  RISCV::V10_V11_V12_V13,
    RISCV::V11_V12_V13_V14, RISCV::V12_V13_V14_V15, RISCV::V13_V14_V15_V16,
    RISCV::V14_V15_V16_V17, RISCV::V15_V16_V17_V18, RISCV::V16_V17_V18_V19,
    RISCV::V17_V18_V19_V20, RISCV::V18_V19_V20_V21, RISCV::V19_V20_V21_V22,
    RISCV::V20_V21_V22_V23};
static const MCPhysReg ArgVRN5M1s[] = {
    RISCV::V8_V9_V10_V11_V12,   RISCV::V9_V10_V11_V12_V13,
    RISCV::V10_V11_V12_V13_V14, RISCV::V11_V12_V13_V14_V15,
    RISCV::V12_V13_V14_V15_V16, RISCV::V13_V14_V15_V16_V17,
    RISCV::V14_V15_V16_V17_V18, RISCV::V15_V16_V17_V18_V19,
    RISCV::V16_V17_V18_V19_V20, RISCV::V17_V18_V19_V20_V21,
    RISCV::V18_V19_V20_V21_V22, RISCV::V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN6M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13,   RISCV::V9_V10_V11_V12_V13_V14,
    RISCV::V10_V11_V12_V13_V14_V15, RISCV::V11_V12_V13_V14_V15_V16,
    RISCV::V12_V13_V14_V15_V16_V17, RISCV::V13_V14_V15_V16_V17_V18,
    RISCV::V14_V15_V16_V17_V18_V19, RISCV::V15_V16_V17_V18_V19_V20,
    RISCV::V16_V17_V18_V19_V20_V21, RISCV::V17_V18_V19_V20_V21_V22,
    RISCV::V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN7M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13_V14,   RISCV::V9_V10_V11_V12_V13_V14_V15,
    RISCV::V10_V11_V12_V13_V14_V15_V16, RISCV::V11_V12_V13_V14_V15_V16_V17,
    RISCV::V12_V13_V14_V15_V16_V17_V18, RISCV::V13_V14_V15_V16_V17_V18_V19,
    RISCV::V14_V15_V16_V17_V18_V19_V20, RISCV::V15_V16_V17_V18_V19_V20_V21,
    RISCV::V16_V17_V18_V19_V20_V21_V22, RISCV::V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN8M1s[] = {RISCV::V8_V9_V10_V11_V12_V13_V14_V15,
                                       RISCV::V9_V10_V11_V12_V13_V14_V15_V16,
                                       RISCV::V10_V11_V12_V13_V14_V15_V16_V17,
                                       RISCV::V11_V12_V13_V14_V15_V16_V17_V18,
                                       RISCV::V12_V13_V14_V15_V16_V17_V18_V19,
                                       RISCV::V13_V14_V15_V16_V17_V18_V19_V20,
                                       RISCV::V14_V15_V16_V17_V18_V19_V20_V21,
                                       RISCV::V15_V16_V17_V18_V19_V20_V21_V22,
                                       RISCV::V16_V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN2M2s[] = {RISCV::V8M2_V10M2,  RISCV::V10M2_V12M2,
                                       RISCV::V12M2_V14M2, RISCV::V14M2_V16M2,
                                       RISCV::V16M2_V18M2, RISCV::V18M2_V20M2,
                                       RISCV::V20M2_V22M2};
static const MCPhysReg ArgVRN3M2s[] = {
    RISCV::V8M2_V10M2_V12M2,  RISCV::V10M2_V12M2_V14M2,
    RISCV::V12M2_V14M2_V16M2, RISCV::V14M2_V16M2_V18M2,
    RISCV::V16M2_V18M2_V20M2, RISCV::V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN4M2s[] = {
    RISCV::V8M2_V10M2_V12M2_V14M2,  RISCV::V10M2_V12M2_V14M2_V16M2,
    RISCV::V12M2_V14M2_V16M2_V18M2, RISCV::V14M2_V16M2_V18M2_V20M2,
    RISCV::V16M2_V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN2M4s[] = {RISCV::V8M4_V12M4, RISCV::V12M4_V16M4,
                                       RISCV::V16M4_V20M4};

ArrayRef<MCPhysReg> RISCV::getArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E and LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15,
                                       RISCV::X16, RISCV::X17};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in FastCC. X5 and X6 might be used by
  // the save-restore libcalls, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, RISCV::X15,
      RISCV::X16, RISCV::X17, RISCV::X28, RISCV::X29, RISCV::X30, RISCV::X31};

  // The GPRs used for passing arguments in FastCC when using the ILP32E/LP64E
  // ABIs.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                          RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2, bool EABI) {
  unsigned XLenInBytes = XLen / 8;
  const RISCVSubtarget &STI =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();
  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(STI.getTargetABI());

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    // TODO: To be compatible with GCC's behaviors, we force them to have
    // 4-byte alignment. This behavior may be changed when RV32E/ILP32E is
    // ratified.
    Align StackAlign(XLenInBytes);
    if (!EABI || XLen != 32)
      StackAlign = std::max(StackAlign, ArgFlags1.getNonZeroOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
    return false;
  }

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
  }

  return false;
}

static MCRegister allocateRVVReg(MVT ValVT, unsigned ValNo, CCState &State,
                                 const RISCVTargetLowering &TLI) {
  const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
  if (RC == &RISCV::VRRegClass) {
    // Assign the first mask argument to V0.
    // This is an interim calling convention and it may be changed in the
    // future.
    if (ValVT.getVectorElementType() == MVT::i1)
      if (MCRegister Reg = State.AllocateReg(RISCV::V0))
        return Reg;
    return State.AllocateReg(ArgVRs);
  }
  if (RC == &RISCV::VRM2RegClass)
    return State.AllocateReg(ArgVRM2s);
  if (RC == &RISCV::VRM4RegClass)
    return State.AllocateReg(ArgVRM4s);
  if (RC == &RISCV::VRM8RegClass)
    return State.AllocateReg(ArgVRM8s);
  if (RC == &RISCV::VRN2M1RegClass)
    return State.AllocateReg(ArgVRN2M1s);
  if (RC == &RISCV::VRN3M1RegClass)
    return State.AllocateReg(ArgVRN3M1s);
  if (RC == &RISCV::VRN4M1RegClass)
    return State.AllocateReg(ArgVRN4M1s);
  if (RC == &RISCV::VRN5M1RegClass)
    return State.AllocateReg(ArgVRN5M1s);
  if (RC == &RISCV::VRN6M1RegClass)
    return State.AllocateReg(ArgVRN6M1s);
  if (RC == &RISCV::VRN7M1RegClass)
    return State.AllocateReg(ArgVRN7M1s);
  if (RC == &RISCV::VRN8M1RegClass)
    return State.AllocateReg(ArgVRN8M1s);
  if (RC == &RISCV::VRN2M2RegClass)
    return State.AllocateReg(ArgVRN2M2s);
  if (RC == &RISCV::VRN3M2RegClass)
    return State.AllocateReg(ArgVRN3M2s);
  if (RC == &RISCV::VRN4M2RegClass)
    return State.AllocateReg(ArgVRN4M2s);
  if (RC == &RISCV::VRN2M4RegClass)
    return State.AllocateReg(ArgVRN2M4s);
  llvm_unreachable("Unhandled register class for ValueType");
}

// Implements the RISC-V calling convention. Returns true upon failure.
bool llvm::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
                    MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
                    ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
                    bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI) {
  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
  assert(XLen == 32 || XLen == 64);
  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;

  // Static chain parameter must not be passed in normal argument registers,
  // so we assign t2 for it as done in GCC's __builtin_call_with_static_chain.
  if (ArgFlags.isNest()) {
    if (MCRegister Reg = State.AllocateReg(RISCV::X7)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
  if (!LocVT.isVector() && IsRet && ValNo > 1)
    return true;

  // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
  // variadic argument, or if no F16/F32 argument registers are available.
  bool UseGPRForF16_F32 = true;
  // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
  // variadic argument, or if no F64 argument registers are available.
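  // For example, with the LP64D ABI a fixed f64 argument is passed in
  // fa0-fa7, while a variadic f64 falls back to the GPR/stack rules below.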
  bool UseGPRForF64 = true;

  switch (ABI) {
  default:
    llvm_unreachable("Unexpected ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64E:
    break;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    UseGPRForF16_F32 = !IsFixed;
    break;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    UseGPRForF16_F32 = !IsFixed;
    UseGPRForF64 = !IsFixed;
    break;
  }

  // FPR16, FPR32, and FPR64 alias each other.
  if (State.getFirstUnallocated(ArgFPR32s) == std::size(ArgFPR32s)) {
    UseGPRForF16_F32 = true;
    UseGPRForF64 = true;
  }

  // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
  // similar local variables rather than directly checking against the target
  // ABI.

  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);

  if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::bf16 ||
                           (ValVT == MVT::f32 && XLen == 64))) {
    MCRegister Reg = State.AllocateReg(ArgGPRs);
    if (Reg) {
      LocVT = XLenVT;
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (UseGPRForF16_F32 &&
      (ValVT == MVT::f16 || ValVT == MVT::bf16 || ValVT == MVT::f32)) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::BCvt;
  } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
    LocVT = MVT::i64;
    LocInfo = CCValAssign::BCvt;
  }

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  // TODO: To be compatible with GCC's behaviors, we don't align registers
  // currently if we are using the ILP32E calling convention. This behavior may
  // be changed when RV32E/ILP32E is ratified.
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes &&
      ABI != RISCVABI::ABI_ILP32E) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle passing f64 on RV32D with a soft-float ABI or when floating point
  // registers are exhausted.
  if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
    assert(PendingLocs.empty() && "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
    // GPRs, split between a GPR and the stack, or passed completely on the
    // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
    // cases.
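    // For instance, if only a7 remains unallocated, the low 32 bits are
    // assigned to a7 and the high 32 bits go to a 4-byte stack slot.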
    MCRegister Reg = State.AllocateReg(ArgGPRs);
    if (!Reg) {
      unsigned StackOffset = State.AllocateStack(8, Align(8));
      State.addLoc(
          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      return false;
    }
    LocVT = MVT::i32;
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    MCRegister HiReg = State.AllocateReg(ArgGPRs);
    if (HiReg) {
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
    } else {
      unsigned StackOffset = State.AllocateStack(4, Align(4));
      State.addLoc(
          CCValAssign::getCustomMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
    }
    return false;
  }

  // Fixed-length vectors are located in the corresponding scalable-vector
  // container types.
  if (ValVT.isFixedLengthVector())
    LocVT = TLI.getContainerForFixedLengthVector(LocVT);

  // Split arguments might be passed indirectly, so keep track of the pending
  // values. Split vectors are passed via a mix of registers and indirectly, so
  // treat them as we would any other argument.
  if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::Indirect;
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
      PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(
        XLen, State, VA, AF, ValNo, ValVT, LocVT, ArgFlags,
        ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E);
  }

  // Allocate to a register if possible, or else a stack slot.
  MCRegister Reg;
  unsigned StoreSizeBytes = XLen / 8;
  Align StackAlign = Align(XLen / 8);

  if ((ValVT == MVT::f16 || ValVT == MVT::bf16) && !UseGPRForF16_F32)
    Reg = State.AllocateReg(ArgFPR16s);
  else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
    Reg = State.AllocateReg(ArgFPR32s);
  else if (ValVT == MVT::f64 && !UseGPRForF64)
    Reg = State.AllocateReg(ArgFPR64s);
  else if (ValVT.isVector() || ValVT.isRISCVVectorTuple()) {
    Reg = allocateRVVReg(ValVT, ValNo, State, TLI);
    if (!Reg) {
      // For return values, the vector must be passed fully via registers or
      // via the stack.
      // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
      // but we're using all of them.
      if (IsRet)
        return true;
      // Try using a GPR to pass the address.
      if ((Reg = State.AllocateReg(ArgGPRs))) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else if (ValVT.isScalableVector()) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else {
        // Pass fixed-length vectors on the stack.
        LocVT = ValVT;
        StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element sizes, being careful for vXi1
        // vectors.
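        // A vXi1 vector reports a scalar size below one byte, so valueOrOne()
        // falls back to 1-byte stack alignment.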
        StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
      }
    }
  } else {
    Reg = State.AllocateReg(ArgGPRs);
  }

  unsigned StackOffset =
      Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        It.convertToReg(Reg);
      else
        It.convertToMem(StackOffset);
      State.addLoc(It);
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
          (TLI.getSubtarget().hasVInstructions() &&
           (ValVT.isVector() || ValVT.isRISCVVectorTuple()))) &&
         "Expected an XLenVT or vector types at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  // When a scalar floating-point value is passed on the stack, no
  // bit-conversion is needed.
  if (ValVT.isFloatingPoint() && LocInfo != CCValAssign::Indirect) {
    assert(!ValVT.isVector());
    LocVT = ValVT;
    LocInfo = CCValAssign::Full;
  }
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  return false;
}

// FastCC yields less than a 1% performance improvement on some particular
// benchmarks, but it may theoretically benefit other cases.
bool llvm::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
                           unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State,
                           bool IsFixed, bool IsRet, Type *OrigTy,
                           const RISCVTargetLowering &TLI) {
  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  const RISCVSubtarget &Subtarget = TLI.getSubtarget();

  if (LocVT == MVT::f16 && Subtarget.hasStdExtZfhmin()) {
    static const MCPhysReg FPR16List[] = {
        RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
        RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
        RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
        RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
    if (MCRegister Reg = State.AllocateReg(FPR16List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    static const MCPhysReg FPR32List[] = {
        RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
        RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
        RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
        RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    static const MCPhysReg FPR64List[] = {
        RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
        RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
        RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
        RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPR before hitting the stack.
  if ((LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) ||
      (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
      (LocVT == MVT::f64 && Subtarget.is64Bit() &&
       Subtarget.hasStdExtZdinx())) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      if (LocVT.getSizeInBits() != Subtarget.getXLen()) {
        LocVT = Subtarget.getXLenVT();
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      LocVT = Subtarget.getXLenVT();
      LocInfo = CCValAssign::BCvt;
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f16) {
    unsigned Offset2 = State.AllocateStack(2, Align(2));
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
    return false;
  }

  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    unsigned Offset4 = State.AllocateStack(4, Align(4));
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
    return false;
  }

  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    unsigned Offset5 = State.AllocateStack(8, Align(8));
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
    return false;
  }

  if (LocVT.isVector()) {
    if (MCRegister Reg = allocateRVVReg(ValVT, ValNo, State, TLI)) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (ValVT.isFixedLengthVector())
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    } else {
      // Try and pass the address via a "fast" GPR.
      if (MCRegister GPRReg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
        LocInfo = CCValAssign::Indirect;
        LocVT = TLI.getSubtarget().getXLenVT();
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
      } else if (ValVT.isFixedLengthVector()) {
        auto StackAlign =
            MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
        unsigned StackOffset =
            State.AllocateStack(ValVT.getStoreSize(), StackAlign);
        State.addLoc(
            CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      } else {
        // Can't pass scalable vectors on the stack.
        return true;
      }
    }

    return false;
  }

  return true; // CC didn't match.
}

bool llvm::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                        CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                        CCState &State) {
  if (ArgFlags.isNest()) {
    report_fatal_error(
        "Attribute 'nest' is not supported in GHC calling convention");
  }

  static const MCPhysReg GPRList[] = {
      RISCV::X9,  RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
      RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
    //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  const RISCVSubtarget &Subtarget =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    // Pass in STG registers: F1, ..., F6
    //                        fs0 ... fs5
    static const MCPhysReg FPR32List[] = {RISCV::F8_F,  RISCV::F9_F,
                                          RISCV::F18_F, RISCV::F19_F,
                                          RISCV::F20_F, RISCV::F21_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    // Pass in STG registers: D1, ..., D6
    //                        fs6 ... fs11
    static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
                                          RISCV::F24_D, RISCV::F25_D,
                                          RISCV::F26_D, RISCV::F27_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if ((LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
      (LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() &&
       Subtarget.is64Bit())) {
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  report_fatal_error("No registers left in GHC calling convention");
  return true;
}