//===-- RISCVCallingConv.cpp - RISC-V Custom CC Routines ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the custom routines for the RISC-V Calling Convention.
//
//===----------------------------------------------------------------------===//

#include "RISCVCallingConv.h"
#include "RISCVSubtarget.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCRegister.h"

using namespace llvm;

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
// passed in a pair of registers (fp+fp, int+fp), and both registers are
// available, then pass as two separate arguments. If either the GPRs or FPRs
// are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
// slot (as it is larger than 2*XLEN and the floating point rules don't
// apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
// word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
// not based on its size and fields. If it will be returned by reference, the
// frontend must modify the prototype so a pointer with the sret annotation is
// passed as the first argument. This is not necessary for large scalar
// returns.
// * Struct return values and varargs should be coerced to structs containing
// register-size fields in the same situations they would be for fixed
// arguments.
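//
// For example (illustrative only): under the ILP32D ABI a small struct such
// as { double d; int32_t i; } is passed as two separate arguments (one FPR,
// one GPR) while registers of both kinds remain, whereas a struct larger
// than 2*XLEN that the floating-point rules don't cover is passed via a
// pointer with the byval attribute.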

static const MCPhysReg ArgFPR16s[] = {RISCV::F10_H, RISCV::F11_H, RISCV::F12_H,
                                      RISCV::F13_H, RISCV::F14_H, RISCV::F15_H,
                                      RISCV::F16_H, RISCV::F17_H};
static const MCPhysReg ArgFPR32s[] = {RISCV::F10_F, RISCV::F11_F, RISCV::F12_F,
                                      RISCV::F13_F, RISCV::F14_F, RISCV::F15_F,
                                      RISCV::F16_F, RISCV::F17_F};
static const MCPhysReg ArgFPR64s[] = {RISCV::F10_D, RISCV::F11_D, RISCV::F12_D,
                                      RISCV::F13_D, RISCV::F14_D, RISCV::F15_D,
                                      RISCV::F16_D, RISCV::F17_D};
// This is an interim calling convention and it may be changed in the future.
static const MCPhysReg ArgVRs[] = {
    RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
    RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
    RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
                                     RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
                                     RISCV::V20M2, RISCV::V22M2};
static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
                                     RISCV::V20M4};
static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
static const MCPhysReg ArgVRN2M1s[] = {
    RISCV::V8_V9,   RISCV::V9_V10,  RISCV::V10_V11, RISCV::V11_V12,
    RISCV::V12_V13, RISCV::V13_V14, RISCV::V14_V15, RISCV::V15_V16,
    RISCV::V16_V17, RISCV::V17_V18, RISCV::V18_V19, RISCV::V19_V20,
    RISCV::V20_V21, RISCV::V21_V22, RISCV::V22_V23};
static const MCPhysReg ArgVRN3M1s[] = {
    RISCV::V8_V9_V10,   RISCV::V9_V10_V11,  RISCV::V10_V11_V12,
    RISCV::V11_V12_V13, RISCV::V12_V13_V14, RISCV::V13_V14_V15,
    RISCV::V14_V15_V16, RISCV::V15_V16_V17, RISCV::V16_V17_V18,
    RISCV::V17_V18_V19, RISCV::V18_V19_V20, RISCV::V19_V20_V21,
    RISCV::V20_V21_V22, RISCV::V21_V22_V23};
static const MCPhysReg ArgVRN4M1s[] = {
    RISCV::V8_V9_V10_V11,   RISCV::V9_V10_V11_V12,  RISCV::V10_V11_V12_V13,
    RISCV::V11_V12_V13_V14, RISCV::V12_V13_V14_V15, RISCV::V13_V14_V15_V16,
    RISCV::V14_V15_V16_V17, RISCV::V15_V16_V17_V18, RISCV::V16_V17_V18_V19,
    RISCV::V17_V18_V19_V20, RISCV::V18_V19_V20_V21, RISCV::V19_V20_V21_V22,
    RISCV::V20_V21_V22_V23};
static const MCPhysReg ArgVRN5M1s[] = {
    RISCV::V8_V9_V10_V11_V12,   RISCV::V9_V10_V11_V12_V13,
    RISCV::V10_V11_V12_V13_V14, RISCV::V11_V12_V13_V14_V15,
    RISCV::V12_V13_V14_V15_V16, RISCV::V13_V14_V15_V16_V17,
    RISCV::V14_V15_V16_V17_V18, RISCV::V15_V16_V17_V18_V19,
    RISCV::V16_V17_V18_V19_V20, RISCV::V17_V18_V19_V20_V21,
    RISCV::V18_V19_V20_V21_V22, RISCV::V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN6M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13,   RISCV::V9_V10_V11_V12_V13_V14,
    RISCV::V10_V11_V12_V13_V14_V15, RISCV::V11_V12_V13_V14_V15_V16,
    RISCV::V12_V13_V14_V15_V16_V17, RISCV::V13_V14_V15_V16_V17_V18,
    RISCV::V14_V15_V16_V17_V18_V19, RISCV::V15_V16_V17_V18_V19_V20,
    RISCV::V16_V17_V18_V19_V20_V21, RISCV::V17_V18_V19_V20_V21_V22,
    RISCV::V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN7M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13_V14,   RISCV::V9_V10_V11_V12_V13_V14_V15,
    RISCV::V10_V11_V12_V13_V14_V15_V16, RISCV::V11_V12_V13_V14_V15_V16_V17,
    RISCV::V12_V13_V14_V15_V16_V17_V18, RISCV::V13_V14_V15_V16_V17_V18_V19,
    RISCV::V14_V15_V16_V17_V18_V19_V20, RISCV::V15_V16_V17_V18_V19_V20_V21,
    RISCV::V16_V17_V18_V19_V20_V21_V22, RISCV::V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN8M1s[] = {RISCV::V8_V9_V10_V11_V12_V13_V14_V15,
                                       RISCV::V9_V10_V11_V12_V13_V14_V15_V16,
                                       RISCV::V10_V11_V12_V13_V14_V15_V16_V17,
                                       RISCV::V11_V12_V13_V14_V15_V16_V17_V18,
                                       RISCV::V12_V13_V14_V15_V16_V17_V18_V19,
                                       RISCV::V13_V14_V15_V16_V17_V18_V19_V20,
                                       RISCV::V14_V15_V16_V17_V18_V19_V20_V21,
                                       RISCV::V15_V16_V17_V18_V19_V20_V21_V22,
                                       RISCV::V16_V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN2M2s[] = {RISCV::V8M2_V10M2,  RISCV::V10M2_V12M2,
                                       RISCV::V12M2_V14M2, RISCV::V14M2_V16M2,
                                       RISCV::V16M2_V18M2, RISCV::V18M2_V20M2,
                                       RISCV::V20M2_V22M2};
static const MCPhysReg ArgVRN3M2s[] = {
    RISCV::V8M2_V10M2_V12M2,  RISCV::V10M2_V12M2_V14M2,
    RISCV::V12M2_V14M2_V16M2, RISCV::V14M2_V16M2_V18M2,
    RISCV::V16M2_V18M2_V20M2, RISCV::V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN4M2s[] = {
    RISCV::V8M2_V10M2_V12M2_V14M2, RISCV::V10M2_V12M2_V14M2_V16M2,
    RISCV::V12M2_V14M2_V16M2_V18M2, RISCV::V14M2_V16M2_V18M2_V20M2,
    RISCV::V16M2_V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN2M4s[] = {RISCV::V8M4_V12M4, RISCV::V12M4_V16M4,
                                       RISCV::V16M4_V20M4};

ArrayRef<MCPhysReg> RISCV::getArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15,
                                       RISCV::X16, RISCV::X17};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in FastCC. X5 and X6 might be used by
  // the save-restore libcalls, so we don't use them here.
  // Don't use X7 for FastCC either, since Zicfilp uses X7 as the label
  // register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, RISCV::X15,
      RISCV::X16, RISCV::X17, RISCV::X28, RISCV::X29, RISCV::X30, RISCV::X31};

  // The GPRs used for passing arguments in FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                          RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
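// For example, on RV32 an i64 argument (or an f64 under a soft-float ABI) is
// split by legalisation into two i32 halves; depending on how many argument
// GPRs remain, the halves end up in a GPR pair, in one GPR plus a stack slot,
// or entirely on the stack.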
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2, bool EABI) {
  unsigned XLenInBytes = XLen / 8;
  const RISCVSubtarget &STI =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();
  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(STI.getTargetABI());

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    // TODO: To be compatible with GCC's behavior, we force the halves to
    // 4-byte alignment for ILP32E. This may be changed once RV32E/ILP32E is
    // ratified.
    Align StackAlign(XLenInBytes);
    if (!EABI || XLen != 32)
      StackAlign = std::max(StackAlign, ArgFlags1.getNonZeroOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
    return false;
  }

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
  }

  return false;
}

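// Allocate an RVV register (or register group/tuple) for a vector value,
// based on its register class: LMUL=1 values use v8-v23, LMUL=2/4/8 values
// use the corresponding even-aligned groups (e.g. v8m2 covers v8-v9), and
// tuple types from segment loads/stores use runs of consecutive registers.
// The first i1 mask argument is assigned to v0 (the mask register) when it is
// still available.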
static MCRegister allocateRVVReg(MVT ValVT, unsigned ValNo, CCState &State,
                                 const RISCVTargetLowering &TLI) {
  const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
  if (RC == &RISCV::VRRegClass) {
    // Assign the first mask argument to V0.
    // This is an interim calling convention and it may be changed in the
    // future.
    if (ValVT.getVectorElementType() == MVT::i1)
      if (MCRegister Reg = State.AllocateReg(RISCV::V0))
        return Reg;
    return State.AllocateReg(ArgVRs);
  }
  if (RC == &RISCV::VRM2RegClass)
    return State.AllocateReg(ArgVRM2s);
  if (RC == &RISCV::VRM4RegClass)
    return State.AllocateReg(ArgVRM4s);
  if (RC == &RISCV::VRM8RegClass)
    return State.AllocateReg(ArgVRM8s);
  if (RC == &RISCV::VRN2M1RegClass)
    return State.AllocateReg(ArgVRN2M1s);
  if (RC == &RISCV::VRN3M1RegClass)
    return State.AllocateReg(ArgVRN3M1s);
  if (RC == &RISCV::VRN4M1RegClass)
    return State.AllocateReg(ArgVRN4M1s);
  if (RC == &RISCV::VRN5M1RegClass)
    return State.AllocateReg(ArgVRN5M1s);
  if (RC == &RISCV::VRN6M1RegClass)
    return State.AllocateReg(ArgVRN6M1s);
  if (RC == &RISCV::VRN7M1RegClass)
    return State.AllocateReg(ArgVRN7M1s);
  if (RC == &RISCV::VRN8M1RegClass)
    return State.AllocateReg(ArgVRN8M1s);
  if (RC == &RISCV::VRN2M2RegClass)
    return State.AllocateReg(ArgVRN2M2s);
  if (RC == &RISCV::VRN3M2RegClass)
    return State.AllocateReg(ArgVRN3M2s);
  if (RC == &RISCV::VRN4M2RegClass)
    return State.AllocateReg(ArgVRN4M2s);
  if (RC == &RISCV::VRN2M4RegClass)
    return State.AllocateReg(ArgVRN2M4s);
  llvm_unreachable("Unhandled register class for ValueType");
}

// Implements the RISC-V calling convention. Returns true upon failure.
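// This hook handles both formal arguments and return values: IsRet
// distinguishes the two, IsFixed is false for variadic arguments, and OrigTy
// is the original IR type of the value (used by the variadic register
// alignment rule below).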
bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const DataLayout &DL = MF.getDataLayout();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();

  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
  assert(XLen == 32 || XLen == 64);
  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;

  // The static chain parameter must not be passed in normal argument
  // registers, so we assign t2 (x7) to it, as GCC's
  // __builtin_call_with_static_chain does.
  if (ArgFlags.isNest()) {
    if (MCRegister Reg = State.AllocateReg(RISCV::X7)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
  if (!LocVT.isVector() && IsRet && ValNo > 1)
    return true;

  // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
  // passing a variadic argument, or if no F16/F32 argument registers are
  // available.
  bool UseGPRForF16_F32 = true;
  // UseGPRForF64 is true if targeting soft-float ABIs or an FLEN=32 ABI, if
  // passing a variadic argument, or if no F64 argument registers are
  // available.
  bool UseGPRForF64 = true;

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  switch (ABI) {
  default:
    llvm_unreachable("Unexpected ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64E:
    break;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    UseGPRForF16_F32 = !IsFixed;
    break;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    UseGPRForF16_F32 = !IsFixed;
    UseGPRForF64 = !IsFixed;
    break;
  }

  // FPR16, FPR32, and FPR64 alias each other, so once the FPR32 argument
  // registers are exhausted the F16/F64 argument registers are too.
  if (State.getFirstUnallocated(ArgFPR32s) == std::size(ArgFPR32s)) {
    UseGPRForF16_F32 = true;
    UseGPRForF64 = true;
  }

  // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
  // similar local variables rather than directly checking against the target
  // ABI.

  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);

  if ((ValVT == MVT::f32 && XLen == 32 && Subtarget.hasStdExtZfinx()) ||
      (ValVT == MVT::f64 && XLen == 64 && Subtarget.hasStdExtZdinx())) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::bf16 ||
                           (ValVT == MVT::f32 && XLen == 64))) {
    MCRegister Reg = State.AllocateReg(ArgGPRs);
    if (Reg) {
      LocVT = XLenVT;
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (UseGPRForF16_F32 &&
      (ValVT == MVT::f16 || ValVT == MVT::bf16 || ValVT == MVT::f32)) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::BCvt;
  } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
    LocVT = MVT::i64;
    LocInfo = CCValAssign::BCvt;
  }

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  // TODO: To be compatible with GCC's behavior, we currently don't align
  // registers when using the ILP32E calling convention. This may be changed
  // once RV32E/ILP32E is ratified.
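  // For example, on RV32 a variadic double (8-byte size and alignment) must
  // start in an even-numbered argument register pair such as a2/a3; if the
  // next free register is odd (e.g. a1), it is skipped.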
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes &&
      ABI != RISCVABI::ABI_ILP32E) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle passing f64 on RV32D with a soft float ABI or when floating point
  // registers are exhausted.
  if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
    assert(PendingLocs.empty() && "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
    // GPRs, split between a GPR and the stack, or passed completely on the
    // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
    // cases.
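    // For example, if a7 is the only remaining argument GPR, the low half of
    // the f64 is passed in a7 and the high half goes to the stack.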
    MCRegister Reg = State.AllocateReg(ArgGPRs);
    if (!Reg) {
      unsigned StackOffset = State.AllocateStack(8, Align(8));
      State.addLoc(
          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      return false;
    }
    LocVT = MVT::i32;
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    MCRegister HiReg = State.AllocateReg(ArgGPRs);
    if (HiReg) {
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
    } else {
      unsigned StackOffset = State.AllocateStack(4, Align(4));
      State.addLoc(
          CCValAssign::getCustomMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
    }
    return false;
  }

  // Fixed-length vectors are located in the corresponding scalable-vector
  // container types.
  if (ValVT.isFixedLengthVector())
    LocVT = TLI.getContainerForFixedLengthVector(LocVT);

  // Split arguments might be passed indirectly, so keep track of the pending
  // values. Split vectors are passed via a mix of registers and indirectly, so
  // treat them as we would any other argument.
  if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::Indirect;
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
      PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(
        XLen, State, VA, AF, ValNo, ValVT, LocVT, ArgFlags,
        ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E);
  }

  // Allocate to a register if possible, or else a stack slot.
  MCRegister Reg;
  unsigned StoreSizeBytes = XLen / 8;
  Align StackAlign = Align(XLen / 8);

  if ((ValVT == MVT::f16 || ValVT == MVT::bf16) && !UseGPRForF16_F32)
    Reg = State.AllocateReg(ArgFPR16s);
  else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
    Reg = State.AllocateReg(ArgFPR32s);
  else if (ValVT == MVT::f64 && !UseGPRForF64)
    Reg = State.AllocateReg(ArgFPR64s);
  else if (ValVT.isVector() || ValVT.isRISCVVectorTuple()) {
    Reg = allocateRVVReg(ValVT, ValNo, State, TLI);
    if (!Reg) {
      // For return values, the vector must be passed fully via registers or
      // via the stack.
      // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
      // but we're using all of them.
      if (IsRet)
        return true;
      // Try using a GPR to pass the address.
      if ((Reg = State.AllocateReg(ArgGPRs))) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else if (ValVT.isScalableVector()) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else {
        // Pass fixed-length vectors on the stack.
        LocVT = ValVT;
        StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element sizes, being careful with vXi1
        // vectors, whose elements are narrower than a byte.
        StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
      }
    }
  } else {
    Reg = State.AllocateReg(ArgGPRs);
  }

  unsigned StackOffset =
      Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        It.convertToReg(Reg);
      else
        It.convertToMem(StackOffset);
      State.addLoc(It);
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
          (TLI.getSubtarget().hasVInstructions() &&
           (ValVT.isVector() || ValVT.isRISCVVectorTuple()))) &&
         "Expected an XLenVT or vector types at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  // When a scalar floating-point value is passed on the stack, no
  // bit-conversion is needed.
  if (ValVT.isFloatingPoint() && LocInfo != CCValAssign::Indirect) {
    assert(!ValVT.isVector());
    LocVT = ValVT;
    LocInfo = CCValAssign::Full;
  }
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  return false;
}

// FastCC gives less than a 1% performance improvement on some particular
// benchmarks, but in theory it may still be beneficial in other cases.
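// Compared to the standard calling convention, FastCC may also use the
// temporary registers (t3-t6, ft0-ft7 and ft8-ft11) for passing arguments, as
// the register lists below show.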
bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State,
                           bool IsFixed, bool IsRet, Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();
  RISCVABI::ABI ABI = Subtarget.getTargetABI();

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f16 && Subtarget.hasStdExtZfhmin()) {
    static const MCPhysReg FPR16List[] = {
        RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
        RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
        RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
        RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
    if (MCRegister Reg = State.AllocateReg(FPR16List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    static const MCPhysReg FPR32List[] = {
        RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
        RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
        RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
        RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    static const MCPhysReg FPR64List[] = {
        RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
        RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
        RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
        RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPR before hitting the stack.
  if ((LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) ||
      (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
      (LocVT == MVT::f64 && Subtarget.is64Bit() &&
       Subtarget.hasStdExtZdinx())) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      if (LocVT.getSizeInBits() != Subtarget.getXLen()) {
        LocVT = Subtarget.getXLenVT();
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f16) {
    unsigned Offset2 = State.AllocateStack(2, Align(2));
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
    return false;
  }

  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    unsigned Offset4 = State.AllocateStack(4, Align(4));
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
    return false;
  }

  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    unsigned Offset5 = State.AllocateStack(8, Align(8));
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
    return false;
  }

  if (LocVT.isVector()) {
    if (MCRegister Reg = allocateRVVReg(ValVT, ValNo, State, TLI)) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (ValVT.isFixedLengthVector())
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    } else {
      // Try to pass the address via a "fast" GPR.
      if (MCRegister GPRReg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
        LocInfo = CCValAssign::Indirect;
        LocVT = Subtarget.getXLenVT();
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
      } else if (ValVT.isFixedLengthVector()) {
        auto StackAlign =
            MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
        unsigned StackOffset =
            State.AllocateStack(ValVT.getStoreSize(), StackAlign);
        State.addLoc(
            CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      } else {
        // Can't pass scalable vectors on the stack.
        return true;
      }
    }

    return false;
  }

  return true; // CC didn't match.
}

bool llvm::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                        CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                        CCState &State) {
  if (ArgFlags.isNest()) {
    report_fatal_error(
        "Attribute 'nest' is not supported in GHC calling convention");
  }

  static const MCPhysReg GPRList[] = {
      RISCV::X9,  RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
      RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
    //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  const RISCVSubtarget &Subtarget =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    // Pass in STG registers: F1, ..., F6
    //                        fs0 ... fs5
    static const MCPhysReg FPR32List[] = {RISCV::F8_F,  RISCV::F9_F,
                                          RISCV::F18_F, RISCV::F19_F,
                                          RISCV::F20_F, RISCV::F21_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    // Pass in STG registers: D1, ..., D6
    //                        fs6 ... fs11
    static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
                                          RISCV::F24_D, RISCV::F25_D,
                                          RISCV::F26_D, RISCV::F27_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if ((LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
      (LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() &&
       Subtarget.is64Bit())) {
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  report_fatal_error("No registers left in GHC calling convention");
  return true;
}
697