//===-- RISCVCallingConv.cpp - RISC-V Custom CC Routines ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the custom routines for the RISC-V Calling Convention.
//
//===----------------------------------------------------------------------===//

#include "RISCVCallingConv.h"
#include "RISCVSubtarget.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCRegister.h"

using namespace llvm;

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
// passed in a pair of registers (fp+fp, int+fp), and both registers are
// available, then pass as two separate arguments. If either the GPRs or FPRs
// are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
// slot (as it is larger than 2*XLEN and the floating point rules don't
// apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
// word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
// not based on its size and fields. If it will be returned by reference, the
// frontend must modify the prototype so a pointer with the sret annotation is
// passed as the first argument. This is not necessary for large scalar
// returns.
// * Struct return values and varargs should be coerced to structs containing
// register-size fields in the same situations they would be for fixed
// arguments.
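//
// For example (illustrative only): on ILP32D a struct containing one double
// and one int32_t may be passed as two separate arguments (an FPR and a GPR)
// while both register classes still have free registers, whereas a 24-byte
// aggregate on RV32 exceeds 2*XLEN and is passed by pointer with the byval
// attribute.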

static const MCPhysReg ArgFPR16s[] = {RISCV::F10_H, RISCV::F11_H, RISCV::F12_H,
                                      RISCV::F13_H, RISCV::F14_H, RISCV::F15_H,
                                      RISCV::F16_H, RISCV::F17_H};
static const MCPhysReg ArgFPR32s[] = {RISCV::F10_F, RISCV::F11_F, RISCV::F12_F,
                                      RISCV::F13_F, RISCV::F14_F, RISCV::F15_F,
                                      RISCV::F16_F, RISCV::F17_F};
static const MCPhysReg ArgFPR64s[] = {RISCV::F10_D, RISCV::F11_D, RISCV::F12_D,
                                      RISCV::F13_D, RISCV::F14_D, RISCV::F15_D,
                                      RISCV::F16_D, RISCV::F17_D};
// This is an interim calling convention and it may be changed in the future.
static const MCPhysReg ArgVRs[] = {
    RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
    RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
    RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
                                     RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
                                     RISCV::V20M2, RISCV::V22M2};
static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
                                     RISCV::V20M4};
static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
static const MCPhysReg ArgVRN2M1s[] = {
    RISCV::V8_V9,   RISCV::V9_V10,  RISCV::V10_V11, RISCV::V11_V12,
    RISCV::V12_V13, RISCV::V13_V14, RISCV::V14_V15, RISCV::V15_V16,
    RISCV::V16_V17, RISCV::V17_V18, RISCV::V18_V19, RISCV::V19_V20,
    RISCV::V20_V21, RISCV::V21_V22, RISCV::V22_V23};
static const MCPhysReg ArgVRN3M1s[] = {
    RISCV::V8_V9_V10,   RISCV::V9_V10_V11,  RISCV::V10_V11_V12,
    RISCV::V11_V12_V13, RISCV::V12_V13_V14, RISCV::V13_V14_V15,
    RISCV::V14_V15_V16, RISCV::V15_V16_V17, RISCV::V16_V17_V18,
    RISCV::V17_V18_V19, RISCV::V18_V19_V20, RISCV::V19_V20_V21,
    RISCV::V20_V21_V22, RISCV::V21_V22_V23};
static const MCPhysReg ArgVRN4M1s[] = {
    RISCV::V8_V9_V10_V11,   RISCV::V9_V10_V11_V12,  RISCV::V10_V11_V12_V13,
    RISCV::V11_V12_V13_V14, RISCV::V12_V13_V14_V15, RISCV::V13_V14_V15_V16,
    RISCV::V14_V15_V16_V17, RISCV::V15_V16_V17_V18, RISCV::V16_V17_V18_V19,
    RISCV::V17_V18_V19_V20, RISCV::V18_V19_V20_V21, RISCV::V19_V20_V21_V22,
    RISCV::V20_V21_V22_V23};
static const MCPhysReg ArgVRN5M1s[] = {
    RISCV::V8_V9_V10_V11_V12,   RISCV::V9_V10_V11_V12_V13,
    RISCV::V10_V11_V12_V13_V14, RISCV::V11_V12_V13_V14_V15,
    RISCV::V12_V13_V14_V15_V16, RISCV::V13_V14_V15_V16_V17,
    RISCV::V14_V15_V16_V17_V18, RISCV::V15_V16_V17_V18_V19,
    RISCV::V16_V17_V18_V19_V20, RISCV::V17_V18_V19_V20_V21,
    RISCV::V18_V19_V20_V21_V22, RISCV::V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN6M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13,   RISCV::V9_V10_V11_V12_V13_V14,
    RISCV::V10_V11_V12_V13_V14_V15, RISCV::V11_V12_V13_V14_V15_V16,
    RISCV::V12_V13_V14_V15_V16_V17, RISCV::V13_V14_V15_V16_V17_V18,
    RISCV::V14_V15_V16_V17_V18_V19, RISCV::V15_V16_V17_V18_V19_V20,
    RISCV::V16_V17_V18_V19_V20_V21, RISCV::V17_V18_V19_V20_V21_V22,
    RISCV::V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN7M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13_V14,   RISCV::V9_V10_V11_V12_V13_V14_V15,
    RISCV::V10_V11_V12_V13_V14_V15_V16, RISCV::V11_V12_V13_V14_V15_V16_V17,
    RISCV::V12_V13_V14_V15_V16_V17_V18, RISCV::V13_V14_V15_V16_V17_V18_V19,
    RISCV::V14_V15_V16_V17_V18_V19_V20, RISCV::V15_V16_V17_V18_V19_V20_V21,
    RISCV::V16_V17_V18_V19_V20_V21_V22, RISCV::V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN8M1s[] = {RISCV::V8_V9_V10_V11_V12_V13_V14_V15,
                                       RISCV::V9_V10_V11_V12_V13_V14_V15_V16,
                                       RISCV::V10_V11_V12_V13_V14_V15_V16_V17,
                                       RISCV::V11_V12_V13_V14_V15_V16_V17_V18,
                                       RISCV::V12_V13_V14_V15_V16_V17_V18_V19,
                                       RISCV::V13_V14_V15_V16_V17_V18_V19_V20,
                                       RISCV::V14_V15_V16_V17_V18_V19_V20_V21,
                                       RISCV::V15_V16_V17_V18_V19_V20_V21_V22,
                                       RISCV::V16_V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN2M2s[] = {RISCV::V8M2_V10M2,  RISCV::V10M2_V12M2,
                                       RISCV::V12M2_V14M2, RISCV::V14M2_V16M2,
                                       RISCV::V16M2_V18M2, RISCV::V18M2_V20M2,
                                       RISCV::V20M2_V22M2};
static const MCPhysReg ArgVRN3M2s[] = {
    RISCV::V8M2_V10M2_V12M2,  RISCV::V10M2_V12M2_V14M2,
    RISCV::V12M2_V14M2_V16M2, RISCV::V14M2_V16M2_V18M2,
    RISCV::V16M2_V18M2_V20M2, RISCV::V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN4M2s[] = {
    RISCV::V8M2_V10M2_V12M2_V14M2, RISCV::V10M2_V12M2_V14M2_V16M2,
    RISCV::V12M2_V14M2_V16M2_V18M2, RISCV::V14M2_V16M2_V18M2_V20M2,
    RISCV::V16M2_V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN2M4s[] = {RISCV::V8M4_V12M4, RISCV::V12M4_V16M4,
                                       RISCV::V16M4_V20M4};

ArrayRef<MCPhysReg> RISCV::getArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15,
                                       RISCV::X16, RISCV::X17};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getArgGPR16s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10_H, RISCV::X11_H, RISCV::X12_H,
                                       RISCV::X13_H, RISCV::X14_H, RISCV::X15_H,
                                       RISCV::X16_H, RISCV::X17_H};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
                                       RISCV::X12_H, RISCV::X13_H,
                                       RISCV::X14_H, RISCV::X15_H};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getArgGPR32s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10_W, RISCV::X11_W, RISCV::X12_W,
                                       RISCV::X13_W, RISCV::X14_W, RISCV::X15_W,
                                       RISCV::X16_W, RISCV::X17_W};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABIs.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
                                       RISCV::X12_W, RISCV::X13_W,
                                       RISCV::X14_W, RISCV::X15_W};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in FastCC. X5 and X6 might be used for
  // the save-restore libcall, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, RISCV::X15,
      RISCV::X16, RISCV::X17, RISCV::X28, RISCV::X29, RISCV::X30, RISCV::X31};

  // The GPRs used for passing arguments in FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                          RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRF16s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in FastCC. X5 and X6 might be used for
  // the save-restore libcall, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10_H, RISCV::X11_H, RISCV::X12_H, RISCV::X13_H,
      RISCV::X14_H, RISCV::X15_H, RISCV::X16_H, RISCV::X17_H,
      RISCV::X28_H, RISCV::X29_H, RISCV::X30_H, RISCV::X31_H};

  // The GPRs used for passing arguments in FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
                                          RISCV::X12_H, RISCV::X13_H,
                                          RISCV::X14_H, RISCV::X15_H};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRF32s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in FastCC. X5 and X6 might be used for
  // the save-restore libcall, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10_W, RISCV::X11_W, RISCV::X12_W, RISCV::X13_W,
      RISCV::X14_W, RISCV::X15_W, RISCV::X16_W, RISCV::X17_W,
      RISCV::X28_W, RISCV::X29_W, RISCV::X30_W, RISCV::X31_W};

  // The GPRs used for passing arguments in FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
                                          RISCV::X12_W, RISCV::X13_W,
                                          RISCV::X14_W, RISCV::X15_W};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
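// For example (illustrative only), an i64 argument on RV32 that was split into
// two i32 halves may land entirely in a GPR pair, have its low half in the
// last free GPR and its high half on the stack, or be placed entirely on the
// stack.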
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2, bool EABI) {
  unsigned XLenInBytes = XLen / 8;
  const RISCVSubtarget &STI =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();
  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(STI.getTargetABI());

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    // TODO: To be compatible with GCC's behaviors, we force them to have 4-byte
    // alignment. This behavior may be changed when RV32E/ILP32E is ratified.
    Align StackAlign(XLenInBytes);
    if (!EABI || XLen != 32)
      StackAlign = std::max(StackAlign, ArgFlags1.getNonZeroOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
    return false;
  }

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
  }

  return false;
}

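// Allocate an RVV argument register from the interim vector calling
// convention, using the table that matches the value's register class. As an
// illustrative example, a value whose register class is VRM2 is assigned one
// of the even-numbered LMUL=2 registers v8m2, v10m2, ..., v22m2.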
static MCRegister allocateRVVReg(MVT ValVT, unsigned ValNo, CCState &State,
                                 const RISCVTargetLowering &TLI) {
  const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
  if (RC == &RISCV::VRRegClass) {
    // Assign the first mask argument to V0.
    // This is an interim calling convention and it may be changed in the
    // future.
    if (ValVT.getVectorElementType() == MVT::i1)
      if (MCRegister Reg = State.AllocateReg(RISCV::V0))
        return Reg;
    return State.AllocateReg(ArgVRs);
  }
  if (RC == &RISCV::VRM2RegClass)
    return State.AllocateReg(ArgVRM2s);
  if (RC == &RISCV::VRM4RegClass)
    return State.AllocateReg(ArgVRM4s);
  if (RC == &RISCV::VRM8RegClass)
    return State.AllocateReg(ArgVRM8s);
  if (RC == &RISCV::VRN2M1RegClass)
    return State.AllocateReg(ArgVRN2M1s);
  if (RC == &RISCV::VRN3M1RegClass)
    return State.AllocateReg(ArgVRN3M1s);
  if (RC == &RISCV::VRN4M1RegClass)
    return State.AllocateReg(ArgVRN4M1s);
  if (RC == &RISCV::VRN5M1RegClass)
    return State.AllocateReg(ArgVRN5M1s);
  if (RC == &RISCV::VRN6M1RegClass)
    return State.AllocateReg(ArgVRN6M1s);
  if (RC == &RISCV::VRN7M1RegClass)
    return State.AllocateReg(ArgVRN7M1s);
  if (RC == &RISCV::VRN8M1RegClass)
    return State.AllocateReg(ArgVRN8M1s);
  if (RC == &RISCV::VRN2M2RegClass)
    return State.AllocateReg(ArgVRN2M2s);
  if (RC == &RISCV::VRN3M2RegClass)
    return State.AllocateReg(ArgVRN3M2s);
  if (RC == &RISCV::VRN4M2RegClass)
    return State.AllocateReg(ArgVRN4M2s);
  if (RC == &RISCV::VRN2M4RegClass)
    return State.AllocateReg(ArgVRN2M4s);
  llvm_unreachable("Unhandled register class for ValueType");
}

// Implements the RISC-V calling convention. Returns true upon failure.
bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const DataLayout &DL = MF.getDataLayout();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();

  unsigned XLen = Subtarget.getXLen();
  MVT XLenVT = Subtarget.getXLenVT();

  // The static chain parameter must not be passed in normal argument registers,
  // so we assign t2 for it as done in GCC's __builtin_call_with_static_chain.
  if (ArgFlags.isNest()) {
    if (MCRegister Reg = State.AllocateReg(RISCV::X7)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
  if (!LocVT.isVector() && IsRet && ValNo > 1)
    return true;

  // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
  // variadic argument, or if no F16/F32 argument registers are available.
  bool UseGPRForF16_F32 = true;
  // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
  // variadic argument, or if no F64 argument registers are available.
  bool UseGPRForF64 = true;

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  switch (ABI) {
  default:
    llvm_unreachable("Unexpected ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64E:
    break;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    UseGPRForF16_F32 = !IsFixed;
    break;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    UseGPRForF16_F32 = !IsFixed;
    UseGPRForF64 = !IsFixed;
    break;
  }

  if ((LocVT == MVT::f16 || LocVT == MVT::bf16) && !UseGPRForF16_F32) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR16s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && !UseGPRForF16_F32) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR32s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && !UseGPRForF64) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR64s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if ((ValVT == MVT::f16 && Subtarget.hasStdExtZhinxmin())) {
    if (MCRegister Reg = State.AllocateReg(getArgGPR16s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (ValVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    if (MCRegister Reg = State.AllocateReg(getArgGPR32s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);

  // Zdinx uses GPRs without a bitcast when possible.
  if (LocVT == MVT::f64 && XLen == 64 && Subtarget.hasStdExtZdinx()) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // FP types smaller than XLen use a custom GPR assignment.
  if (LocVT == MVT::f16 || LocVT == MVT::bf16 ||
      (LocVT == MVT::f32 && XLen == 64)) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      LocVT = XLenVT;
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Bitcast FP to GPR if we can use a GPR register.
  if ((XLen == 32 && LocVT == MVT::f32) || (XLen == 64 && LocVT == MVT::f64)) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      LocVT = XLenVT;
      LocInfo = CCValAssign::BCvt;
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  // TODO: To be compatible with GCC's behaviors, we don't align registers
  // currently if we are using ILP32E calling convention. This behavior may be
  // changed when RV32E/ILP32E is ratified.
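  // For example (illustrative only), a variadic double passed on RV32 has
  // 8-byte alignment, so if the next free GPR is a1 it is skipped and the
  // value is placed in the aligned pair a2/a3 instead.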
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes &&
      ABI != RISCVABI::ABI_ILP32E) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle passing f64 on RV32D with a soft float ABI or when floating point
  // registers are exhausted.
  if (XLen == 32 && LocVT == MVT::f64) {
    assert(PendingLocs.empty() && "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
    // GPRs, split between a GPR and the stack, or passed completely on the
    // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
    // cases.
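    // For example (illustrative only), if a7 is the last free GPR, the low
    // 32 bits go in a7 and the high 32 bits go to a 4-byte stack slot.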
    MCRegister Reg = State.AllocateReg(ArgGPRs);
    if (!Reg) {
      int64_t StackOffset = State.AllocateStack(8, Align(8));
      State.addLoc(
          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      return false;
    }
    LocVT = MVT::i32;
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    MCRegister HiReg = State.AllocateReg(ArgGPRs);
    if (HiReg) {
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
    } else {
      int64_t StackOffset = State.AllocateStack(4, Align(4));
      State.addLoc(
          CCValAssign::getCustomMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
    }
    return false;
  }

  // Split arguments might be passed indirectly, so keep track of the pending
  // values. Split vectors are passed via a mix of registers and indirectly, so
  // treat them as we would any other argument.
  if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::Indirect;
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
      PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(
        XLen, State, VA, AF, ValNo, ValVT, LocVT, ArgFlags,
        ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E);
  }

  // Allocate to a register if possible, or else a stack slot.
  MCRegister Reg;
  unsigned StoreSizeBytes = XLen / 8;
  Align StackAlign = Align(XLen / 8);

  if (ValVT.isVector() || ValVT.isRISCVVectorTuple()) {
    Reg = allocateRVVReg(ValVT, ValNo, State, TLI);
    if (Reg) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (ValVT.isFixedLengthVector()) {
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
    } else {
      // For return values, the vector must be passed fully via registers or
      // via the stack.
      // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
      // but we're using all of them.
      if (IsRet)
        return true;
      // Try using a GPR to pass the address
      if ((Reg = State.AllocateReg(ArgGPRs))) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else if (ValVT.isScalableVector()) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else {
        StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element sizes, being careful for vXi1
        // vectors.
        StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
      }
    }
  } else {
    Reg = State.AllocateReg(ArgGPRs);
  }

  int64_t StackOffset =
      Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        It.convertToReg(Reg);
      else
        It.convertToMem(StackOffset);
      State.addLoc(It);
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert(((ValVT.isFloatingPoint() && !ValVT.isVector()) || LocVT == XLenVT ||
          (TLI.getSubtarget().hasVInstructions() &&
           (ValVT.isVector() || ValVT.isRISCVVectorTuple()))) &&
         "Expected an XLenVT or vector types at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  return false;
}

// FastCC gives less than a 1% performance improvement on some particular
// benchmarks, but in theory it may still benefit some cases.
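// In addition to the standard argument registers, fastcc also draws on the
// temporaries t3-t6 (x28-x31) and the caller-saved FPRs ft0-ft7 and ft8-ft11
// (see the register lists used below and in getFastCCArgGPRs, an illustrative
// summary) before falling back to the stack.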
bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State,
                           bool IsFixed, bool IsRet, Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();
  RISCVABI::ABI ABI = Subtarget.getTargetABI();

  if ((LocVT == MVT::f16 && Subtarget.hasStdExtZfhmin()) ||
      (LocVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin())) {
    static const MCPhysReg FPR16List[] = {
        RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
        RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
        RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
        RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
    if (MCRegister Reg = State.AllocateReg(FPR16List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    static const MCPhysReg FPR32List[] = {
        RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
        RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
        RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
        RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    static const MCPhysReg FPR64List[] = {
        RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
        RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
        RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
        RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Check if there is an available GPRF16 before hitting the stack.
  if ((LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin())) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF16s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPRF32 before hitting the stack.
  if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF32s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPR before hitting the stack.
  if (LocVT == MVT::f64 && Subtarget.is64Bit() && Subtarget.hasStdExtZdinx()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      if (LocVT.getSizeInBits() != Subtarget.getXLen()) {
        LocVT = XLenVT;
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  ArrayRef<MCPhysReg> ArgGPRs = getFastCCArgGPRs(ABI);

  if (LocVT.isVector()) {
    if (MCRegister Reg = allocateRVVReg(ValVT, ValNo, State, TLI)) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (LocVT.isFixedLengthVector()) {
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }

    // Pass scalable vectors indirectly. Pass fixed vectors indirectly if we
    // have a free GPR.
    if (LocVT.isScalableVector() ||
        State.getFirstUnallocated(ArgGPRs) != ArgGPRs.size()) {
      LocInfo = CCValAssign::Indirect;
      LocVT = XLenVT;
    }
  }

  if (LocVT == XLenVT) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == XLenVT || LocVT == MVT::f16 || LocVT == MVT::bf16 ||
      LocVT == MVT::f32 || LocVT == MVT::f64 || LocVT.isFixedLengthVector()) {
    Align StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
    int64_t Offset = State.AllocateStack(LocVT.getStoreSize(), StackAlign);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }

  return true; // CC didn't match.
}

bool llvm::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                        CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                        CCState &State) {
  if (ArgFlags.isNest()) {
    report_fatal_error(
        "Attribute 'nest' is not supported in GHC calling convention");
  }

  static const MCPhysReg GPRList[] = {
      RISCV::X9,  RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
      RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
    //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  const RISCVSubtarget &Subtarget =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    // Pass in STG registers: F1, ..., F6
    //                        fs0 ... fs5
    static const MCPhysReg FPR32List[] = {RISCV::F8_F,  RISCV::F9_F,
                                          RISCV::F18_F, RISCV::F19_F,
                                          RISCV::F20_F, RISCV::F21_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    // Pass in STG registers: D1, ..., D6
    //                        fs6 ... fs11
    static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
                                          RISCV::F24_D, RISCV::F25_D,
                                          RISCV::F26_D, RISCV::F27_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    static const MCPhysReg GPR32List[] = {
        RISCV::X9_W,  RISCV::X18_W, RISCV::X19_W, RISCV::X20_W,
        RISCV::X21_W, RISCV::X22_W, RISCV::X23_W, RISCV::X24_W,
        RISCV::X25_W, RISCV::X26_W, RISCV::X27_W};
    if (MCRegister Reg = State.AllocateReg(GPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() && Subtarget.is64Bit()) {
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  report_fatal_error("No registers left in GHC calling convention");
  return true;
}
796