//=- AArch64CallingConv.td - Calling Conventions for AArch64 -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the AArch64 architecture.
//
//===----------------------------------------------------------------------===//

/// CCIfBigEndian - Match only if we're in big endian mode.
class CCIfBigEndian<CCAction A> :
  CCIf<"State.getMachineFunction().getDataLayout().isBigEndian()", A>;

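/// CCIfILP32 - Match only if the pointer size is 4 bytes, i.e. the ILP32 ABI.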
class CCIfILP32<CCAction A> :
  CCIf<"State.getMachineFunction().getDataLayout().getPointerSize() == 4", A>;

/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
    : CCIf<!strconcat("State.getMachineFunction()"
                      ".getSubtarget<AArch64Subtarget>().", F),
           A>;

//===----------------------------------------------------------------------===//
// ARM AAPCS64 Calling Convention
//===----------------------------------------------------------------------===//

defvar AArch64_Common = [
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v4bf16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v8bf16, v16i8],
                         CCBitConvertToType<f128>>>,

  // In AAPCS, an SRet is passed in X8, not X0 like a normal pointer parameter.
  // However, on Windows, in some circumstances, the SRet is passed in X0 or X1
  // instead.  The presence of the inreg attribute indicates that SRet is
  // passed in the alternative register (X0 or X1), not X8:
  // - X0 for non-instance methods.
  // - X1 for instance methods.

  // The "sret" attribute identifies indirect returns.
  // The "inreg" attribute identifies non-aggregate types.
  // The position of the "sret" attribute identifies instance/non-instance
  // methods.
  // "sret" on argument 0 means non-instance methods.
  // "sret" on argument 1 means instance methods.

  CCIfInReg<CCIfType<[i64],
    CCIfSRet<CCIfType<[i64], CCAssignToReg<[X0, X1]>>>>>,

  CCIfSRet<CCIfType<[i64], CCAssignToReg<[X8]>>>,
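  // For illustration only (hypothetical IR signatures, not taken from this
  // file): with the in-order processing of the rules above, the sret pointer
  // would be placed as follows:
  //   declare void @non_instance(ptr inreg sret(%T) %out)        ; -> X0
  //   declare void @instance(ptr %this, ptr inreg sret(%T) %out) ; -> X1
  //   declare void @plain_aapcs(ptr sret(%T) %out)               ; -> X8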

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[X20]>>>,

  // A SwiftError is passed in X21.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X21]>>>,

  // Pass SwiftAsync in an otherwise callee saved register so that it will be
  // preserved for normal function calls.
  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[X22]>>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>,
  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCPassIndirect<i64>>,

  CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, aarch64svcount],
           CCAssignToReg<[P0, P1, P2, P3]>>,
  CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, aarch64svcount],
           CCPassIndirect<i64>>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3, W4, W5, W6, W7]>>,
  // i128 is split into two i64s; we can't fit half of it in register X7.
  CCIfType<[i64], CCIfSplit<CCAssignToRegWithShadow<[X0, X2, X4, X6],
                                                    [X0, X1, X3, X5]>>>,

  // i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,
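  // For illustration only (hypothetical IR signature, assuming no other
  // arguments): given the split/shadow rules above,
  //   declare void @f(i64 %a, i128 %b)
  // places %a in X0 and %b in X2/X3; X1 is skipped so that the i128 starts at
  // an even/odd register pair.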

  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[f16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[bf16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // Arguments that do not fit in registers are passed on the stack instead.
  CCIfType<[i1, i8, i16, f16, bf16], CCAssignToStack<8, 8>>,
  CCIfType<[i32, f32], CCAssignToStack<8, 8>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
];

let Entry = 1 in
def CC_AArch64_AAPCS : CallingConv<!listconcat(
  // The 'nest' parameter, if any, is passed in X18.
  // Darwin and Windows use X18 as the platform register and hence 'nest' isn't
  // currently supported there.
  [CCIfNest<CCAssignToReg<[X18]>>],
  AArch64_Common
)>;

let Entry = 1 in
def RetCC_AArch64_AAPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X21]>>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v4bf16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v8bf16, v16i8],
                         CCBitConvertToType<f128>>>,

  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[f16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[bf16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
      CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
      CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>,

  CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, aarch64svcount],
           CCAssignToReg<[P0, P1, P2, P3]>>
]>;

let Entry = 1 in
def CC_AArch64_Win64PCS : CallingConv<AArch64_Common>;

// Vararg functions on Windows pass floating-point values in integer registers.
let Entry = 1 in
def CC_AArch64_Win64_VarArg : CallingConv<[
  CCIfType<[f16, bf16], CCBitConvertToType<i16>>,
  CCIfType<[f32], CCBitConvertToType<i32>>,
  CCIfType<[f64], CCBitConvertToType<i64>>,
  CCDelegateTo<CC_AArch64_Win64PCS>
]>;
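
// For illustration only (hypothetical IR call, assuming this convention is
// selected for the variadic call): under the bit-convert rules above,
//   call void (i32, ...) @g(i32 1, double 2.0)
// passes the i32 in W0 and the double, reinterpreted as an i64, in X1.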

// Vararg functions on the Arm64EC ABI use a different convention, with a
// stack layout compatible with the x64 calling convention.
let Entry = 1 in
def CC_AArch64_Arm64EC_VarArg : CallingConv<[
  // Convert small floating-point values to integer.
  CCIfType<[f16, bf16], CCBitConvertToType<i16>>,
  CCIfType<[f32], CCBitConvertToType<i32>>,
  CCIfType<[f64, v1f64, v1i64, v2f32, v2i32, v4i16, v4f16, v4bf16, v8i8, iPTR],
           CCBitConvertToType<i64>>,

  // Larger floating-point/vector values are passed indirectly.
  CCIfType<[f128, v2f64, v2i64, v4i32, v4f32, v8i16, v8f16, v8bf16, v16i8],
           CCPassIndirect<i64>>,
  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCPassIndirect<i64>>,
  CCIfType<[nxv2i1, nxv4i1, nxv8i1, nxv16i1],
           CCPassIndirect<i64>>,

  // Handle SRet. See comment in CC_AArch64_AAPCS.
  CCIfInReg<CCIfType<[i64],
    CCIfSRet<CCIfType<[i64], CCAssignToReg<[X0, X1]>>>>>,
  CCIfSRet<CCIfType<[i64], CCAssignToReg<[X8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit. (Shouldn't normally come up; the Microsoft ABI doesn't
  // use byval.)
  CCIfByVal<CCPassByVal<8, 8>>,

  // Promote small integers to i32.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,

  // Pass the first four arguments in X0-X3.
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3]>>,
  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3]>>,

  // Put remaining arguments on the stack.
  CCIfType<[i32, i64], CCAssignToStack<8, 8>>,
]>;

// Arm64EC thunks use a calling convention that's precisely the x64 calling
// convention, except that the registers have different names, and the callee
// address is passed in X9.
let Entry = 1 in
def CC_AArch64_Arm64EC_Thunk : CallingConv<[
  // ARM64EC-specific: the InReg attribute can be used to access the x64 sp
  // passed into entry thunks in x4 from the IR.
  CCIfInReg<CCIfType<[i64], CCAssignToReg<[X4]>>>,

  // Byval aggregates are passed by pointer.
  CCIfByVal<CCPassIndirect<i64>>,

  // ARM64EC-specific: promote small integers to i32. (x86 only promotes i1,
  // but that would confuse ARM64 lowering code.)
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10 (X4).
  CCIfNest<CCAssignToReg<[X4]>>,

  // A SwiftError is passed in R12 (X19).
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X19]>>>,

  // Pass SwiftSelf in R13 (X20).
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[X20]>>>,

  // Pass SwiftAsync in an otherwise callee saved register so that calls to
  // normal functions don't need to save it somewhere.
  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[X21]>>>,

  // The 'CFGuardTarget' parameter, if any, is passed in RAX (X8).
  CCIfCFGuardTarget<CCAssignToReg<[X8]>>,

  // 128-bit vectors are passed by pointer.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64], CCPassIndirect<i64>>,

  // 256-bit vectors are passed by pointer.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64], CCPassIndirect<i64>>,

  // 512-bit vectors are passed by pointer.
  CCIfType<[v64i8, v32i16, v16i32, v32f16, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,

  // Long doubles are passed by pointer.
  CCIfType<[f80], CCPassIndirect<i64>>,

  // The first 4 MMX vector arguments are passed in GPRs.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // The first 4 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f16],
           CCAssignToRegWithShadow<[H0, H1, H2, H3],
                                   [X0, X1, X2, X3]>>,
  CCIfType<[f32],
           CCAssignToRegWithShadow<[S0, S1, S2, S3],
                                   [X0, X1, X2, X3]>>,
  CCIfType<[f64],
           CCAssignToRegWithShadow<[D0, D1, D2, D3],
                                   [X0, X1, X2, X3]>>,

  // The first 4 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3],
                                          [Q0, Q1, Q2, Q3]>>,

  // Arm64EC thunks: the first argument is always a pointer to the destination
  // address, stored in X9.
  CCIfType<[i64], CCAssignToReg<[X9]>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3],
                                          [Q0, Q1, Q2, Q3]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i8, i16, i32, i64, f16, f32, f64], CCAssignToStack<8, 8>>
]>;

// The native side of ARM64EC thunks
let Entry = 1 in
def CC_AArch64_Arm64EC_Thunk_Native : CallingConv<[
  CCIfType<[i64], CCAssignToReg<[X9]>>,
  CCDelegateTo<CC_AArch64_AAPCS>
]>;

let Entry = 1 in
def RetCC_AArch64_Arm64EC_Thunk : CallingConv<[
  // The X86-Win64 calling convention always returns __m64 values in RAX.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // Otherwise, everything is the same as 'normal' X86-64 C CC.

  // The X86-64 calling convention always returns FP values in XMM0.
  CCIfType<[f16], CCAssignToReg<[H0, H1]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1]>>,
  CCIfType<[f128], CCAssignToReg<[Q0, Q1]>>,

  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X19]>>>,

  // Scalar values are returned in AX first, then DX.  For i8, the ABI
  // requires the values to be in AL and AH, however this code uses AL and DL
  // instead. This is because using AH for the second register conflicts with
  // the way LLVM does multiple return values -- a return of {i16,i8} would end
  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
  // for functions that return two i8 values are currently expected to pack the
  // values into an i16 (which uses AX, and thus AL:AH).
  //
  // For code that doesn't care about the ABI, we allow returning more than two
  // integer values in registers.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W8, W1, W0]>>,
  CCIfType<[i64], CCAssignToReg<[X8, X1, X0]>>,

  // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2 and XMM3
  // can only be used by ABI non-compliant code. If the target doesn't have XMM
  // registers, it won't have vector types.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
            CCAssignToReg<[Q0, Q1, Q2, Q3]>>
]>;

// Windows Control Flow Guard checks take a single argument (the target function
// address) and have no return value.
let Entry = 1 in
def CC_AArch64_Win64_CFGuard_Check : CallingConv<[
  CCIfType<[i64], CCAssignToReg<[X15]>>
]>;

let Entry = 1 in
def CC_AArch64_Arm64EC_CFGuard_Check : CallingConv<[
  CCIfType<[i64], CCAssignToReg<[X11, X10, X9]>>
]>;

let Entry = 1 in
def RetCC_AArch64_Arm64EC_CFGuard_Check : CallingConv<[
  CCIfType<[i64], CCAssignToReg<[X11]>>
]>;

// Darwin uses a calling convention which differs in only two ways
// from the standard one at this level:
//     + i128s (i.e. values split into two i64s) don't need even registers.
//     + Stack slots are sized as needed rather than being at least 64-bit.
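//
// For illustration only (hypothetical IR signature): given
//   declare void @h(i64 %a, i128 %b)
// the rules below place %b in X1/X2, whereas the shared AAPCS rules above
// would skip X1 and use X2/X3. Likewise, a stack-passed i8 consumes a 1-byte
// slot here rather than the 8-byte minimum slot used by the shared rules.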
let Entry = 1 in
def CC_AArch64_DarwinPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToReg<[X8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[X20]>>>,

  // A SwiftError is passed in X21.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X21]>>>,

  // Pass SwiftAsync in an otherwise callee saved register so that it will be
  // preserved for normal function calls.
  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[X22]>>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>,
  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCPassIndirect<i64>>,

  CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, aarch64svcount],
           CCAssignToReg<[P0, P1, P2, P3]>>,
  CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, aarch64svcount],
           CCPassIndirect<i64>>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3, W4, W5, W6, W7]>>,
  // i128 is split into two i64s; we can't fit half of it in register X7.
  CCIfType<[i64],
           CCIfSplit<CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6]>>>,
  // i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[f16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[bf16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // Arguments that do not fit in registers are passed on the stack instead.
  CCIf<"ValVT == MVT::i1 || ValVT == MVT::i8", CCAssignToStack<1, 1>>,
  CCIf<"ValVT == MVT::i16 || ValVT == MVT::f16 || ValVT == MVT::bf16",
       CCAssignToStack<2, 2>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,

  // Re-demote pointers to 32-bits so we don't end up storing 64-bit
  // values and clobbering neighbouring stack locations. Not very pretty.
  CCIfPtr<CCIfILP32<CCTruncToType<i32>>>,
  CCIfPtr<CCIfILP32<CCAssignToStack<4, 4>>>,

  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
]>;

let Entry = 1 in
def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Stack_Block">>,

  // Handle all scalar types as either i64 or f64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
  CCIfType<[f16, bf16, f32], CCPromoteToType<f64>>,

  // Everything is on the stack.
  // i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
]>;
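
// For illustration only: under the rules above, a variadic f32 argument is
// promoted to f64 and then takes an 8-byte stack slot; the variadic portion
// never uses registers, since this convention contains only stack
// assignments.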

// In the ILP32 world, the minimum stack slot size is 4 bytes. Otherwise the
// same as the normal Darwin VarArgs handling.
let Entry = 1 in
def CC_AArch64_DarwinPCS_ILP32_VarArg : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // Handle all scalar types as either i32 or f32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,
  CCIfType<[f16, bf16], CCPromoteToType<f32>>,

  // Everything is on the stack.
  // i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfPtr<CCIfILP32<CCTruncToType<i32>>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
]>;

//===----------------------------------------------------------------------===//
// ARM64 Calling Convention for GHC
//===----------------------------------------------------------------------===//

// This calling convention is specific to the Glasgow Haskell Compiler.
// The only documentation is the GHC source code, specifically the C header
// file:
//
//    https://github.com/ghc/ghc/blob/master/rts/include/stg/MachRegs.h
//
// which defines the registers for the Spineless Tagless G-Machine (STG) that
// GHC uses to implement lazy evaluation. The generic STG machine has a set of
// registers which are mapped to an appropriate set of architecture-specific
// registers for each CPU architecture.
//
// The STG Machine is documented here:
//
//    https://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/GeneratedCode
//
// The AArch64 register mapping is defined in the following header file:
//
//    https://github.com/ghc/ghc/blob/master/rts/include/stg/MachRegs/arm64.h
//

let Entry = 1 in
def CC_AArch64_GHC : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,

  // Handle all vector types as either f64 or v2f64.
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, f128], CCBitConvertToType<v2f64>>,

  CCIfType<[v2f64], CCAssignToReg<[Q4, Q5]>>,
  CCIfType<[f32], CCAssignToReg<[S8, S9, S10, S11]>>,
  CCIfType<[f64], CCAssignToReg<[D12, D13, D14, D15]>>,

  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64], CCAssignToReg<[X19, X20, X21, X22, X23, X24, X25, X26, X27, X28]>>
]>;
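
// For reference (derived from the positional mapping between the name list in
// the comment above and the register list): Base=X19, Sp=X20, Hp=X21, R1=X22,
// R2=X23, R3=X24, R4=X25, R5=X26, R6=X27, SpLim=X28. Since the convention
// defines no stack assignments for i64, every STG argument must fit in these
// registers.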

let Entry = 1 in
def CC_AArch64_Preserve_None : CallingConv<[
  // VarArgs are only supported using the C calling convention.
  // This handles the non-variadic parameter case. Variadic parameters
  // are handled in CCAssignFnForCall.
  CCIfVarArg<CCIfSubtarget<"isTargetDarwin()", CCDelegateTo<CC_AArch64_DarwinPCS>>>,
  CCIfVarArg<CCIfSubtarget<"isTargetWindows()", CCDelegateTo<CC_AArch64_Win64PCS>>>,
  CCIfVarArg<CCDelegateTo<CC_AArch64_AAPCS>>,

  // We can pass arguments in all general registers, except:
  // - X8, used for sret
  // - X16/X17, used by the linker as IP0/IP1
  // - X18, the platform register
  // - X19, the base pointer
  // - X29, the frame pointer
  // - X30, the link register
  // General registers are not preserved, with the exception of
  // FP, LR, and X18.
  // Non-volatile registers are used first, so functions may call
  // normal functions without saving and reloading arguments.
  // X9 is assigned last as it is used in FrameLowering as the first
  // choice for a scratch register.
  CCIfType<[i32], CCAssignToReg<[W20, W21, W22, W23,
                                 W24, W25, W26, W27, W28,
                                 W0, W1, W2, W3, W4, W5,
                                 W6, W7, W10, W11,
                                 W12, W13, W14, W9]>>,
  CCIfType<[i64], CCAssignToReg<[X20, X21, X22, X23,
                                 X24, X25, X26, X27, X28,
                                 X0, X1, X2, X3, X4, X5,
                                 X6, X7, X10, X11,
                                 X12, X13, X14, X9]>>,

  // Windows uses X15 for stack allocation, so X15/W15 are only used as
  // argument registers on non-Windows targets.
  CCIf<"!State.getMachineFunction().getSubtarget<AArch64Subtarget>().isTargetWindows()",
    CCIfType<[i32], CCAssignToReg<[W15]>>>,
  CCIf<"!State.getMachineFunction().getSubtarget<AArch64Subtarget>().isTargetWindows()",
    CCIfType<[i64], CCAssignToReg<[X15]>>>,

  CCDelegateTo<CC_AArch64_AAPCS>
]>;
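
// For illustration only (hypothetical IR declaration): with the register
// order above,
//   declare preserve_nonecc void @cb(i64 %a, i64 %b)
// receives %a in X20 and %b in X21; because these registers are normally
// callee-saved, @cb can call ordinary AAPCS functions without first spilling
// its arguments.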

// The order of the callee-saves in this file is important, because the
// FrameLowering code will use this order to determine the layout of the
// callee-save area in the stack frame. As can be observed below, Darwin
// requires the frame-record (LR, FP) to be at the top of the callee-save
// area, whereas on other platforms it is at the bottom.

// FIXME: LR is only callee-saved in the sense that *we* preserve it and are
// presumably a callee to someone. External functions may not do so, but this
// is currently safe since BL has LR as an implicit-def and what happens after a
// tail call doesn't matter.
//
// It would be better to model its preservation semantics properly (create a
// vreg on entry, use it in RET & tail call generation; make that vreg def if we
// end up saving LR as part of a call frame). Watch this space...
def CSR_AArch64_AAPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                           X25, X26, X27, X28, LR, FP,
                                           D8,  D9,  D10, D11,
                                           D12, D13, D14, D15)>;

// A variant for treating X18 as callee saved, when interfacing with
// code that needs X18 to be preserved.
def CSR_AArch64_AAPCS_X18 : CalleeSavedRegs<(add X18, CSR_AArch64_AAPCS)>;

// Win64 has unwinding codes for an (FP,LR) pair, save_fplr and save_fplr_x.
// We put FP before LR, so that frame lowering logic generates (FP,LR) pairs,
// and not (LR,FP) pairs.
def CSR_Win_AArch64_AAPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                               X25, X26, X27, X28, FP, LR,
                                               D8, D9, D10, D11,
                                               D12, D13, D14, D15)>;

def CSR_Win_AArch64_AAPCS_SwiftError
    : CalleeSavedRegs<(sub CSR_Win_AArch64_AAPCS, X21)>;

def CSR_Win_AArch64_AAPCS_SwiftTail
    : CalleeSavedRegs<(sub CSR_Win_AArch64_AAPCS, X20, X22)>;

// The Control Flow Guard check call uses a custom calling convention that also
// preserves X0-X8 and Q0-Q7.
def CSR_Win_AArch64_CFGuard_Check : CalleeSavedRegs<(add CSR_Win_AArch64_AAPCS,
                                               (sequence "X%u", 0, 8),
                                               (sequence "Q%u", 0, 7))>;

// To match the x64 calling convention, Arm64EC thunks preserve q6-q15.
def CSR_Win_AArch64_Arm64EC_Thunk : CalleeSavedRegs<(add (sequence "Q%u", 6, 15),
                                                         X19, X20, X21, X22, X23, X24,
                                                         X25, X26, X27, X28, FP, LR)>;

// The AArch64 PCS for vector functions (VPCS) must (additionally) preserve
// the full Q8-Q23 registers.
def CSR_AArch64_AAVPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                          X25, X26, X27, X28, LR, FP,
                                          (sequence "Q%u", 8, 23))>;

// Functions taking SVE arguments or returning an SVE type must (additionally)
// preserve the full Z8-Z23 registers and the predicate registers P4-P15.
def CSR_AArch64_SVE_AAPCS : CalleeSavedRegs<(add (sequence "Z%u", 8, 23),
                                                 (sequence "P%u", 4, 15),
                                                 X19, X20, X21, X22, X23, X24,
                                                 X25, X26, X27, X28, LR, FP)>;

def CSR_Darwin_AArch64_SVE_AAPCS : CalleeSavedRegs<(add (sequence "Z%u", 8, 23),
                                                        (sequence "P%u", 4, 15),
                                                        LR, FP, X19, X20, X21, X22,
                                                        X23, X24, X25, X26, X27, X28)>;

// SME ABI support routines such as __arm_tpidr2_save/restore preserve most registers.
def CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0
                          : CalleeSavedRegs<(add (sequence "Z%u", 0, 31),
                                                 (sequence "P%u", 0, 15),
                                                 (sequence "X%u", 0, 13),
                                                 (sequence "X%u",19, 28),
                                                 LR, FP)>;

// SME ABI support routines such as __arm_get_current_vg preserve most registers.
def CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1
                          : CalleeSavedRegs<(add (sequence "Z%u", 0, 31),
                                                 (sequence "P%u", 0, 15),
                                                 (sequence "X%u", 1, 15),
                                                 (sequence "X%u",19, 28),
                                                 LR, FP)>;

// The SME ABI support routine __arm_sme_state preserves most registers.
def CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2
                          : CalleeSavedRegs<(add (sequence "Z%u", 0, 31),
                                                 (sequence "P%u", 0, 15),
                                                 (sequence "X%u", 2, 15),
                                                 (sequence "X%u",19, 28),
                                                 LR, FP)>;

// The SMSTART/SMSTOP instructions preserve only the general-purpose registers.
def CSR_AArch64_SMStartStop : CalleeSavedRegs<(add (sequence "X%u", 0, 28),
                                                   LR, FP)>;

def CSR_AArch64_AAPCS_SwiftTail
    : CalleeSavedRegs<(sub CSR_AArch64_AAPCS, X20, X22)>;

// Constructors and destructors return 'this' in the iOS 64-bit C++ ABI; since
// 'this' and the pointer return value are both passed in X0 in these cases,
// this can be partially modelled by treating X0 as a callee-saved register;
// only the resulting RegMask is used; the SaveList is ignored.
//
// (For generic ARM 64-bit ABI code, clang will not generate constructors or
// destructors with 'this' returns, so this RegMask will not be used in that
// case.)
def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;

def CSR_AArch64_AAPCS_SwiftError
    : CalleeSavedRegs<(sub CSR_AArch64_AAPCS, X21)>;

// The ELF stub used for TLS-descriptor access saves every feasible
// register. Only X0 and LR are clobbered.
def CSR_AArch64_TLS_ELF
    : CalleeSavedRegs<(add (sequence "X%u", 1, 28), FP,
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_AllRegs
    : CalleeSavedRegs<(add (sequence "W%u", 0, 30), WSP,
                           (sequence "X%u", 0, 28), FP, LR, SP,
                           (sequence "B%u", 0, 31), (sequence "H%u", 0, 31),
                           (sequence "S%u", 0, 31), (sequence "D%u", 0, 31),
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_NoRegs : CalleeSavedRegs<(add)>;

def CSR_AArch64_NoneRegs : CalleeSavedRegs<(add LR, FP)>;

def CSR_AArch64_RT_MostRegs :  CalleeSavedRegs<(add CSR_AArch64_AAPCS,
                                                (sequence "X%u", 9, 15))>;

def CSR_AArch64_RT_AllRegs :  CalleeSavedRegs<(add CSR_AArch64_RT_MostRegs,
                                                (sequence "Q%u", 8, 31))>;

def CSR_AArch64_StackProbe_Windows
    : CalleeSavedRegs<(add (sequence "X%u", 0, 15),
                           (sequence "X%u", 18, 28), FP, SP,
                           (sequence "Q%u", 0, 31))>;

// Darwin variants of AAPCS.
// Darwin puts the frame-record at the top of the callee-save area.
def CSR_Darwin_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
                                                X23, X24, X25, X26, X27, X28,
                                                D8,  D9,  D10, D11,
                                                D12, D13, D14, D15)>;

def CSR_Darwin_AArch64_AAVPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21,
                                                 X22, X23, X24, X25, X26, X27,
                                                 X28, (sequence "Q%u", 8, 23))>;

// For the Windows calling convention on a non-Windows OS, where X18 is treated
// as reserved, back up X18 when entering non-Windows code (marked with the
// Windows calling convention) and restore it when returning, regardless of
// whether the individual function uses it - it might call other functions
// that clobber it.
def CSR_Darwin_AArch64_AAPCS_Win64
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS, X18)>;

def CSR_Darwin_AArch64_AAPCS_ThisReturn
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS, X0)>;

def CSR_Darwin_AArch64_AAPCS_SwiftError
    : CalleeSavedRegs<(sub CSR_Darwin_AArch64_AAPCS, X21)>;

def CSR_Darwin_AArch64_AAPCS_SwiftTail
    : CalleeSavedRegs<(sub CSR_Darwin_AArch64_AAPCS, X20, X22)>;

// The function used by Darwin to obtain the address of a thread-local variable
// guarantees more than a normal AAPCS function. x16 and x17 are used on the
// fast path for calculation, but all other registers except X0 (argument/return)
// and LR (it is a call, after all) are preserved.
def CSR_Darwin_AArch64_TLS
    : CalleeSavedRegs<(add (sub (sequence "X%u", 1, 28), X16, X17),
                           FP,
                           (sequence "Q%u", 0, 31))>;

// We can only handle a register pair with adjacent registers; the registers in
// the pair must also belong to the same class. Since the access function on the
// fast path calls a function that follows CSR_Darwin_AArch64_TLS,
// CSR_Darwin_AArch64_CXX_TLS should be a subset of CSR_Darwin_AArch64_TLS.
def CSR_Darwin_AArch64_CXX_TLS
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS,
                           (sub (sequence "X%u", 1, 28), X9, X15, X16, X17, X18, X19),
                           (sequence "D%u", 0, 31))>;

// CSRs that are handled by the prologue and epilogue.
def CSR_Darwin_AArch64_CXX_TLS_PE
    : CalleeSavedRegs<(add LR, FP)>;

// CSRs that are handled explicitly via copies.
def CSR_Darwin_AArch64_CXX_TLS_ViaCopy
    : CalleeSavedRegs<(sub CSR_Darwin_AArch64_CXX_TLS, LR, FP)>;

def CSR_Darwin_AArch64_RT_MostRegs
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS, (sequence "X%u", 9, 15))>;

def CSR_Darwin_AArch64_RT_AllRegs
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_RT_MostRegs, (sequence "Q%u", 8, 31))>;

// Variants of the standard calling conventions for the shadow call stack.
// These all preserve x18 in addition to any other registers.
def CSR_AArch64_NoRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_NoRegs, X18)>;
def CSR_AArch64_NoneRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_NoneRegs, X18)>;
def CSR_AArch64_AllRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AllRegs, X18)>;
def CSR_AArch64_AAPCS_SwiftError_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS_SwiftError, X18)>;
def CSR_AArch64_RT_MostRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_RT_MostRegs, X18)>;
def CSR_AArch64_RT_AllRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_RT_AllRegs, X18)>;
def CSR_AArch64_AAVPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAVPCS, X18)>;
def CSR_AArch64_SVE_AAPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_SVE_AAPCS, X18)>;
def CSR_AArch64_AAPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X18)>;