xref: /freebsd-src/contrib/llvm-project/llvm/lib/Target/X86/X86CallingConv.td (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
10b57cec5SDimitry Andric//===-- X86CallingConv.td - Calling Conventions X86 32/64 --*- tablegen -*-===//
20b57cec5SDimitry Andric//
30b57cec5SDimitry Andric// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric// See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric//
70b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric//
90b57cec5SDimitry Andric// This describes the calling conventions for the X86-32 and X86-64
100b57cec5SDimitry Andric// architectures.
110b57cec5SDimitry Andric//
120b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
130b57cec5SDimitry Andric
140b57cec5SDimitry Andric/// CCIfSubtarget - Match if the current subtarget has a feature F.
/// F is a boolean X86Subtarget predicate string, e.g. "is64Bit()" or
/// "hasSSE1()", spliced into generated C++ and evaluated at CC-assignment time.
150b57cec5SDimitry Andricclass CCIfSubtarget<string F, CCAction A>
160b57cec5SDimitry Andric    : CCIf<!strconcat("static_cast<const X86Subtarget&>"
170b57cec5SDimitry Andric                       "(State.getMachineFunction().getSubtarget()).", F),
180b57cec5SDimitry Andric           A>;
190b57cec5SDimitry Andric
200b57cec5SDimitry Andric/// CCIfNotSubtarget - Match if the current subtarget does not have feature F.
210b57cec5SDimitry Andricclass CCIfNotSubtarget<string F, CCAction A>
220b57cec5SDimitry Andric    : CCIf<!strconcat("!static_cast<const X86Subtarget&>"
230b57cec5SDimitry Andric                       "(State.getMachineFunction().getSubtarget()).", F),
240b57cec5SDimitry Andric           A>;
250b57cec5SDimitry Andric
265f757f3fSDimitry Andric/// CCIfRegCallv4 - Match if __regcall ABI v.4 is in effect, i.e. the
/// module carries the "RegCallv4" module flag.
275f757f3fSDimitry Andricclass CCIfRegCallv4<CCAction A>
285f757f3fSDimitry Andric    : CCIf<"State.getMachineFunction().getFunction().getParent()->getModuleFlag(\"RegCallv4\")!=nullptr",
295f757f3fSDimitry Andric           A>;
305f757f3fSDimitry Andric
31349cc55cSDimitry Andric/// CCIfIsVarArgOnWin - Match vararg calls in a Windows MSVC environment.
/// NOTE(review): despite the name, the predicate does not check for 32 bits —
/// it only tests isVarArg and isWindowsMSVCEnvironment(); confirm callers
/// restrict it to 32-bit conventions.
32349cc55cSDimitry Andricclass CCIfIsVarArgOnWin<CCAction A>
33349cc55cSDimitry Andric    : CCIf<"State.isVarArg() && "
34349cc55cSDimitry Andric           "State.getMachineFunction().getSubtarget().getTargetTriple()."
35349cc55cSDimitry Andric           "isWindowsMSVCEnvironment()",
36349cc55cSDimitry Andric           A>;
37349cc55cSDimitry Andric
380b57cec5SDimitry Andric// Register classes for RegCall
// Base bundle of register lists used by the __regcall conventions below;
// concrete subclasses fill in the per-target GPR/XMM/YMM/ZMM lists.
390b57cec5SDimitry Andricclass RC_X86_RegCall {
400b57cec5SDimitry Andric  list<Register> GPR_8 = [];
410b57cec5SDimitry Andric  list<Register> GPR_16 = [];
420b57cec5SDimitry Andric  list<Register> GPR_32 = [];
430b57cec5SDimitry Andric  list<Register> GPR_64 = [];
440b57cec5SDimitry Andric  list<Register> FP_CALL = [FP0];  // x87 register for f80 arguments.
450b57cec5SDimitry Andric  list<Register> FP_RET = [FP0, FP1];  // x87 registers for f80 return values.
460b57cec5SDimitry Andric  list<Register> XMM = [];
470b57cec5SDimitry Andric  list<Register> YMM = [];
480b57cec5SDimitry Andric  list<Register> ZMM = [];
490b57cec5SDimitry Andric}
500b57cec5SDimitry Andric
510b57cec5SDimitry Andric// RegCall register classes for 32 bits (pre-v.4 ABI; see the v.4 variant below).
520b57cec5SDimitry Andricdef RC_X86_32_RegCall : RC_X86_RegCall {
530b57cec5SDimitry Andric  let GPR_8 = [AL, CL, DL, DIL, SIL];
540b57cec5SDimitry Andric  let GPR_16 = [AX, CX, DX, DI, SI];
550b57cec5SDimitry Andric  let GPR_32 = [EAX, ECX, EDX, EDI, ESI];
560b57cec5SDimitry Andric  let GPR_64 = [RAX]; ///< Not actually used, but AssignToReg can't handle []
570b57cec5SDimitry Andric                      ///< \todo Fix AssignToReg to enable empty lists
580b57cec5SDimitry Andric  let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7];
590b57cec5SDimitry Andric  let YMM = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7];
600b57cec5SDimitry Andric  let ZMM = [ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7];
610b57cec5SDimitry Andric}
620b57cec5SDimitry Andric
635f757f3fSDimitry Andric// RegCall register classes for 32 bits when regcall ABI v.4 is respected.
645f757f3fSDimitry Andric// Change in __regcall ABI v.4: don't use EAX, as a spare register is
655f757f3fSDimitry Andric// needed to code the virtual call thunk.
665f757f3fSDimitry Andricdef RC_X86_32_RegCallv4_Win : RC_X86_RegCall {
675f757f3fSDimitry Andric  let GPR_8 = [CL, DL, DIL, SIL];
685f757f3fSDimitry Andric  let GPR_16 = [CX, DX, DI, SI];
695f757f3fSDimitry Andric  let GPR_32 = [ECX, EDX, EDI, ESI];
705f757f3fSDimitry Andric  let GPR_64 = [RAX]; ///< Not actually used, but AssignToReg can't handle []
715f757f3fSDimitry Andric                      ///< \todo Fix AssignToReg to enable empty lists
725f757f3fSDimitry Andric  let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7];
735f757f3fSDimitry Andric  let YMM = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7];
745f757f3fSDimitry Andric  let ZMM = [ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7];
755f757f3fSDimitry Andric}
765f757f3fSDimitry Andric
// Vector register lists common to all 64-bit RegCall variants; the GPR lists
// are supplied by the Win/SysV subclasses below.
770b57cec5SDimitry Andricclass RC_X86_64_RegCall : RC_X86_RegCall {
780b57cec5SDimitry Andric  let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
790b57cec5SDimitry Andric             XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15];
800b57cec5SDimitry Andric  let YMM = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
810b57cec5SDimitry Andric             YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15];
820b57cec5SDimitry Andric  let ZMM = [ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7,
830b57cec5SDimitry Andric             ZMM8, ZMM9, ZMM10, ZMM11, ZMM12, ZMM13, ZMM14, ZMM15];
840b57cec5SDimitry Andric}
850b57cec5SDimitry Andric
// Win64 RegCall (pre-v.4) GPR lists; R13 is omitted (see the R13 note on the
// v.4 variant below).
860b57cec5SDimitry Andricdef RC_X86_64_RegCall_Win : RC_X86_64_RegCall {
870b57cec5SDimitry Andric  let GPR_8 = [AL, CL, DL, DIL, SIL, R8B, R9B, R10B, R11B, R12B, R14B, R15B];
880b57cec5SDimitry Andric  let GPR_16 = [AX, CX, DX, DI, SI, R8W, R9W, R10W, R11W, R12W, R14W, R15W];
890b57cec5SDimitry Andric  let GPR_32 = [EAX, ECX, EDX, EDI, ESI, R8D, R9D, R10D, R11D, R12D, R14D, R15D];
900b57cec5SDimitry Andric  let GPR_64 = [RAX, RCX, RDX, RDI, RSI, R8, R9, R10, R11, R12, R14, R15];
910b57cec5SDimitry Andric}
920b57cec5SDimitry Andric
935f757f3fSDimitry Andric// On Windows 64 we don't want to use R13 - it is reserved for
945f757f3fSDimitry Andric// largely aligned stack.
955f757f3fSDimitry Andric// Change in __regcall ABI v.4: additionally don't use R10, as a
965f757f3fSDimitry Andric// spare register is needed to code the virtual call thunk.
975f757f3fSDimitry Andric//
985f757f3fSDimitry Andricdef RC_X86_64_RegCallv4_Win : RC_X86_64_RegCall {
995f757f3fSDimitry Andric  let GPR_8 = [AL, CL, DL, DIL, SIL, R8B, R9B, R11B, R12B, R14B, R15B];
1005f757f3fSDimitry Andric  let GPR_16 = [AX, CX, DX, DI, SI, R8W, R9W, R11W, R12W, R14W, R15W];
1015f757f3fSDimitry Andric  let GPR_32 = [EAX, ECX, EDX, EDI, ESI, R8D, R9D, R11D, R12D, R14D, R15D];
1025f757f3fSDimitry Andric  let GPR_64 = [RAX, RCX, RDX, RDI, RSI, R8, R9, R11, R12, R14, R15];
1035f757f3fSDimitry Andric}
1045f757f3fSDimitry Andric
// SysV64 RegCall GPR lists; R10 and R11 are omitted. NOTE(review): the
// rationale is not stated here — presumably they are kept free as
// scratch/static-chain registers; confirm against the regcall ABI spec.
1050b57cec5SDimitry Andricdef RC_X86_64_RegCall_SysV : RC_X86_64_RegCall {
1060b57cec5SDimitry Andric  let GPR_8 = [AL, CL, DL, DIL, SIL, R8B, R9B, R12B, R13B, R14B, R15B];
1070b57cec5SDimitry Andric  let GPR_16 = [AX, CX, DX, DI, SI, R8W, R9W, R12W, R13W, R14W, R15W];
1080b57cec5SDimitry Andric  let GPR_32 = [EAX, ECX, EDX, EDI, ESI, R8D, R9D, R12D, R13D, R14D, R15D];
1090b57cec5SDimitry Andric  let GPR_64 = [RAX, RCX, RDX, RDI, RSI, R8, R9, R12, R13, R14, R15];
1100b57cec5SDimitry Andric}
1110b57cec5SDimitry Andric
1120b57cec5SDimitry Andric// Intel regcall calling convention: argument (CC_*) and return-value
// (RetCC_*) rules for both X86-32 and X86-64, parameterized by the
// register-class bundle RC.
1130b57cec5SDimitry Andricmulticlass X86_RegCall_base<RC_X86_RegCall RC> {
1140b57cec5SDimitry Andricdef CC_#NAME : CallingConv<[
1150b57cec5SDimitry Andric  // Handles byval parameters.
1160b57cec5SDimitry Andric    CCIfSubtarget<"is64Bit()", CCIfByVal<CCPassByVal<8, 8>>>,
1170b57cec5SDimitry Andric    CCIfByVal<CCPassByVal<4, 4>>,
1180b57cec5SDimitry Andric
1190b57cec5SDimitry Andric    // Promote i1/i8/i16/v1i1 arguments to i32.
1200b57cec5SDimitry Andric    CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
1210b57cec5SDimitry Andric
1220b57cec5SDimitry Andric    // Promote v8i1/v16i1/v32i1 arguments to i32.
1230b57cec5SDimitry Andric    CCIfType<[v8i1, v16i1, v32i1], CCPromoteToType<i32>>,
1240b57cec5SDimitry Andric
1250b57cec5SDimitry Andric    // bool, char, int, enum, long, pointer --> GPR
1260b57cec5SDimitry Andric    CCIfType<[i32], CCAssignToReg<RC.GPR_32>>,
1270b57cec5SDimitry Andric
1280b57cec5SDimitry Andric    // long long, __int64 --> GPR
1290b57cec5SDimitry Andric    CCIfType<[i64], CCAssignToReg<RC.GPR_64>>,
1300b57cec5SDimitry Andric
1310b57cec5SDimitry Andric    // __mmask64 (v64i1) --> GPR64 (for x64) or 2 x GPR32 (for IA32)
1320b57cec5SDimitry Andric    CCIfType<[v64i1], CCPromoteToType<i64>>,
1330b57cec5SDimitry Andric    CCIfSubtarget<"is64Bit()", CCIfType<[i64],
1340b57cec5SDimitry Andric      CCAssignToReg<RC.GPR_64>>>,
1350b57cec5SDimitry Andric    CCIfSubtarget<"is32Bit()", CCIfType<[i64],
1360b57cec5SDimitry Andric      CCCustom<"CC_X86_32_RegCall_Assign2Regs">>>,
1370b57cec5SDimitry Andric
1380b57cec5SDimitry Andric    // float, double, float128 --> XMM
1390b57cec5SDimitry Andric    // In the case of SSE disabled --> save to stack
1400b57cec5SDimitry Andric    CCIfType<[f32, f64, f128],
1410b57cec5SDimitry Andric      CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,
1420b57cec5SDimitry Andric
1430b57cec5SDimitry Andric    // long double --> FP
1440b57cec5SDimitry Andric    CCIfType<[f80], CCAssignToReg<RC.FP_CALL>>,
1450b57cec5SDimitry Andric
1460b57cec5SDimitry Andric    // __m128, __m128i, __m128d --> XMM
1470b57cec5SDimitry Andric    // In the case of SSE disabled --> save to stack
1480b57cec5SDimitry Andric    CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
1490b57cec5SDimitry Andric      CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,
1500b57cec5SDimitry Andric
1510b57cec5SDimitry Andric    // __m256, __m256i, __m256d --> YMM
1520b57cec5SDimitry Andric    // In the case of AVX disabled --> save to stack
1530b57cec5SDimitry Andric    CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
1540b57cec5SDimitry Andric      CCIfSubtarget<"hasAVX()", CCAssignToReg<RC.YMM>>>,
1550b57cec5SDimitry Andric
1560b57cec5SDimitry Andric    // __m512, __m512i, __m512d --> ZMM
1570b57cec5SDimitry Andric    // In the case of AVX512 disabled --> save to stack
1580b57cec5SDimitry Andric    CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
1590b57cec5SDimitry Andric      CCIfSubtarget<"hasAVX512()",CCAssignToReg<RC.ZMM>>>,
1600b57cec5SDimitry Andric
1610b57cec5SDimitry Andric    // If no register was found -> assign to stack
1620b57cec5SDimitry Andric
1630b57cec5SDimitry Andric    // In 64 bit, assign 64/32 bit values to 8 byte stack
1640b57cec5SDimitry Andric    CCIfSubtarget<"is64Bit()", CCIfType<[i32, i64, f32, f64],
1650b57cec5SDimitry Andric      CCAssignToStack<8, 8>>>,
1660b57cec5SDimitry Andric
1670b57cec5SDimitry Andric    // In 32 bit, assign 64/32 bit values to 8/4 byte stack
1680b57cec5SDimitry Andric    CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
1690b57cec5SDimitry Andric    CCIfType<[i64, f64], CCAssignToStack<8, 4>>,
1700b57cec5SDimitry Andric
1710b57cec5SDimitry Andric    // MMX type gets an 8 byte stack slot, while alignment depends on the target
1720b57cec5SDimitry Andric    CCIfSubtarget<"is64Bit()", CCIfType<[x86mmx], CCAssignToStack<8, 8>>>,
1730b57cec5SDimitry Andric    CCIfType<[x86mmx], CCAssignToStack<8, 4>>,
1740b57cec5SDimitry Andric
1750b57cec5SDimitry Andric    // f80 and f128 get stack slots whose size and alignment depend
1760b57cec5SDimitry Andric    // on the subtarget.
1770b57cec5SDimitry Andric    CCIfType<[f80, f128], CCAssignToStack<0, 0>>,
1780b57cec5SDimitry Andric
1790b57cec5SDimitry Andric    // Vectors get 16-byte stack slots that are 16-byte aligned.
1800b57cec5SDimitry Andric    CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
1810b57cec5SDimitry Andric      CCAssignToStack<16, 16>>,
1820b57cec5SDimitry Andric
1830b57cec5SDimitry Andric    // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
1840b57cec5SDimitry Andric    CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
1850b57cec5SDimitry Andric      CCAssignToStack<32, 32>>,
1860b57cec5SDimitry Andric
1870b57cec5SDimitry Andric    // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
1880b57cec5SDimitry Andric    CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
1890b57cec5SDimitry Andric      CCAssignToStack<64, 64>>
1900b57cec5SDimitry Andric]>;
1910b57cec5SDimitry Andric
1920b57cec5SDimitry Andricdef RetCC_#NAME : CallingConv<[
1930b57cec5SDimitry Andric    // Promote i1, v1i1, v8i1 arguments to i8.
1940b57cec5SDimitry Andric    CCIfType<[i1, v1i1, v8i1], CCPromoteToType<i8>>,
1950b57cec5SDimitry Andric
1960b57cec5SDimitry Andric    // Promote v16i1 arguments to i16.
1970b57cec5SDimitry Andric    CCIfType<[v16i1], CCPromoteToType<i16>>,
1980b57cec5SDimitry Andric
1990b57cec5SDimitry Andric    // Promote v32i1 arguments to i32.
2000b57cec5SDimitry Andric    CCIfType<[v32i1], CCPromoteToType<i32>>,
2010b57cec5SDimitry Andric
2020b57cec5SDimitry Andric    // bool, char, int, enum, long, pointer --> GPR
2030b57cec5SDimitry Andric    CCIfType<[i8], CCAssignToReg<RC.GPR_8>>,
2040b57cec5SDimitry Andric    CCIfType<[i16], CCAssignToReg<RC.GPR_16>>,
2050b57cec5SDimitry Andric    CCIfType<[i32], CCAssignToReg<RC.GPR_32>>,
2060b57cec5SDimitry Andric
2070b57cec5SDimitry Andric    // long long, __int64 --> GPR
2080b57cec5SDimitry Andric    CCIfType<[i64], CCAssignToReg<RC.GPR_64>>,
2090b57cec5SDimitry Andric
2100b57cec5SDimitry Andric    // __mmask64 (v64i1) --> GPR64 (for x64) or 2 x GPR32 (for IA32)
2110b57cec5SDimitry Andric    CCIfType<[v64i1], CCPromoteToType<i64>>,
2120b57cec5SDimitry Andric    CCIfSubtarget<"is64Bit()", CCIfType<[i64],
2130b57cec5SDimitry Andric      CCAssignToReg<RC.GPR_64>>>,
2140b57cec5SDimitry Andric    CCIfSubtarget<"is32Bit()", CCIfType<[i64],
2150b57cec5SDimitry Andric      CCCustom<"CC_X86_32_RegCall_Assign2Regs">>>,
2160b57cec5SDimitry Andric
2170b57cec5SDimitry Andric    // long double --> FP
2180b57cec5SDimitry Andric    CCIfType<[f80], CCAssignToReg<RC.FP_RET>>,
2190b57cec5SDimitry Andric
2200b57cec5SDimitry Andric    // float, double, float128 --> XMM
2210b57cec5SDimitry Andric    CCIfType<[f32, f64, f128],
2220b57cec5SDimitry Andric      CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,
2230b57cec5SDimitry Andric
2240b57cec5SDimitry Andric    // __m128, __m128i, __m128d --> XMM
2250b57cec5SDimitry Andric    CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
2260b57cec5SDimitry Andric      CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,
2270b57cec5SDimitry Andric
2280b57cec5SDimitry Andric    // __m256, __m256i, __m256d --> YMM
2290b57cec5SDimitry Andric    CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
2300b57cec5SDimitry Andric      CCIfSubtarget<"hasAVX()", CCAssignToReg<RC.YMM>>>,
2310b57cec5SDimitry Andric
2320b57cec5SDimitry Andric    // __m512, __m512i, __m512d --> ZMM
2330b57cec5SDimitry Andric    CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
2340b57cec5SDimitry Andric      CCIfSubtarget<"hasAVX512()", CCAssignToReg<RC.ZMM>>>
2350b57cec5SDimitry Andric]>;
2360b57cec5SDimitry Andric}
2370b57cec5SDimitry Andric
2380b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
2390b57cec5SDimitry Andric// Return Value Calling Conventions
2400b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
2410b57cec5SDimitry Andric
2420b57cec5SDimitry Andric// Return-value conventions common to all X86 CC's.
2430b57cec5SDimitry Andricdef RetCC_X86Common : CallingConv<[
2440b57cec5SDimitry Andric  // Scalar values are returned in AX first, then DX.  For i8, the ABI
2450b57cec5SDimitry Andric  // requires the values to be in AL and AH, however this code uses AL and DL
2460b57cec5SDimitry Andric  // instead. This is because using AH for the second register conflicts with
2470b57cec5SDimitry Andric  // the way LLVM does multiple return values -- a return of {i16,i8} would end
2480b57cec5SDimitry Andric  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
2490b57cec5SDimitry Andric  // for functions that return two i8 values are currently expected to pack the
2500b57cec5SDimitry Andric  // values into an i16 (which uses AX, and thus AL:AH).
2510b57cec5SDimitry Andric  //
2520b57cec5SDimitry Andric  // For code that doesn't care about the ABI, we allow returning more than two
2530b57cec5SDimitry Andric  // integer values in registers.
2540b57cec5SDimitry Andric  CCIfType<[v1i1],  CCPromoteToType<i8>>,
2550b57cec5SDimitry Andric  CCIfType<[i1],  CCPromoteToType<i8>>,
2560b57cec5SDimitry Andric  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL]>>,
2570b57cec5SDimitry Andric  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
2580b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
2590b57cec5SDimitry Andric  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,
2600b57cec5SDimitry Andric
2610b57cec5SDimitry Andric  // Boolean vectors of AVX-512 are returned in SIMD registers.
2620b57cec5SDimitry Andric  // The call from AVX to AVX-512 function should work,
2630b57cec5SDimitry Andric  // since the boolean types in AVX/AVX2 are promoted by default.
2640b57cec5SDimitry Andric  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
2650b57cec5SDimitry Andric  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
2660b57cec5SDimitry Andric  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
2670b57cec5SDimitry Andric  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
2680b57cec5SDimitry Andric  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
2690b57cec5SDimitry Andric  CCIfType<[v64i1], CCPromoteToType<v64i8>>,
2700b57cec5SDimitry Andric
2710b57cec5SDimitry Andric  // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2 and XMM3
2720b57cec5SDimitry Andric  // can only be used by ABI non-compliant code. If the target doesn't have XMM
2730b57cec5SDimitry Andric  // registers, it won't have vector types.
274349cc55cSDimitry Andric  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
2750b57cec5SDimitry Andric            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
2760b57cec5SDimitry Andric
2770b57cec5SDimitry Andric  // 256-bit vectors are returned in YMM0 and YMM1, when they fit. YMM2 and YMM3
2780b57cec5SDimitry Andric  // can only be used by ABI non-compliant code. This vector type is only
2790b57cec5SDimitry Andric  // supported while using the AVX target feature.
280349cc55cSDimitry Andric  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
2810b57cec5SDimitry Andric            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,
2820b57cec5SDimitry Andric
2830b57cec5SDimitry Andric  // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3
2840b57cec5SDimitry Andric  // can only be used by ABI non-compliant code. This vector type is only
2850b57cec5SDimitry Andric  // supported while using the AVX-512 target feature.
286349cc55cSDimitry Andric  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
2870b57cec5SDimitry Andric            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,
2880b57cec5SDimitry Andric
2890b57cec5SDimitry Andric  // MMX vector types are always returned in MM0. If the target doesn't have
2900b57cec5SDimitry Andric  // MM0, it doesn't support these vector types.
2910b57cec5SDimitry Andric  CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,
2920b57cec5SDimitry Andric
2930b57cec5SDimitry Andric  // Long double types are always returned in FP0 (even with SSE),
2940b57cec5SDimitry Andric  // except on Win64.
2950b57cec5SDimitry Andric  CCIfNotSubtarget<"isTargetWin64()", CCIfType<[f80], CCAssignToReg<[FP0, FP1]>>>
2960b57cec5SDimitry Andric]>;
2970b57cec5SDimitry Andric
2980b57cec5SDimitry Andric// X86-32 C return-value convention.
2990b57cec5SDimitry Andricdef RetCC_X86_32_C : CallingConv<[
3000b57cec5SDimitry Andric  // The X86-32 calling convention returns FP values in FP0, unless marked
3010b57cec5SDimitry Andric  // with "inreg" (used here to distinguish one kind of reg from another,
3020b57cec5SDimitry Andric  // weirdly; this is really the sse-regparm calling convention) in which
3030b57cec5SDimitry Andric  // case they use XMM0, otherwise it is the same as the common X86 calling
3040b57cec5SDimitry Andric  // conv.
3050b57cec5SDimitry Andric  CCIfInReg<CCIfSubtarget<"hasSSE2()",
3060b57cec5SDimitry Andric    CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
307349cc55cSDimitry Andric  CCIfSubtarget<"hasX87()",
308349cc55cSDimitry Andric    CCIfType<[f32, f64], CCAssignToReg<[FP0, FP1]>>>,
309349cc55cSDimitry Andric  CCIfNotSubtarget<"hasX87()",
310349cc55cSDimitry Andric    CCIfType<[f32], CCAssignToReg<[EAX, EDX, ECX]>>>,
  // f16 always goes to XMM registers. NOTE(review): unlike f32/f64 there is
  // no SSE guard on this rule — presumably f16 implies SSE2; confirm.
311349cc55cSDimitry Andric  CCIfType<[f16], CCAssignToReg<[XMM0,XMM1,XMM2]>>,
3120b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86Common>
3130b57cec5SDimitry Andric]>;
3140b57cec5SDimitry Andric
3150b57cec5SDimitry Andric// X86-32 FastCC return-value convention.
// Used for both CallingConv::Fast and CallingConv::Tail (see RetCC_X86_32).
3160b57cec5SDimitry Andricdef RetCC_X86_32_Fast : CallingConv<[
3170b57cec5SDimitry Andric  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
3180b57cec5SDimitry Andric  // SSE2.
3190b57cec5SDimitry Andric  // This can happen when a float, 2 x float, or 3 x float vector is split by
3200b57cec5SDimitry Andric  // target lowering, and is returned in 1-3 sse regs.
3210b57cec5SDimitry Andric  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
3220b57cec5SDimitry Andric  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
3230b57cec5SDimitry Andric
3240b57cec5SDimitry Andric  // For integers, ECX can be used as an extra return register
3250b57cec5SDimitry Andric  CCIfType<[i8],  CCAssignToReg<[AL, DL, CL]>>,
3260b57cec5SDimitry Andric  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
3270b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
3280b57cec5SDimitry Andric
3290b57cec5SDimitry Andric  // Otherwise, it is the same as the common X86 calling convention.
3300b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86Common>
3310b57cec5SDimitry Andric]>;
3320b57cec5SDimitry Andric
3330b57cec5SDimitry Andric// Intel_OCL_BI return-value convention.
3340b57cec5SDimitry Andricdef RetCC_Intel_OCL_BI : CallingConv<[
3350b57cec5SDimitry Andric  // Vector types are returned in XMM0,XMM1,XMM2 and XMM3.
3360b57cec5SDimitry Andric  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
3370b57cec5SDimitry Andric            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
3380b57cec5SDimitry Andric
3390b57cec5SDimitry Andric  // 256-bit FP and integer vectors
3400b57cec5SDimitry Andric  // No more than 4 registers
3410b57cec5SDimitry Andric  CCIfType<[v8f32, v4f64, v8i32, v4i64],
3420b57cec5SDimitry Andric            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,
3430b57cec5SDimitry Andric
3440b57cec5SDimitry Andric  // 512-bit FP and integer vectors
3450b57cec5SDimitry Andric  CCIfType<[v16f32, v8f64, v16i32, v8i64],
3460b57cec5SDimitry Andric            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,
3470b57cec5SDimitry Andric
3480b57cec5SDimitry Andric  // i32, i64 in the standard way
3490b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86Common>
3500b57cec5SDimitry Andric]>;
3510b57cec5SDimitry Andric
3520b57cec5SDimitry Andric// X86-32 HiPE return-value convention.
3530b57cec5SDimitry Andricdef RetCC_X86_32_HiPE : CallingConv<[
3540b57cec5SDimitry Andric  // Promote i8/i16 to i32
3550b57cec5SDimitry Andric  CCIfType<[i8, i16], CCPromoteToType<i32>>,
3560b57cec5SDimitry Andric
3570b57cec5SDimitry Andric  // Return: HP, P, VAL1, VAL2
3580b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX]>>
3590b57cec5SDimitry Andric]>;
3600b57cec5SDimitry Andric
3610b57cec5SDimitry Andric// X86-32 Vectorcall return-value convention.
3620b57cec5SDimitry Andricdef RetCC_X86_32_VectorCall : CallingConv<[
3630b57cec5SDimitry Andric  // Floating Point types are returned in XMM0,XMM1,XMM2 and XMM3.
3640b57cec5SDimitry Andric  CCIfType<[f32, f64, f128],
3650b57cec5SDimitry Andric            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
3660b57cec5SDimitry Andric
3670b57cec5SDimitry Andric  // Return integers in the standard way.
3680b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86Common>
3690b57cec5SDimitry Andric]>;
3700b57cec5SDimitry Andric
3710b57cec5SDimitry Andric// X86-64 C return-value convention.
3720b57cec5SDimitry Andricdef RetCC_X86_64_C : CallingConv<[
3730b57cec5SDimitry Andric  // The X86-64 calling convention returns FP values in XMM0 (XMM1 for a
  // second value).
374349cc55cSDimitry Andric  CCIfType<[f16], CCAssignToReg<[XMM0, XMM1]>>,
3750b57cec5SDimitry Andric  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1]>>,
3760b57cec5SDimitry Andric  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>>,
3770b57cec5SDimitry Andric  CCIfType<[f128], CCAssignToReg<[XMM0, XMM1]>>,
3780b57cec5SDimitry Andric
3790b57cec5SDimitry Andric  // MMX vector types are always returned in XMM0/XMM1.
3800b57cec5SDimitry Andric  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,
3810b57cec5SDimitry Andric
382e8d8bef9SDimitry Andric  // Pointers are always returned in full 64-bit registers.
383e8d8bef9SDimitry Andric  CCIfPtr<CCCustom<"CC_X86_64_Pointer">>,
384e8d8bef9SDimitry Andric
3850b57cec5SDimitry Andric  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,
3860b57cec5SDimitry Andric
3870b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86Common>
3880b57cec5SDimitry Andric]>;
3890b57cec5SDimitry Andric
3900b57cec5SDimitry Andric// X86-Win64 C return-value convention.
// Applies Win64-specific overrides, then delegates to RetCC_X86_64_C.
3910b57cec5SDimitry Andricdef RetCC_X86_Win64_C : CallingConv<[
3920b57cec5SDimitry Andric  // The X86-Win64 calling convention always returns __m64 values in RAX.
3930b57cec5SDimitry Andric  CCIfType<[x86mmx], CCBitConvertToType<i64>>,
3940b57cec5SDimitry Andric
395480093f4SDimitry Andric  // GCC returns FP values in RAX on Win64.
396480093f4SDimitry Andric  CCIfType<[f32], CCIfNotSubtarget<"hasSSE1()", CCBitConvertToType<i32>>>,
397480093f4SDimitry Andric  CCIfType<[f64], CCIfNotSubtarget<"hasSSE1()", CCBitConvertToType<i64>>>,
398480093f4SDimitry Andric
3990b57cec5SDimitry Andric  // Otherwise, everything is the same as 'normal' X86-64 C CC.
4000b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86_64_C>
4010b57cec5SDimitry Andric]>;
4020b57cec5SDimitry Andric
4030b57cec5SDimitry Andric// X86-64 vectorcall return-value convention.
4040b57cec5SDimitry Andricdef RetCC_X86_64_Vectorcall : CallingConv<[
4050b57cec5SDimitry Andric  // Vectorcall always returns FP values in XMM0-XMM3.
4060b57cec5SDimitry Andric  CCIfType<[f32, f64, f128],
4070b57cec5SDimitry Andric    CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
4080b57cec5SDimitry Andric
4090b57cec5SDimitry Andric  // Otherwise, everything is the same as Windows X86-64 C CC.
4100b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86_Win64_C>
4110b57cec5SDimitry Andric]>;
4120b57cec5SDimitry Andric
4130b57cec5SDimitry Andric// X86-64 HiPE return-value convention.
4140b57cec5SDimitry Andricdef RetCC_X86_64_HiPE : CallingConv<[
4150b57cec5SDimitry Andric  // Promote all integer types to i64
4160b57cec5SDimitry Andric  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
4170b57cec5SDimitry Andric
4180b57cec5SDimitry Andric  // Return: HP, P, VAL1, VAL2
4190b57cec5SDimitry Andric  CCIfType<[i64], CCAssignToReg<[R15, RBP, RAX, RDX]>>
4200b57cec5SDimitry Andric]>;
4210b57cec5SDimitry Andric
// X86-64 Swift return-value convention: the common convention extended with
// CX/R8 as extra integer return registers and R12 for swifterror.
4220b57cec5SDimitry Andricdef RetCC_X86_64_Swift : CallingConv<[
4230b57cec5SDimitry Andric
4240b57cec5SDimitry Andric  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,
4250b57cec5SDimitry Andric
4260b57cec5SDimitry Andric  // For integers, ECX, R8D can be used as extra return registers.
4270b57cec5SDimitry Andric  CCIfType<[v1i1],  CCPromoteToType<i8>>,
4280b57cec5SDimitry Andric  CCIfType<[i1],  CCPromoteToType<i8>>,
4290b57cec5SDimitry Andric  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL, R8B]>>,
4300b57cec5SDimitry Andric  CCIfType<[i16], CCAssignToReg<[AX, DX, CX, R8W]>>,
4310b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX, R8D]>>,
4320b57cec5SDimitry Andric  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX, R8]>>,
4330b57cec5SDimitry Andric
4340b57cec5SDimitry Andric  // XMM0, XMM1, XMM2 and XMM3 can be used to return FP values.
4350b57cec5SDimitry Andric  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
4360b57cec5SDimitry Andric  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
4370b57cec5SDimitry Andric  CCIfType<[f128], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
4380b57cec5SDimitry Andric
4390b57cec5SDimitry Andric  // MMX vector types are returned in XMM0, XMM1, XMM2 and XMM3.
4400b57cec5SDimitry Andric  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
4410b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86Common>
4420b57cec5SDimitry Andric]>;
4430b57cec5SDimitry Andric
4440b57cec5SDimitry Andric// X86-64 AnyReg return-value convention. No explicit register is specified for
4450b57cec5SDimitry Andric// the return-value. The register allocator is allowed and expected to choose
4460b57cec5SDimitry Andric// any free register.
4470b57cec5SDimitry Andric//
4480b57cec5SDimitry Andric// This calling convention is currently only supported by the stackmap and
4490b57cec5SDimitry Andric// patchpoint intrinsics. All other uses will result in an assert on Debug
4500b57cec5SDimitry Andric// builds. On Release builds we fallback to the X86 C calling convention.
4510b57cec5SDimitry Andricdef RetCC_X86_64_AnyReg : CallingConv<[
4520b57cec5SDimitry Andric  CCCustom<"CC_X86_AnyReg_Error">
4530b57cec5SDimitry Andric]>;
4540b57cec5SDimitry Andric
4550b57cec5SDimitry Andric
// Instantiate CC_<name> and RetCC_<name> for each RegCall variant.
4560b57cec5SDimitry Andricdefm X86_32_RegCall :
4570b57cec5SDimitry Andric	 X86_RegCall_base<RC_X86_32_RegCall>;
4585f757f3fSDimitry Andricdefm X86_32_RegCallv4_Win :
4595f757f3fSDimitry Andric	 X86_RegCall_base<RC_X86_32_RegCallv4_Win>;
4600b57cec5SDimitry Andricdefm X86_Win64_RegCall :
4610b57cec5SDimitry Andric     X86_RegCall_base<RC_X86_64_RegCall_Win>;
4625f757f3fSDimitry Andricdefm X86_Win64_RegCallv4 :
4635f757f3fSDimitry Andric     X86_RegCall_base<RC_X86_64_RegCallv4_Win>;
4640b57cec5SDimitry Andricdefm X86_SysV64_RegCall :
4650b57cec5SDimitry Andric     X86_RegCall_base<RC_X86_64_RegCall_SysV>;
4660b57cec5SDimitry Andric
4670b57cec5SDimitry Andric// This is the root return-value convention for the X86-32 backend.
4680b57cec5SDimitry Andricdef RetCC_X86_32 : CallingConv<[
4690b57cec5SDimitry Andric  // If FastCC, use RetCC_X86_32_Fast.
4700b57cec5SDimitry Andric  CCIfCC<"CallingConv::Fast", CCDelegateTo<RetCC_X86_32_Fast>>,
4718bcb0991SDimitry Andric  CCIfCC<"CallingConv::Tail", CCDelegateTo<RetCC_X86_32_Fast>>,
472480093f4SDimitry Andric  // CFGuard_Check never returns a value so does not need a RetCC.
4730b57cec5SDimitry Andric  // If HiPE, use RetCC_X86_32_HiPE.
4740b57cec5SDimitry Andric  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_32_HiPE>>,
4750b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<RetCC_X86_32_VectorCall>>,
  // RegCall: on Win32 under ABI v.4, use the v.4 convention; otherwise fall
  // through to the generic RegCall convention below (order matters).
4765f757f3fSDimitry Andric  CCIfCC<"CallingConv::X86_RegCall",
4775f757f3fSDimitry Andric    CCIfSubtarget<"isTargetWin32()", CCIfRegCallv4<CCDelegateTo<RetCC_X86_32_RegCallv4_Win>>>>,
4780b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<RetCC_X86_32_RegCall>>,
4790b57cec5SDimitry Andric
4800b57cec5SDimitry Andric  // Otherwise, use RetCC_X86_32_C.
4810b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86_32_C>
4820b57cec5SDimitry Andric]>;
4830b57cec5SDimitry Andric
4840b57cec5SDimitry Andric// This is the root return-value convention for the X86-64 backend.
4850b57cec5SDimitry Andricdef RetCC_X86_64 : CallingConv<[
4860b57cec5SDimitry Andric  // HiPE uses RetCC_X86_64_HiPE
4870b57cec5SDimitry Andric  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>,
4880b57cec5SDimitry Andric
4895f757f3fSDimitry Andric  // Handle AnyReg calls.
4900b57cec5SDimitry Andric  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,
4910b57cec5SDimitry Andric
4920b57cec5SDimitry Andric  // Handle Swift calls.
4930b57cec5SDimitry Andric  CCIfCC<"CallingConv::Swift", CCDelegateTo<RetCC_X86_64_Swift>>,
494fe6060f1SDimitry Andric  CCIfCC<"CallingConv::SwiftTail", CCDelegateTo<RetCC_X86_64_Swift>>,
4950b57cec5SDimitry Andric
4960b57cec5SDimitry Andric  // Handle explicit CC selection
4970b57cec5SDimitry Andric  CCIfCC<"CallingConv::Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
4980b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,
4990b57cec5SDimitry Andric
5000b57cec5SDimitry Andric  // Handle Vectorcall CC
5010b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<RetCC_X86_64_Vectorcall>>,
5020b57cec5SDimitry Andric
  // RegCall dispatch order: Win64 ABI v.4 first, then Win64 pre-v.4, then SysV.
5030b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_RegCall",
5045f757f3fSDimitry Andric    CCIfSubtarget<"isTargetWin64()", CCIfRegCallv4<CCDelegateTo<RetCC_X86_Win64_RegCallv4>>>>,
5055f757f3fSDimitry Andric
5065f757f3fSDimitry Andric  CCIfCC<"CallingConv::X86_RegCall",
5070b57cec5SDimitry Andric          CCIfSubtarget<"isTargetWin64()",
5080b57cec5SDimitry Andric                        CCDelegateTo<RetCC_X86_Win64_RegCall>>>,
5090b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<RetCC_X86_SysV64_RegCall>>,
5100b57cec5SDimitry Andric
5110b57cec5SDimitry Andric  // Mingw64 and native Win64 use Win64 CC
5120b57cec5SDimitry Andric  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,
5130b57cec5SDimitry Andric
5140b57cec5SDimitry Andric  // Otherwise, drop to normal X86-64 CC
5150b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86_64_C>
5160b57cec5SDimitry Andric]>;
5170b57cec5SDimitry Andric
5180b57cec5SDimitry Andric// This is the return-value convention used for the entire X86 backend.
5190b57cec5SDimitry Andriclet Entry = 1 in
5200b57cec5SDimitry Andricdef RetCC_X86 : CallingConv<[
5210b57cec5SDimitry Andric
5220b57cec5SDimitry Andric  // Check if this is the Intel OpenCL built-ins calling convention
5230b57cec5SDimitry Andric  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<RetCC_Intel_OCL_BI>>,
5240b57cec5SDimitry Andric
5250b57cec5SDimitry Andric  CCIfSubtarget<"is64Bit()", CCDelegateTo<RetCC_X86_64>>,
5260b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86_32>
5270b57cec5SDimitry Andric]>;
5280b57cec5SDimitry Andric
5290b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
5300b57cec5SDimitry Andric// X86-64 Argument Calling Conventions
5310b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
5320b57cec5SDimitry Andric
5330b57cec5SDimitry Andricdef CC_X86_64_C : CallingConv<[
5340b57cec5SDimitry Andric  // Handles byval parameters.
5350b57cec5SDimitry Andric  CCIfByVal<CCPassByVal<8, 8>>,
5360b57cec5SDimitry Andric
5370b57cec5SDimitry Andric  // Promote i1/i8/i16/v1i1 arguments to i32.
5380b57cec5SDimitry Andric  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
5390b57cec5SDimitry Andric
5400b57cec5SDimitry Andric  // The 'nest' parameter, if any, is passed in R10.
5410b57cec5SDimitry Andric  CCIfNest<CCIfSubtarget<"isTarget64BitILP32()", CCAssignToReg<[R10D]>>>,
5420b57cec5SDimitry Andric  CCIfNest<CCAssignToReg<[R10]>>,
5430b57cec5SDimitry Andric
5440b57cec5SDimitry Andric  // Pass SwiftSelf in a callee saved register.
5450b57cec5SDimitry Andric  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[R13]>>>,
5460b57cec5SDimitry Andric
5470b57cec5SDimitry Andric  // A SwiftError is passed in R12.
5480b57cec5SDimitry Andric  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,
5490b57cec5SDimitry Andric
550fe6060f1SDimitry Andric  // Pass SwiftAsync in an otherwise callee saved register so that calls to
551fe6060f1SDimitry Andric  // normal functions don't need to save it somewhere.
552fe6060f1SDimitry Andric  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[R14]>>>,
553fe6060f1SDimitry Andric
554fe6060f1SDimitry Andric  // For Swift Calling Conventions, pass sret in %rax.
5550b57cec5SDimitry Andric  CCIfCC<"CallingConv::Swift",
5560b57cec5SDimitry Andric    CCIfSRet<CCIfType<[i64], CCAssignToReg<[RAX]>>>>,
557fe6060f1SDimitry Andric  CCIfCC<"CallingConv::SwiftTail",
558fe6060f1SDimitry Andric    CCIfSRet<CCIfType<[i64], CCAssignToReg<[RAX]>>>>,
5590b57cec5SDimitry Andric
560e8d8bef9SDimitry Andric  // Pointers are always passed in full 64-bit registers.
561e8d8bef9SDimitry Andric  CCIfPtr<CCCustom<"CC_X86_64_Pointer">>,
562e8d8bef9SDimitry Andric
5630b57cec5SDimitry Andric  // The first 6 integer arguments are passed in integer registers.
5640b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>,
5655f757f3fSDimitry Andric
5665f757f3fSDimitry Andric  // i128 can be either passed in two i64 registers, or on the stack, but
5675f757f3fSDimitry Andric  // not split across register and stack. As such, do not allow using R9
5685f757f3fSDimitry Andric  // for a split i64.
5695f757f3fSDimitry Andric  CCIfType<[i64],
5705f757f3fSDimitry Andric           CCIfSplit<CCAssignToReg<[RDI, RSI, RDX, RCX, R8]>>>,
5715f757f3fSDimitry Andric  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [R9]>>>,
5725f757f3fSDimitry Andric
5730b57cec5SDimitry Andric  CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,
5740b57cec5SDimitry Andric
5750b57cec5SDimitry Andric  // The first 8 MMX vector arguments are passed in XMM registers on Darwin.
5760b57cec5SDimitry Andric  CCIfType<[x86mmx],
5770b57cec5SDimitry Andric            CCIfSubtarget<"isTargetDarwin()",
5780b57cec5SDimitry Andric            CCIfSubtarget<"hasSSE2()",
5790b57cec5SDimitry Andric            CCPromoteToType<v2i64>>>>,
5800b57cec5SDimitry Andric
5810b57cec5SDimitry Andric  // Boolean vectors of AVX-512 are passed in SIMD registers.
5820b57cec5SDimitry Andric  // The call from AVX to AVX-512 function should work,
5830b57cec5SDimitry Andric  // since the boolean types in AVX/AVX2 are promoted by default.
5840b57cec5SDimitry Andric  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
5850b57cec5SDimitry Andric  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
5860b57cec5SDimitry Andric  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
5870b57cec5SDimitry Andric  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
5880b57cec5SDimitry Andric  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
5890b57cec5SDimitry Andric  CCIfType<[v64i1], CCPromoteToType<v64i8>>,
5900b57cec5SDimitry Andric
5910b57cec5SDimitry Andric  // The first 8 FP/Vector arguments are passed in XMM registers.
592349cc55cSDimitry Andric  CCIfType<[f16, f32, f64, f128, v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
5930b57cec5SDimitry Andric            CCIfSubtarget<"hasSSE1()",
5940b57cec5SDimitry Andric            CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
5950b57cec5SDimitry Andric
5960b57cec5SDimitry Andric  // The first 8 256-bit vector arguments are passed in YMM registers, unless
5970b57cec5SDimitry Andric  // this is a vararg function.
5980b57cec5SDimitry Andric  // FIXME: This isn't precisely correct; the x86-64 ABI document says that
5990b57cec5SDimitry Andric  // fixed arguments to vararg functions are supposed to be passed in
6000b57cec5SDimitry Andric  // registers.  Actually modeling that would be a lot of work, though.
601349cc55cSDimitry Andric  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
6020b57cec5SDimitry Andric                          CCIfSubtarget<"hasAVX()",
6030b57cec5SDimitry Andric                          CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
6040b57cec5SDimitry Andric                                         YMM4, YMM5, YMM6, YMM7]>>>>,
6050b57cec5SDimitry Andric
6060b57cec5SDimitry Andric  // The first 8 512-bit vector arguments are passed in ZMM registers.
607349cc55cSDimitry Andric  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
6080b57cec5SDimitry Andric            CCIfSubtarget<"hasAVX512()",
6090b57cec5SDimitry Andric            CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>,
6100b57cec5SDimitry Andric
6110b57cec5SDimitry Andric  // Integer/FP values get stored in stack slots that are 8 bytes in size and
6120b57cec5SDimitry Andric  // 8-byte aligned if there are no more registers to hold them.
613349cc55cSDimitry Andric  CCIfType<[i32, i64, f16, f32, f64], CCAssignToStack<8, 8>>,
6140b57cec5SDimitry Andric
6150b57cec5SDimitry Andric  // Long doubles get stack slots whose size and alignment depends on the
6160b57cec5SDimitry Andric  // subtarget.
6170b57cec5SDimitry Andric  CCIfType<[f80, f128], CCAssignToStack<0, 0>>,
6180b57cec5SDimitry Andric
6190b57cec5SDimitry Andric  // Vectors get 16-byte stack slots that are 16-byte aligned.
620349cc55cSDimitry Andric  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64], CCAssignToStack<16, 16>>,
6210b57cec5SDimitry Andric
6220b57cec5SDimitry Andric  // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
623349cc55cSDimitry Andric  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
6240b57cec5SDimitry Andric           CCAssignToStack<32, 32>>,
6250b57cec5SDimitry Andric
6260b57cec5SDimitry Andric  // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
627349cc55cSDimitry Andric  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
6280b57cec5SDimitry Andric           CCAssignToStack<64, 64>>
6290b57cec5SDimitry Andric]>;
6300b57cec5SDimitry Andric
6310b57cec5SDimitry Andric// Calling convention used on Win64
6320b57cec5SDimitry Andricdef CC_X86_Win64_C : CallingConv<[
6330b57cec5SDimitry Andric  // FIXME: Handle varargs.
6340b57cec5SDimitry Andric
6350b57cec5SDimitry Andric  // Byval aggregates are passed by pointer
6360b57cec5SDimitry Andric  CCIfByVal<CCPassIndirect<i64>>,
6370b57cec5SDimitry Andric
6380b57cec5SDimitry Andric  // Promote i1/v1i1 arguments to i8.
6390b57cec5SDimitry Andric  CCIfType<[i1, v1i1], CCPromoteToType<i8>>,
6400b57cec5SDimitry Andric
6410b57cec5SDimitry Andric  // The 'nest' parameter, if any, is passed in R10.
6420b57cec5SDimitry Andric  CCIfNest<CCAssignToReg<[R10]>>,
6430b57cec5SDimitry Andric
6440b57cec5SDimitry Andric  // A SwiftError is passed in R12.
6450b57cec5SDimitry Andric  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,
6460b57cec5SDimitry Andric
647fe6060f1SDimitry Andric  // Pass SwiftSelf in a callee saved register.
648fe6060f1SDimitry Andric  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[R13]>>>,
649fe6060f1SDimitry Andric
650fe6060f1SDimitry Andric  // Pass SwiftAsync in an otherwise callee saved register so that calls to
651fe6060f1SDimitry Andric  // normal functions don't need to save it somewhere.
652fe6060f1SDimitry Andric  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[R14]>>>,
653fe6060f1SDimitry Andric
654480093f4SDimitry Andric  // The 'CFGuardTarget' parameter, if any, is passed in RAX.
655480093f4SDimitry Andric  CCIfCFGuardTarget<CCAssignToReg<[RAX]>>,
656480093f4SDimitry Andric
6570b57cec5SDimitry Andric  // 128 bit vectors are passed by pointer
658349cc55cSDimitry Andric  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64], CCPassIndirect<i64>>,
6590b57cec5SDimitry Andric
6600b57cec5SDimitry Andric  // 256 bit vectors are passed by pointer
661349cc55cSDimitry Andric  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64], CCPassIndirect<i64>>,
6620b57cec5SDimitry Andric
6630b57cec5SDimitry Andric  // 512 bit vectors are passed by pointer
664349cc55cSDimitry Andric  CCIfType<[v64i8, v32i16, v16i32, v32f16, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,
6650b57cec5SDimitry Andric
6660b57cec5SDimitry Andric  // Long doubles are passed by pointer
6670b57cec5SDimitry Andric  CCIfType<[f80], CCPassIndirect<i64>>,
6680b57cec5SDimitry Andric
6690b57cec5SDimitry Andric  // The first 4 MMX vector arguments are passed in GPRs.
6700b57cec5SDimitry Andric  CCIfType<[x86mmx], CCBitConvertToType<i64>>,
6710b57cec5SDimitry Andric
672480093f4SDimitry Andric  // If SSE was disabled, pass FP values smaller than 64-bits as integers in
673480093f4SDimitry Andric  // GPRs or on the stack.
674480093f4SDimitry Andric  CCIfType<[f32], CCIfNotSubtarget<"hasSSE1()", CCBitConvertToType<i32>>>,
675480093f4SDimitry Andric  CCIfType<[f64], CCIfNotSubtarget<"hasSSE1()", CCBitConvertToType<i64>>>,
676480093f4SDimitry Andric
677480093f4SDimitry Andric  // The first 4 FP/Vector arguments are passed in XMM registers.
678349cc55cSDimitry Andric  CCIfType<[f16, f32, f64],
679480093f4SDimitry Andric           CCAssignToRegWithShadow<[XMM0, XMM1, XMM2, XMM3],
680480093f4SDimitry Andric                                   [RCX , RDX , R8  , R9  ]>>,
681480093f4SDimitry Andric
6820b57cec5SDimitry Andric  // The first 4 integer arguments are passed in integer registers.
6830b57cec5SDimitry Andric  CCIfType<[i8 ], CCAssignToRegWithShadow<[CL  , DL  , R8B , R9B ],
6840b57cec5SDimitry Andric                                          [XMM0, XMM1, XMM2, XMM3]>>,
6850b57cec5SDimitry Andric  CCIfType<[i16], CCAssignToRegWithShadow<[CX  , DX  , R8W , R9W ],
6860b57cec5SDimitry Andric                                          [XMM0, XMM1, XMM2, XMM3]>>,
6870b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
6880b57cec5SDimitry Andric                                          [XMM0, XMM1, XMM2, XMM3]>>,
6890b57cec5SDimitry Andric
6900b57cec5SDimitry Andric  // Do not pass the sret argument in RCX, the Win64 thiscall calling
6910b57cec5SDimitry Andric  // convention requires "this" to be passed in RCX.
6920b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_ThisCall",
6930b57cec5SDimitry Andric    CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[RDX , R8  , R9  ],
6940b57cec5SDimitry Andric                                                     [XMM1, XMM2, XMM3]>>>>,
6950b57cec5SDimitry Andric
6960b57cec5SDimitry Andric  CCIfType<[i64], CCAssignToRegWithShadow<[RCX , RDX , R8  , R9  ],
6970b57cec5SDimitry Andric                                          [XMM0, XMM1, XMM2, XMM3]>>,
6980b57cec5SDimitry Andric
6990b57cec5SDimitry Andric  // Integer/FP values get stored in stack slots that are 8 bytes in size and
7000b57cec5SDimitry Andric  // 8-byte aligned if there are no more registers to hold them.
701349cc55cSDimitry Andric  CCIfType<[i8, i16, i32, i64, f16, f32, f64], CCAssignToStack<8, 8>>
7020b57cec5SDimitry Andric]>;
7030b57cec5SDimitry Andric
7040b57cec5SDimitry Andricdef CC_X86_Win64_VectorCall : CallingConv<[
7050b57cec5SDimitry Andric  CCCustom<"CC_X86_64_VectorCall">,
7060b57cec5SDimitry Andric
7070b57cec5SDimitry Andric  // Delegate to fastcall to handle integer types.
7080b57cec5SDimitry Andric  CCDelegateTo<CC_X86_Win64_C>
7090b57cec5SDimitry Andric]>;
7100b57cec5SDimitry Andric
7110b57cec5SDimitry Andric
7120b57cec5SDimitry Andricdef CC_X86_64_GHC : CallingConv<[
7130b57cec5SDimitry Andric  // Promote i8/i16/i32 arguments to i64.
7140b57cec5SDimitry Andric  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
7150b57cec5SDimitry Andric
7160b57cec5SDimitry Andric  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
7170b57cec5SDimitry Andric  CCIfType<[i64],
7180b57cec5SDimitry Andric            CCAssignToReg<[R13, RBP, R12, RBX, R14, RSI, RDI, R8, R9, R15]>>,
7190b57cec5SDimitry Andric
7200b57cec5SDimitry Andric  // Pass in STG registers: F1, F2, F3, F4, D1, D2
7210b57cec5SDimitry Andric  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
7220b57cec5SDimitry Andric            CCIfSubtarget<"hasSSE1()",
7230b57cec5SDimitry Andric            CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>,
7240b57cec5SDimitry Andric  // AVX
7250b57cec5SDimitry Andric  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
7260b57cec5SDimitry Andric            CCIfSubtarget<"hasAVX()",
7270b57cec5SDimitry Andric            CCAssignToReg<[YMM1, YMM2, YMM3, YMM4, YMM5, YMM6]>>>,
7280b57cec5SDimitry Andric  // AVX-512
7290b57cec5SDimitry Andric  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
7300b57cec5SDimitry Andric            CCIfSubtarget<"hasAVX512()",
7310b57cec5SDimitry Andric            CCAssignToReg<[ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6]>>>
7320b57cec5SDimitry Andric]>;
7330b57cec5SDimitry Andric
7340b57cec5SDimitry Andricdef CC_X86_64_HiPE : CallingConv<[
7350b57cec5SDimitry Andric  // Promote i8/i16/i32 arguments to i64.
7360b57cec5SDimitry Andric  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
7370b57cec5SDimitry Andric
7380b57cec5SDimitry Andric  // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2, ARG3
7390b57cec5SDimitry Andric  CCIfType<[i64], CCAssignToReg<[R15, RBP, RSI, RDX, RCX, R8]>>,
7400b57cec5SDimitry Andric
7410b57cec5SDimitry Andric  // Integer/FP values get stored in stack slots that are 8 bytes in size and
7420b57cec5SDimitry Andric  // 8-byte aligned if there are no more registers to hold them.
7430b57cec5SDimitry Andric  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
7440b57cec5SDimitry Andric]>;
7450b57cec5SDimitry Andric
7460b57cec5SDimitry Andric// No explicit register is specified for the AnyReg calling convention. The
7470b57cec5SDimitry Andric// register allocator may assign the arguments to any free register.
7480b57cec5SDimitry Andric//
7490b57cec5SDimitry Andric// This calling convention is currently only supported by the stackmap and
7500b57cec5SDimitry Andric// patchpoint intrinsics. All other uses will result in an assert on Debug
7510b57cec5SDimitry Andric// builds. On Release builds we fallback to the X86 C calling convention.
7520b57cec5SDimitry Andricdef CC_X86_64_AnyReg : CallingConv<[
7530b57cec5SDimitry Andric  CCCustom<"CC_X86_AnyReg_Error">
7540b57cec5SDimitry Andric]>;
7550b57cec5SDimitry Andric
7560b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
7570b57cec5SDimitry Andric// X86 C Calling Convention
7580b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
7590b57cec5SDimitry Andric
7600b57cec5SDimitry Andric/// CC_X86_32_Vector_Common - In all X86-32 calling conventions, extra vector
7610b57cec5SDimitry Andric/// values are spilled on the stack.
7620b57cec5SDimitry Andricdef CC_X86_32_Vector_Common : CallingConv<[
7630b57cec5SDimitry Andric  // Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
764349cc55cSDimitry Andric  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
765349cc55cSDimitry Andric           CCAssignToStack<16, 16>>,
7660b57cec5SDimitry Andric
7670b57cec5SDimitry Andric  // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
768349cc55cSDimitry Andric  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
7690b57cec5SDimitry Andric           CCAssignToStack<32, 32>>,
7700b57cec5SDimitry Andric
7710b57cec5SDimitry Andric  // 512-bit AVX 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
772349cc55cSDimitry Andric  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
7730b57cec5SDimitry Andric           CCAssignToStack<64, 64>>
7740b57cec5SDimitry Andric]>;
7750b57cec5SDimitry Andric
776349cc55cSDimitry Andric/// CC_X86_Win32_Vector - In X86 Win32 calling conventions, extra vector
777349cc55cSDimitry Andric/// values are spilled on the stack.
778349cc55cSDimitry Andricdef CC_X86_Win32_Vector : CallingConv<[
779349cc55cSDimitry Andric  // Other SSE vectors get 16-byte stack slots that are 4-byte aligned.
780349cc55cSDimitry Andric  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
781349cc55cSDimitry Andric           CCAssignToStack<16, 4>>,
782349cc55cSDimitry Andric
783349cc55cSDimitry Andric  // 256-bit AVX vectors get 32-byte stack slots that are 4-byte aligned.
784349cc55cSDimitry Andric  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
785349cc55cSDimitry Andric           CCAssignToStack<32, 4>>,
786349cc55cSDimitry Andric
787349cc55cSDimitry Andric  // 512-bit AVX 512-bit vectors get 64-byte stack slots that are 4-byte aligned.
788349cc55cSDimitry Andric  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
789349cc55cSDimitry Andric           CCAssignToStack<64, 4>>
790349cc55cSDimitry Andric]>;
791349cc55cSDimitry Andric
7920b57cec5SDimitry Andric// CC_X86_32_Vector_Standard - The first 3 vector arguments are passed in
7930b57cec5SDimitry Andric// vector registers
7940b57cec5SDimitry Andricdef CC_X86_32_Vector_Standard : CallingConv<[
7950b57cec5SDimitry Andric  // SSE vector arguments are passed in XMM registers.
796349cc55cSDimitry Andric  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
7970b57cec5SDimitry Andric                CCAssignToReg<[XMM0, XMM1, XMM2]>>>,
7980b57cec5SDimitry Andric
7990b57cec5SDimitry Andric  // AVX 256-bit vector arguments are passed in YMM registers.
800349cc55cSDimitry Andric  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
8010b57cec5SDimitry Andric                CCIfSubtarget<"hasAVX()",
8020b57cec5SDimitry Andric                CCAssignToReg<[YMM0, YMM1, YMM2]>>>>,
8030b57cec5SDimitry Andric
8040b57cec5SDimitry Andric  // AVX 512-bit vector arguments are passed in ZMM registers.
805349cc55cSDimitry Andric  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
8060b57cec5SDimitry Andric                CCAssignToReg<[ZMM0, ZMM1, ZMM2]>>>,
8070b57cec5SDimitry Andric
808349cc55cSDimitry Andric  CCIfIsVarArgOnWin<CCDelegateTo<CC_X86_Win32_Vector>>,
8090b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_Vector_Common>
8100b57cec5SDimitry Andric]>;
8110b57cec5SDimitry Andric
8120b57cec5SDimitry Andric// CC_X86_32_Vector_Darwin - The first 4 vector arguments are passed in
8130b57cec5SDimitry Andric// vector registers.
8140b57cec5SDimitry Andricdef CC_X86_32_Vector_Darwin : CallingConv<[
8150b57cec5SDimitry Andric  // SSE vector arguments are passed in XMM registers.
816349cc55cSDimitry Andric  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
8170b57cec5SDimitry Andric                CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,
8180b57cec5SDimitry Andric
8190b57cec5SDimitry Andric  // AVX 256-bit vector arguments are passed in YMM registers.
820349cc55cSDimitry Andric  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
8210b57cec5SDimitry Andric                CCIfSubtarget<"hasAVX()",
8220b57cec5SDimitry Andric                CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,
8230b57cec5SDimitry Andric
8240b57cec5SDimitry Andric  // AVX 512-bit vector arguments are passed in ZMM registers.
825349cc55cSDimitry Andric  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
8260b57cec5SDimitry Andric                CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>>,
8270b57cec5SDimitry Andric
8280b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_Vector_Common>
8290b57cec5SDimitry Andric]>;
8300b57cec5SDimitry Andric
8310b57cec5SDimitry Andric/// CC_X86_32_Common - In all X86-32 calling conventions, extra integers and FP
8320b57cec5SDimitry Andric/// values are spilled on the stack.
8330b57cec5SDimitry Andricdef CC_X86_32_Common : CallingConv<[
8345ffd83dbSDimitry Andric  // Handles byval/preallocated parameters.
8350b57cec5SDimitry Andric  CCIfByVal<CCPassByVal<4, 4>>,
8365ffd83dbSDimitry Andric  CCIfPreallocated<CCPassByVal<4, 4>>,
8370b57cec5SDimitry Andric
8380b57cec5SDimitry Andric  // The first 3 float or double arguments, if marked 'inreg' and if the call
8390b57cec5SDimitry Andric  // is not a vararg call and if SSE2 is available, are passed in SSE registers.
8400b57cec5SDimitry Andric  CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
8410b57cec5SDimitry Andric                CCIfSubtarget<"hasSSE2()",
8420b57cec5SDimitry Andric                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,
8430b57cec5SDimitry Andric
844349cc55cSDimitry Andric  CCIfNotVarArg<CCIfInReg<CCIfType<[f16], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
845349cc55cSDimitry Andric
8460b57cec5SDimitry Andric  // The first 3 __m64 vector arguments are passed in mmx registers if the
8470b57cec5SDimitry Andric  // call is not a vararg call.
8480b57cec5SDimitry Andric  CCIfNotVarArg<CCIfType<[x86mmx],
8490b57cec5SDimitry Andric                CCAssignToReg<[MM0, MM1, MM2]>>>,
8500b57cec5SDimitry Andric
851349cc55cSDimitry Andric  CCIfType<[f16], CCAssignToStack<4, 4>>,
852349cc55cSDimitry Andric
8530b57cec5SDimitry Andric  // Integer/Float values get stored in stack slots that are 4 bytes in
8540b57cec5SDimitry Andric  // size and 4-byte aligned.
8550b57cec5SDimitry Andric  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
8560b57cec5SDimitry Andric
8570b57cec5SDimitry Andric  // Doubles get 8-byte slots that are 4-byte aligned.
8580b57cec5SDimitry Andric  CCIfType<[f64], CCAssignToStack<8, 4>>,
8590b57cec5SDimitry Andric
860349cc55cSDimitry Andric  // Long doubles get slots whose size and alignment depends on the subtarget.
861349cc55cSDimitry Andric  CCIfType<[f80], CCAssignToStack<0, 0>>,
8620b57cec5SDimitry Andric
8630b57cec5SDimitry Andric  // Boolean vectors of AVX-512 are passed in SIMD registers.
8640b57cec5SDimitry Andric  // The call from AVX to AVX-512 function should work,
8650b57cec5SDimitry Andric  // since the boolean types in AVX/AVX2 are promoted by default.
8660b57cec5SDimitry Andric  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
8670b57cec5SDimitry Andric  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
8680b57cec5SDimitry Andric  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
8690b57cec5SDimitry Andric  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
8700b57cec5SDimitry Andric  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
8710b57cec5SDimitry Andric  CCIfType<[v64i1], CCPromoteToType<v64i8>>,
8720b57cec5SDimitry Andric
8730b57cec5SDimitry Andric  // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
8740b57cec5SDimitry Andric  // passed in the parameter area.
8750b57cec5SDimitry Andric  CCIfType<[x86mmx], CCAssignToStack<8, 4>>,
8760b57cec5SDimitry Andric
8770b57cec5SDimitry Andric  // Darwin passes vectors in a form that differs from the i386 psABI
8780b57cec5SDimitry Andric  CCIfSubtarget<"isTargetDarwin()", CCDelegateTo<CC_X86_32_Vector_Darwin>>,
8790b57cec5SDimitry Andric
8800b57cec5SDimitry Andric  // Otherwise, drop to 'normal' X86-32 CC
8810b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_Vector_Standard>
8820b57cec5SDimitry Andric]>;
8830b57cec5SDimitry Andric
8840b57cec5SDimitry Andricdef CC_X86_32_C : CallingConv<[
8850b57cec5SDimitry Andric  // Promote i1/i8/i16/v1i1 arguments to i32.
8860b57cec5SDimitry Andric  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
8870b57cec5SDimitry Andric
8880b57cec5SDimitry Andric  // The 'nest' parameter, if any, is passed in ECX.
8890b57cec5SDimitry Andric  CCIfNest<CCAssignToReg<[ECX]>>,
8900b57cec5SDimitry Andric
891fe6060f1SDimitry Andric  // On swifttailcc pass swiftself in ECX.
892fe6060f1SDimitry Andric  CCIfCC<"CallingConv::SwiftTail",
893fe6060f1SDimitry Andric         CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[ECX]>>>>,
894fe6060f1SDimitry Andric
8950b57cec5SDimitry Andric  // The first 3 integer arguments, if marked 'inreg' and if the call is not
8960b57cec5SDimitry Andric  // a vararg call, are passed in integer registers.
8970b57cec5SDimitry Andric  CCIfNotVarArg<CCIfInReg<CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>>>,
8980b57cec5SDimitry Andric
8990b57cec5SDimitry Andric  // Otherwise, same as everything else.
9000b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_Common>
9010b57cec5SDimitry Andric]>;
9020b57cec5SDimitry Andric
9030b57cec5SDimitry Andricdef CC_X86_32_MCU : CallingConv<[
9040b57cec5SDimitry Andric  // Handles byval parameters.  Note that, like FastCC, we can't rely on
9050b57cec5SDimitry Andric  // the delegation to CC_X86_32_Common because that happens after code that
9060b57cec5SDimitry Andric  // puts arguments in registers.
9070b57cec5SDimitry Andric  CCIfByVal<CCPassByVal<4, 4>>,
9080b57cec5SDimitry Andric
9090b57cec5SDimitry Andric  // Promote i1/i8/i16/v1i1 arguments to i32.
9100b57cec5SDimitry Andric  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
9110b57cec5SDimitry Andric
9120b57cec5SDimitry Andric  // If the call is not a vararg call, some arguments may be passed
9130b57cec5SDimitry Andric  // in integer registers.
9140b57cec5SDimitry Andric  CCIfNotVarArg<CCIfType<[i32], CCCustom<"CC_X86_32_MCUInReg">>>,
9150b57cec5SDimitry Andric
9160b57cec5SDimitry Andric  // Otherwise, same as everything else.
9170b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_Common>
9180b57cec5SDimitry Andric]>;
9190b57cec5SDimitry Andric
9200b57cec5SDimitry Andricdef CC_X86_32_FastCall : CallingConv<[
9210b57cec5SDimitry Andric  // Promote i1 to i8.
9220b57cec5SDimitry Andric  CCIfType<[i1], CCPromoteToType<i8>>,
9230b57cec5SDimitry Andric
9240b57cec5SDimitry Andric  // The 'nest' parameter, if any, is passed in EAX.
9250b57cec5SDimitry Andric  CCIfNest<CCAssignToReg<[EAX]>>,
9260b57cec5SDimitry Andric
9270b57cec5SDimitry Andric  // The first 2 integer arguments are passed in ECX/EDX
9280b57cec5SDimitry Andric  CCIfInReg<CCIfType<[ i8], CCAssignToReg<[ CL,  DL]>>>,
9290b57cec5SDimitry Andric  CCIfInReg<CCIfType<[i16], CCAssignToReg<[ CX,  DX]>>>,
9300b57cec5SDimitry Andric  CCIfInReg<CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>>,
9310b57cec5SDimitry Andric
9320b57cec5SDimitry Andric  // Otherwise, same as everything else.
9330b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_Common>
9340b57cec5SDimitry Andric]>;
9350b57cec5SDimitry Andric
9360b57cec5SDimitry Andricdef CC_X86_Win32_VectorCall : CallingConv<[
9370b57cec5SDimitry Andric  // Pass floating point in XMMs
9380b57cec5SDimitry Andric  CCCustom<"CC_X86_32_VectorCall">,
9390b57cec5SDimitry Andric
9400b57cec5SDimitry Andric  // Delegate to fastcall to handle integer types.
9410b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_FastCall>
9420b57cec5SDimitry Andric]>;
9430b57cec5SDimitry Andric
9440b57cec5SDimitry Andricdef CC_X86_32_ThisCall_Common : CallingConv<[
9450b57cec5SDimitry Andric  // The first integer argument is passed in ECX
9460b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToReg<[ECX]>>,
9470b57cec5SDimitry Andric
9480b57cec5SDimitry Andric  // Otherwise, same as everything else.
9490b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_Common>
9500b57cec5SDimitry Andric]>;
9510b57cec5SDimitry Andric
9520b57cec5SDimitry Andricdef CC_X86_32_ThisCall_Mingw : CallingConv<[
9530b57cec5SDimitry Andric  // Promote i1/i8/i16/v1i1 arguments to i32.
9540b57cec5SDimitry Andric  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
9550b57cec5SDimitry Andric
9560b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_ThisCall_Common>
9570b57cec5SDimitry Andric]>;
9580b57cec5SDimitry Andric
9590b57cec5SDimitry Andricdef CC_X86_32_ThisCall_Win : CallingConv<[
9600b57cec5SDimitry Andric  // Promote i1/i8/i16/v1i1 arguments to i32.
9610b57cec5SDimitry Andric  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
9620b57cec5SDimitry Andric
9630b57cec5SDimitry Andric  // Pass sret arguments indirectly through stack.
9640b57cec5SDimitry Andric  CCIfSRet<CCAssignToStack<4, 4>>,
9650b57cec5SDimitry Andric
9660b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_ThisCall_Common>
9670b57cec5SDimitry Andric]>;
9680b57cec5SDimitry Andric
9690b57cec5SDimitry Andricdef CC_X86_32_ThisCall : CallingConv<[
9700b57cec5SDimitry Andric  CCIfSubtarget<"isTargetCygMing()", CCDelegateTo<CC_X86_32_ThisCall_Mingw>>,
9710b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_ThisCall_Win>
9720b57cec5SDimitry Andric]>;
9730b57cec5SDimitry Andric
9740b57cec5SDimitry Andricdef CC_X86_32_FastCC : CallingConv<[
9750b57cec5SDimitry Andric  // Handles byval parameters.  Note that we can't rely on the delegation
9760b57cec5SDimitry Andric  // to CC_X86_32_Common for this because that happens after code that
9770b57cec5SDimitry Andric  // puts arguments in registers.
9780b57cec5SDimitry Andric  CCIfByVal<CCPassByVal<4, 4>>,
9790b57cec5SDimitry Andric
9800b57cec5SDimitry Andric  // Promote i1/i8/i16/v1i1 arguments to i32.
9810b57cec5SDimitry Andric  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
9820b57cec5SDimitry Andric
9830b57cec5SDimitry Andric  // The 'nest' parameter, if any, is passed in EAX.
9840b57cec5SDimitry Andric  CCIfNest<CCAssignToReg<[EAX]>>,
9850b57cec5SDimitry Andric
9860b57cec5SDimitry Andric  // The first 2 integer arguments are passed in ECX/EDX
9870b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,
9880b57cec5SDimitry Andric
9890b57cec5SDimitry Andric  // The first 3 float or double arguments, if the call is not a vararg
9900b57cec5SDimitry Andric  // call and if SSE2 is available, are passed in SSE registers.
9910b57cec5SDimitry Andric  CCIfNotVarArg<CCIfType<[f32,f64],
9920b57cec5SDimitry Andric                CCIfSubtarget<"hasSSE2()",
9930b57cec5SDimitry Andric                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
9940b57cec5SDimitry Andric
9950b57cec5SDimitry Andric  // Doubles get 8-byte slots that are 8-byte aligned.
9960b57cec5SDimitry Andric  CCIfType<[f64], CCAssignToStack<8, 8>>,
9970b57cec5SDimitry Andric
9980b57cec5SDimitry Andric  // Otherwise, same as everything else.
9990b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_Common>
10000b57cec5SDimitry Andric]>;
10010b57cec5SDimitry Andric
1002480093f4SDimitry Andricdef CC_X86_Win32_CFGuard_Check : CallingConv<[
1003480093f4SDimitry Andric  // The CFGuard check call takes exactly one integer argument
1004480093f4SDimitry Andric  // (i.e. the target function address), which is passed in ECX.
1005480093f4SDimitry Andric  CCIfType<[i32], CCAssignToReg<[ECX]>>
1006480093f4SDimitry Andric]>;
1007480093f4SDimitry Andric
10080b57cec5SDimitry Andricdef CC_X86_32_GHC : CallingConv<[
10090b57cec5SDimitry Andric  // Promote i8/i16 arguments to i32.
10100b57cec5SDimitry Andric  CCIfType<[i8, i16], CCPromoteToType<i32>>,
10110b57cec5SDimitry Andric
10120b57cec5SDimitry Andric  // Pass in STG registers: Base, Sp, Hp, R1
10130b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToReg<[EBX, EBP, EDI, ESI]>>
10140b57cec5SDimitry Andric]>;
10150b57cec5SDimitry Andric
10160b57cec5SDimitry Andricdef CC_X86_32_HiPE : CallingConv<[
10170b57cec5SDimitry Andric  // Promote i8/i16 arguments to i32.
10180b57cec5SDimitry Andric  CCIfType<[i8, i16], CCPromoteToType<i32>>,
10190b57cec5SDimitry Andric
10200b57cec5SDimitry Andric  // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2
10210b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX, ECX]>>,
10220b57cec5SDimitry Andric
10230b57cec5SDimitry Andric  // Integer/Float values get stored in stack slots that are 4 bytes in
10240b57cec5SDimitry Andric  // size and 4-byte aligned.
10250b57cec5SDimitry Andric  CCIfType<[i32, f32], CCAssignToStack<4, 4>>
10260b57cec5SDimitry Andric]>;
10270b57cec5SDimitry Andric
10280b57cec5SDimitry Andric// X86-64 Intel OpenCL built-ins calling convention.
10290b57cec5SDimitry Andricdef CC_Intel_OCL_BI : CallingConv<[
10300b57cec5SDimitry Andric
10310b57cec5SDimitry Andric  CCIfType<[i32], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[ECX, EDX, R8D, R9D]>>>,
10320b57cec5SDimitry Andric  CCIfType<[i64], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[RCX, RDX, R8,  R9 ]>>>,
10330b57cec5SDimitry Andric
10340b57cec5SDimitry Andric  CCIfType<[i32], CCIfSubtarget<"is64Bit()", CCAssignToReg<[EDI, ESI, EDX, ECX]>>>,
10350b57cec5SDimitry Andric  CCIfType<[i64], CCIfSubtarget<"is64Bit()", CCAssignToReg<[RDI, RSI, RDX, RCX]>>>,
10360b57cec5SDimitry Andric
10370b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToStack<4, 4>>,
10380b57cec5SDimitry Andric
10390b57cec5SDimitry Andric  // The SSE vector arguments are passed in XMM registers.
10400b57cec5SDimitry Andric  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
10410b57cec5SDimitry Andric           CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
10420b57cec5SDimitry Andric
10430b57cec5SDimitry Andric  // The 256-bit vector arguments are passed in YMM registers.
10440b57cec5SDimitry Andric  CCIfType<[v8f32, v4f64, v8i32, v4i64],
10450b57cec5SDimitry Andric           CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>,
10460b57cec5SDimitry Andric
10470b57cec5SDimitry Andric  // The 512-bit vector arguments are passed in ZMM registers.
10480b57cec5SDimitry Andric  CCIfType<[v16f32, v8f64, v16i32, v8i64],
10490b57cec5SDimitry Andric           CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>,
10500b57cec5SDimitry Andric
10510b57cec5SDimitry Andric  // Pass masks in mask registers
10520b57cec5SDimitry Andric  CCIfType<[v16i1, v8i1], CCAssignToReg<[K1]>>,
10530b57cec5SDimitry Andric
10540b57cec5SDimitry Andric  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
10550b57cec5SDimitry Andric  CCIfSubtarget<"is64Bit()",       CCDelegateTo<CC_X86_64_C>>,
10560b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_C>
10570b57cec5SDimitry Andric]>;
10580b57cec5SDimitry Andric
1059*0fca6ea1SDimitry Andricdef CC_X86_64_Preserve_None : CallingConv<[
1060*0fca6ea1SDimitry Andric  // We don't preserve general registers, so all of them can be used to pass
1061*0fca6ea1SDimitry Andric  // arguments except
1062*0fca6ea1SDimitry Andric  //   - RBP        frame pointer
1063*0fca6ea1SDimitry Andric  //   - R10        'nest' parameter
1064*0fca6ea1SDimitry Andric  //   - RBX        base pointer
1065*0fca6ea1SDimitry Andric  //   - R16 - R31  these are not available everywhere
1066*0fca6ea1SDimitry Andric  // Use non-volatile registers first, so functions using this convention can
1067*0fca6ea1SDimitry Andric  // call "normal" functions without saving and restoring incoming values:
1068*0fca6ea1SDimitry Andric  CCIfType<[i32], CCAssignToReg<[R12D, R13D, R14D, R15D, EDI, ESI,
1069*0fca6ea1SDimitry Andric                                 EDX, ECX, R8D, R9D, R11D, EAX]>>,
1070*0fca6ea1SDimitry Andric
1071*0fca6ea1SDimitry Andric  CCIfType<[i64], CCAssignToReg<[R12, R13, R14, R15, RDI, RSI,
1072*0fca6ea1SDimitry Andric                                 RDX, RCX, R8, R9, R11, RAX]>>,
1073*0fca6ea1SDimitry Andric
1074*0fca6ea1SDimitry Andric  // Otherwise it's the same as the regular C calling convention.
1075*0fca6ea1SDimitry Andric  CCDelegateTo<CC_X86_64_C>
1076*0fca6ea1SDimitry Andric]>;
1077*0fca6ea1SDimitry Andric
10780b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
10790b57cec5SDimitry Andric// X86 Root Argument Calling Conventions
10800b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
10810b57cec5SDimitry Andric
10820b57cec5SDimitry Andric// This is the root argument convention for the X86-32 backend.
10830b57cec5SDimitry Andricdef CC_X86_32 : CallingConv<[
10840b57cec5SDimitry Andric  // X86_INTR calling convention is valid in MCU target and should override the
10850b57cec5SDimitry Andric  // MCU calling convention. Thus, this should be checked before isTargetMCU().
10860b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_INTR", CCCustom<"CC_X86_Intr">>,
10870b57cec5SDimitry Andric  CCIfSubtarget<"isTargetMCU()", CCDelegateTo<CC_X86_32_MCU>>,
10880b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
10890b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win32_VectorCall>>,
10900b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_ThisCall", CCDelegateTo<CC_X86_32_ThisCall>>,
1091480093f4SDimitry Andric  CCIfCC<"CallingConv::CFGuard_Check", CCDelegateTo<CC_X86_Win32_CFGuard_Check>>,
10920b57cec5SDimitry Andric  CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
10938bcb0991SDimitry Andric  CCIfCC<"CallingConv::Tail", CCDelegateTo<CC_X86_32_FastCC>>,
10940b57cec5SDimitry Andric  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,
10950b57cec5SDimitry Andric  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_32_HiPE>>,
10965f757f3fSDimitry Andric  CCIfCC<"CallingConv::X86_RegCall",
10975f757f3fSDimitry Andric    CCIfSubtarget<"isTargetWin32()", CCIfRegCallv4<CCDelegateTo<CC_X86_32_RegCallv4_Win>>>>,
10980b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<CC_X86_32_RegCall>>,
10990b57cec5SDimitry Andric
11000b57cec5SDimitry Andric  // Otherwise, drop to normal X86-32 CC
11010b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_C>
11020b57cec5SDimitry Andric]>;
11030b57cec5SDimitry Andric
11040b57cec5SDimitry Andric// This is the root argument convention for the X86-64 backend.
11050b57cec5SDimitry Andricdef CC_X86_64 : CallingConv<[
11060b57cec5SDimitry Andric  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
11070b57cec5SDimitry Andric  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
11080b57cec5SDimitry Andric  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_X86_64_AnyReg>>,
11090b57cec5SDimitry Andric  CCIfCC<"CallingConv::Win64", CCDelegateTo<CC_X86_Win64_C>>,
11100b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
11110b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win64_VectorCall>>,
11120b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_RegCall",
11135f757f3fSDimitry Andric    CCIfSubtarget<"isTargetWin64()", CCIfRegCallv4<CCDelegateTo<CC_X86_Win64_RegCallv4>>>>,
11145f757f3fSDimitry Andric  CCIfCC<"CallingConv::X86_RegCall",
11150b57cec5SDimitry Andric    CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_RegCall>>>,
11160b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<CC_X86_SysV64_RegCall>>,
1117*0fca6ea1SDimitry Andric  CCIfCC<"CallingConv::PreserveNone", CCDelegateTo<CC_X86_64_Preserve_None>>,
11180b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_INTR", CCCustom<"CC_X86_Intr">>,
11190b57cec5SDimitry Andric
11200b57cec5SDimitry Andric  // Mingw64 and native Win64 use Win64 CC
11210b57cec5SDimitry Andric  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
11220b57cec5SDimitry Andric
11230b57cec5SDimitry Andric  // Otherwise, drop to normal X86-64 CC
11240b57cec5SDimitry Andric  CCDelegateTo<CC_X86_64_C>
11250b57cec5SDimitry Andric]>;
11260b57cec5SDimitry Andric
11270b57cec5SDimitry Andric// This is the argument convention used for the entire X86 backend.
11280b57cec5SDimitry Andriclet Entry = 1 in
11290b57cec5SDimitry Andricdef CC_X86 : CallingConv<[
11300b57cec5SDimitry Andric  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<CC_Intel_OCL_BI>>,
11310b57cec5SDimitry Andric  CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
11320b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32>
11330b57cec5SDimitry Andric]>;
11340b57cec5SDimitry Andric
11350b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
11360b57cec5SDimitry Andric// Callee-saved Registers.
11370b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
11380b57cec5SDimitry Andric
11390b57cec5SDimitry Andricdef CSR_NoRegs : CalleeSavedRegs<(add)>;
11400b57cec5SDimitry Andric
11410b57cec5SDimitry Andricdef CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
11420b57cec5SDimitry Andricdef CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;
11430b57cec5SDimitry Andric
11440b57cec5SDimitry Andricdef CSR_64_SwiftError : CalleeSavedRegs<(sub CSR_64, R12)>;
1145fe6060f1SDimitry Andricdef CSR_64_SwiftTail : CalleeSavedRegs<(sub CSR_64, R13, R14)>;
11460b57cec5SDimitry Andric
11470b57cec5SDimitry Andricdef CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>;
11480b57cec5SDimitry Andricdef CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;
11490b57cec5SDimitry Andric
11500b57cec5SDimitry Andricdef CSR_Win64_NoSSE : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15)>;
11510b57cec5SDimitry Andric
11520b57cec5SDimitry Andricdef CSR_Win64 : CalleeSavedRegs<(add CSR_Win64_NoSSE,
11530b57cec5SDimitry Andric                                     (sequence "XMM%u", 6, 15))>;
11540b57cec5SDimitry Andric
11550b57cec5SDimitry Andricdef CSR_Win64_SwiftError : CalleeSavedRegs<(sub CSR_Win64, R12)>;
1156fe6060f1SDimitry Andricdef CSR_Win64_SwiftTail : CalleeSavedRegs<(sub CSR_Win64, R13, R14)>;
11570b57cec5SDimitry Andric
11580b57cec5SDimitry Andric// The function used by Darwin to obtain the address of a thread-local variable
11590b57cec5SDimitry Andric// uses rdi to pass a single parameter and rax for the return value. All other
11600b57cec5SDimitry Andric// GPRs are preserved.
11610b57cec5SDimitry Andricdef CSR_64_TLS_Darwin : CalleeSavedRegs<(add CSR_64, RCX, RDX, RSI,
11620b57cec5SDimitry Andric                                             R8, R9, R10, R11)>;
11630b57cec5SDimitry Andric
11640b57cec5SDimitry Andric// CSRs that are handled by prologue, epilogue.
11650b57cec5SDimitry Andricdef CSR_64_CXX_TLS_Darwin_PE : CalleeSavedRegs<(add RBP)>;
11660b57cec5SDimitry Andric
11670b57cec5SDimitry Andric// CSRs that are handled explicitly via copies.
11680b57cec5SDimitry Andricdef CSR_64_CXX_TLS_Darwin_ViaCopy : CalleeSavedRegs<(sub CSR_64_TLS_Darwin, RBP)>;
11690b57cec5SDimitry Andric
117006c3fb27SDimitry Andric// All GPRs - except r11 and return registers.
11710b57cec5SDimitry Andricdef CSR_64_RT_MostRegs : CalleeSavedRegs<(add CSR_64, RAX, RCX, RDX, RSI, RDI,
1172e8d8bef9SDimitry Andric                                              R8, R9, R10)>;
11730b57cec5SDimitry Andric
11745f757f3fSDimitry Andricdef CSR_Win64_RT_MostRegs : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
11755f757f3fSDimitry Andric                                                 (sequence "XMM%u", 6, 15))>;
11765f757f3fSDimitry Andric
117706c3fb27SDimitry Andric// All registers - except r11 and return registers.
11780b57cec5SDimitry Andricdef CSR_64_RT_AllRegs     : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
11790b57cec5SDimitry Andric                                                 (sequence "XMM%u", 0, 15))>;
11800b57cec5SDimitry Andricdef CSR_64_RT_AllRegs_AVX : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
11810b57cec5SDimitry Andric                                                 (sequence "YMM%u", 0, 15))>;
11820b57cec5SDimitry Andric
11830b57cec5SDimitry Andricdef CSR_64_MostRegs : CalleeSavedRegs<(add RBX, RCX, RDX, RSI, RDI, R8, R9, R10,
11840b57cec5SDimitry Andric                                           R11, R12, R13, R14, R15, RBP,
11850b57cec5SDimitry Andric                                           (sequence "XMM%u", 0, 15))>;
11860b57cec5SDimitry Andric
11870b57cec5SDimitry Andricdef CSR_32_AllRegs     : CalleeSavedRegs<(add EAX, EBX, ECX, EDX, EBP, ESI,
11880b57cec5SDimitry Andric                                              EDI)>;
11890b57cec5SDimitry Andricdef CSR_32_AllRegs_SSE : CalleeSavedRegs<(add CSR_32_AllRegs,
11900b57cec5SDimitry Andric                                              (sequence "XMM%u", 0, 7))>;
11910b57cec5SDimitry Andricdef CSR_32_AllRegs_AVX : CalleeSavedRegs<(add CSR_32_AllRegs,
11920b57cec5SDimitry Andric                                              (sequence "YMM%u", 0, 7))>;
11930b57cec5SDimitry Andricdef CSR_32_AllRegs_AVX512 : CalleeSavedRegs<(add CSR_32_AllRegs,
11940b57cec5SDimitry Andric                                                 (sequence "ZMM%u", 0, 7),
11950b57cec5SDimitry Andric                                                 (sequence "K%u", 0, 7))>;
11960b57cec5SDimitry Andric
11970b57cec5SDimitry Andricdef CSR_64_AllRegs     : CalleeSavedRegs<(add CSR_64_MostRegs, RAX)>;
11980b57cec5SDimitry Andricdef CSR_64_AllRegs_NoSSE : CalleeSavedRegs<(add RAX, RBX, RCX, RDX, RSI, RDI, R8, R9,
11990b57cec5SDimitry Andric                                                R10, R11, R12, R13, R14, R15, RBP)>;
12000b57cec5SDimitry Andricdef CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX,
12010b57cec5SDimitry Andric                                                   (sequence "YMM%u", 0, 15)),
12020b57cec5SDimitry Andric                                              (sequence "XMM%u", 0, 15))>;
12030b57cec5SDimitry Andricdef CSR_64_AllRegs_AVX512 : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX,
12040b57cec5SDimitry Andric                                                      (sequence "ZMM%u", 0, 31),
12050b57cec5SDimitry Andric                                                      (sequence "K%u", 0, 7)),
12060b57cec5SDimitry Andric                                                 (sequence "XMM%u", 0, 15))>;
1207*0fca6ea1SDimitry Andricdef CSR_64_NoneRegs    : CalleeSavedRegs<(add RBP)>;
12080b57cec5SDimitry Andric
12090b57cec5SDimitry Andric// Standard C + YMM6-15
12100b57cec5SDimitry Andricdef CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
12110b57cec5SDimitry Andric                                                  R13, R14, R15,
12120b57cec5SDimitry Andric                                                  (sequence "YMM%u", 6, 15))>;
12130b57cec5SDimitry Andric
12140b57cec5SDimitry Andricdef CSR_Win64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI,
12150b57cec5SDimitry Andric                                                     R12, R13, R14, R15,
12160b57cec5SDimitry Andric                                                     (sequence "ZMM%u", 6, 21),
12170b57cec5SDimitry Andric                                                     K4, K5, K6, K7)>;
12180b57cec5SDimitry Andric//Standard C + XMM 8-15
12190b57cec5SDimitry Andricdef CSR_64_Intel_OCL_BI       : CalleeSavedRegs<(add CSR_64,
12200b57cec5SDimitry Andric                                                 (sequence "XMM%u", 8, 15))>;
12210b57cec5SDimitry Andric
12220b57cec5SDimitry Andric//Standard C + YMM 8-15
12230b57cec5SDimitry Andricdef CSR_64_Intel_OCL_BI_AVX    : CalleeSavedRegs<(add CSR_64,
12240b57cec5SDimitry Andric                                                  (sequence "YMM%u", 8, 15))>;
12250b57cec5SDimitry Andric
12265ffd83dbSDimitry Andricdef CSR_64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RSI, R14, R15,
12270b57cec5SDimitry Andric                                                  (sequence "ZMM%u", 16, 31),
12280b57cec5SDimitry Andric                                                  K4, K5, K6, K7)>;
12290b57cec5SDimitry Andric
12300b57cec5SDimitry Andric// Register calling convention preserves few GPR and XMM8-15
1231e8d8bef9SDimitry Andricdef CSR_32_RegCall_NoSSE : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
12320b57cec5SDimitry Andricdef CSR_32_RegCall       : CalleeSavedRegs<(add CSR_32_RegCall_NoSSE,
12330b57cec5SDimitry Andric                                           (sequence "XMM%u", 4, 7))>;
1234480093f4SDimitry Andricdef CSR_Win32_CFGuard_Check_NoSSE : CalleeSavedRegs<(add CSR_32_RegCall_NoSSE, ECX)>;
1235480093f4SDimitry Andricdef CSR_Win32_CFGuard_Check       : CalleeSavedRegs<(add CSR_32_RegCall, ECX)>;
1236e8d8bef9SDimitry Andricdef CSR_Win64_RegCall_NoSSE : CalleeSavedRegs<(add RBX, RBP,
12370b57cec5SDimitry Andric                                              (sequence "R%u", 10, 15))>;
12380b57cec5SDimitry Andricdef CSR_Win64_RegCall       : CalleeSavedRegs<(add CSR_Win64_RegCall_NoSSE,
12390b57cec5SDimitry Andric                                              (sequence "XMM%u", 8, 15))>;
1240e8d8bef9SDimitry Andricdef CSR_SysV64_RegCall_NoSSE : CalleeSavedRegs<(add RBX, RBP,
12410b57cec5SDimitry Andric                                               (sequence "R%u", 12, 15))>;
12420b57cec5SDimitry Andricdef CSR_SysV64_RegCall       : CalleeSavedRegs<(add CSR_SysV64_RegCall_NoSSE,
12430b57cec5SDimitry Andric                                               (sequence "XMM%u", 8, 15))>;
1244