; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
; RUN: llc < %s -mtriple=x86_64-mingw32 | FileCheck %s -check-prefix=WIN64

define i64 @double_to_i128(double %d) nounwind {
; WIN64-LABEL: double_to_i128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixdfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  %1 = fptosi double %d to i128
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

define i64 @double_to_ui128(double %d) nounwind {
; WIN64-LABEL: double_to_ui128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixunsdfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  %1 = fptoui double %d to i128
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

define i64 @float_to_i128(float %d) nounwind {
; WIN64-LABEL: float_to_i128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixsfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  %1 = fptosi float %d to i128
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

define i64 @float_to_ui128(float %d) nounwind {
; WIN64-LABEL: float_to_ui128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixunssfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  %1 = fptoui float %d to i128
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

define i64 @longdouble_to_i128(ptr nocapture readonly %0) nounwind {
; WIN64-LABEL: longdouble_to_i128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    fldt (%rcx)
; WIN64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __fixxfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load x86_fp80, ptr %0, align 16
  %3 = fptosi x86_fp80 %2 to i128
  %4 = trunc i128 %3 to i64
  ret i64 %4
}

define i64 @longdouble_to_ui128(ptr nocapture readonly %0) nounwind {
; WIN64-LABEL: longdouble_to_ui128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    fldt (%rcx)
; WIN64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __fixunsxfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load x86_fp80, ptr %0, align 16
  %3 = fptoui x86_fp80 %2 to i128
  %4 = trunc i128 %3 to i64
  ret i64 %4
}

define double @i128_to_double(ptr nocapture readonly %0) nounwind {
; WIN64-LABEL: i128_to_double:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floattidf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = sitofp i128 %2 to double
  ret double %3
}

define double @ui128_to_double(ptr nocapture readonly %0) nounwind {
; WIN64-LABEL: ui128_to_double:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floatuntidf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = uitofp i128 %2 to double
  ret double %3
}

define float @i128_to_float(ptr nocapture readonly %0) nounwind {
; WIN64-LABEL: i128_to_float:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floattisf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = sitofp i128 %2 to float
  ret float %3
}

define float @ui128_to_float(ptr nocapture readonly %0) nounwind {
; WIN64-LABEL: ui128_to_float:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floatuntisf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = uitofp i128 %2 to float
  ret float %3
}

define void @i128_to_longdouble(ptr noalias nocapture sret(x86_fp80) align 16 %agg.result, ptr nocapture readonly %0) nounwind {
; WIN64-LABEL: i128_to_longdouble:
; WIN64:       # %bb.0:
; WIN64-NEXT:    pushq %rsi
; WIN64-NEXT:    subq $64, %rsp
; WIN64-NEXT:    movq %rcx, %rsi
; WIN64-NEXT:    movaps (%rdx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
; WIN64-NEXT:    callq __floattixf
; WIN64-NEXT:    fldt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    fstpt (%rsi)
; WIN64-NEXT:    movq %rsi, %rax
; WIN64-NEXT:    addq $64, %rsp
; WIN64-NEXT:    popq %rsi
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = sitofp i128 %2 to x86_fp80
  store x86_fp80 %3, ptr %agg.result, align 16
  ret void
}

define void @ui128_to_longdouble(ptr noalias nocapture sret(x86_fp80) align 16 %agg.result, ptr nocapture readonly %0) nounwind {
; WIN64-LABEL: ui128_to_longdouble:
; WIN64:       # %bb.0:
; WIN64-NEXT:    pushq %rsi
; WIN64-NEXT:    subq $64, %rsp
; WIN64-NEXT:    movq %rcx, %rsi
; WIN64-NEXT:    movaps (%rdx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
; WIN64-NEXT:    callq __floatuntixf
; WIN64-NEXT:    fldt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    fstpt (%rsi)
; WIN64-NEXT:    movq %rsi, %rax
; WIN64-NEXT:    addq $64, %rsp
; WIN64-NEXT:    popq %rsi
; WIN64-NEXT:    retq
  %2 = load i128, ptr %0, align 16
  %3 = uitofp i128 %2 to x86_fp80
  store x86_fp80 %3, ptr %agg.result, align 16
  ret void
}