; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefixes=SSE,X86-SSE
; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,X86-AVX
; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=AVX,X86-AVX
; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefixes=SSE,X64-SSE
; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,X64-AVX
; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=AVX,X64-AVX

; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse3-builtins.c

define <2 x double> @test_mm_addsub_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_mm_addsub_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    addsubpd %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: test_mm_addsub_pd:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsubpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %res = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %a0, <2 x double> %a1)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double>, <2 x double>) nounwind readnone

define <4 x float> @test_mm_addsub_ps(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_mm_addsub_ps:
; SSE:       # %bb.0:
; SSE-NEXT:    addsubps %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: test_mm_addsub_ps:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %res = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %a0, <4 x float> %a1)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float>, <4 x float>) nounwind readnone

define <2 x double> @test_mm_hadd_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_mm_hadd_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    haddpd %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: test_mm_hadd_pd:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %res = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double>, <2 x double>) nounwind readnone

define <4 x float> @test_mm_hadd_ps(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_mm_hadd_ps:
; SSE:       # %bb.0:
; SSE-NEXT:    haddps %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: test_mm_hadd_ps:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %res = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>) nounwind readnone

define <2 x double> @test_mm_hsub_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_mm_hsub_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    hsubpd %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: test_mm_hsub_pd:
; AVX:       # %bb.0:
; AVX-NEXT:    vhsubpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %res = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>) nounwind readnone

define <4 x float> @test_mm_hsub_ps(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: test_mm_hsub_ps:
; SSE:       # %bb.0:
; SSE-NEXT:    hsubps %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: test_mm_hsub_ps:
; AVX:       # %bb.0:
; AVX-NEXT:    vhsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %res = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>) nounwind readnone

define <2 x i64> @test_mm_lddqu_si128(ptr %a0) {
; X86-SSE-LABEL: test_mm_lddqu_si128:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    lddqu (%eax), %xmm0
; X86-SSE-NEXT:    retl
;
; X86-AVX-LABEL: test_mm_lddqu_si128:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vlddqu (%eax), %xmm0
; X86-AVX-NEXT:    retl
;
; X64-SSE-LABEL: test_mm_lddqu_si128:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    lddqu (%rdi), %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: test_mm_lddqu_si128:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vlddqu (%rdi), %xmm0
; X64-AVX-NEXT:    retq
  %call = call <16 x i8> @llvm.x86.sse3.ldu.dq(ptr %a0)
  %res = bitcast <16 x i8> %call to <2 x i64>
  ret <2 x i64> %res
}
declare <16 x i8> @llvm.x86.sse3.ldu.dq(ptr) nounwind readonly

define <2 x double> @test_mm_loaddup_pd(ptr %a0) {
; X86-SSE-LABEL: test_mm_loaddup_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
; X86-SSE-NEXT:    retl
;
; X86-AVX-LABEL: test_mm_loaddup_pd:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; X86-AVX-NEXT:    retl
;
; X64-SSE-LABEL: test_mm_loaddup_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: test_mm_loaddup_pd:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-AVX-NEXT:    retq
  %ld = load double, ptr %a0
  %res0 = insertelement <2 x double> undef, double %ld, i32 0
  %res1 = insertelement <2 x double> %res0, double %ld, i32 1
  ret <2 x double> %res1
}

define <2 x double> @test_mm_movedup_pd(<2 x double> %a0) {
; SSE-LABEL: test_mm_movedup_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: test_mm_movedup_pd:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT:    ret{{[l|q]}}
  %res = shufflevector <2 x double> %a0, <2 x double> %a0, <2 x i32> zeroinitializer
  ret <2 x double> %res
}

define <4 x float> @test_mm_movehdup_ps(<4 x float> %a0) {
; SSE-LABEL: test_mm_movehdup_ps:
; SSE:       # %bb.0:
; SSE-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: test_mm_movehdup_ps:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT:    ret{{[l|q]}}
  %res = shufflevector <4 x float> %a0, <4 x float> %a0, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
  ret <4 x float> %res
}

define <4 x float> @test_mm_moveldup_ps(<4 x float> %a0) {
; SSE-LABEL: test_mm_moveldup_ps:
; SSE:       # %bb.0:
; SSE-NEXT:    movsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: test_mm_moveldup_ps:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; AVX-NEXT:    ret{{[l|q]}}
  %res = shufflevector <4 x float> %a0, <4 x float> %a0, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x float> %res
}