; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-linux -mcpu=skylake -mattr=+avx2 < %s | FileCheck %s

define <2 x float> @gather_v2f32_scale_512(ptr %result, <2 x i64> %idx, <2 x i1> %mask) {
; CHECK-LABEL: gather_v2f32_scale_512:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsllq $9, %xmm0, %xmm2
; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; CHECK-NEXT:    vpslld $31, %xmm0, %xmm1
; CHECK-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vgatherqps %xmm1, (%rdi,%xmm2), %xmm0
; CHECK-NEXT:    retq
  %gep = getelementptr inbounds [512 x i8], ptr %result, <2 x i64> %idx
  %res = call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> %gep, i32 0, <2 x i1> %mask, <2 x float> zeroinitializer)
  ret <2 x float> %res
}

define <2 x float> @gather_v2f32_scale_16(ptr %result, <2 x i64> %idx, <2 x i1> %mask) {
; CHECK-LABEL: gather_v2f32_scale_16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsllq $4, %xmm0, %xmm2
; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; CHECK-NEXT:    vpslld $31, %xmm0, %xmm1
; CHECK-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vgatherqps %xmm1, (%rdi,%xmm2), %xmm0
; CHECK-NEXT:    retq
  %gep = getelementptr inbounds [16 x i8], ptr %result, <2 x i64> %idx
  %res = call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> %gep, i32 0, <2 x i1> %mask, <2 x float> zeroinitializer)
  ret <2 x float> %res
}

define <2 x float> @gather_v2f32_scale_8(ptr %result, <2 x i64> %idx, <2 x i1> %mask) {
; CHECK-LABEL: gather_v2f32_scale_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-NEXT:    vpslld $31, %xmm1, %xmm2
; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vgatherqps %xmm2, (%rdi,%xmm0,8), %xmm1
; CHECK-NEXT:    vmovaps %xmm1, %xmm0
; CHECK-NEXT:    retq
  %gep = getelementptr inbounds [8 x i8], ptr %result, <2 x i64> %idx
  %res = call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> %gep, i32 0, <2 x i1> %mask, <2 x float> zeroinitializer)
  ret <2 x float> %res
}

define <2 x float> @gather_v2f32_scale_4(ptr %result, <2 x i64> %idx, <2 x i1> %mask) {
; CHECK-LABEL: gather_v2f32_scale_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-NEXT:    vpslld $31, %xmm1, %xmm2
; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vgatherqps %xmm2, (%rdi,%xmm0,4), %xmm1
; CHECK-NEXT:    vmovaps %xmm1, %xmm0
; CHECK-NEXT:    retq
  %gep = getelementptr inbounds [4 x i8], ptr %result, <2 x i64> %idx
  %res = call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> %gep, i32 0, <2 x i1> %mask, <2 x float> zeroinitializer)
  ret <2 x float> %res
}

define <2 x float> @gather_v2f32_scale_3(ptr %result, <2 x i64> %idx, <2 x i1> %mask) {
; CHECK-LABEL: gather_v2f32_scale_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpaddq %xmm0, %xmm0, %xmm2
; CHECK-NEXT:    vpaddq %xmm0, %xmm2, %xmm2
; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; CHECK-NEXT:    vpslld $31, %xmm0, %xmm1
; CHECK-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vgatherqps %xmm1, (%rdi,%xmm2), %xmm0
; CHECK-NEXT:    retq
  %gep = getelementptr inbounds [3 x i8], ptr %result, <2 x i64> %idx
  %res = call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> %gep, i32 0, <2 x i1> %mask, <2 x float> zeroinitializer)
  ret <2 x float> %res
}

define <2 x float> @gather_v2f32_scale_1(ptr %result, <2 x i64> %idx, <2 x i1> %mask) {
; CHECK-LABEL: gather_v2f32_scale_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-NEXT:    vpslld $31, %xmm1, %xmm2
; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vgatherqps %xmm2, (%rdi,%xmm0), %xmm1
; CHECK-NEXT:    vmovaps %xmm1, %xmm0
; CHECK-NEXT:    retq
  %gep = getelementptr inbounds [1 x i8], ptr %result, <2 x i64> %idx
  %res = call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> %gep, i32 0, <2 x i1> %mask, <2 x float> zeroinitializer)
  ret <2 x float> %res
}

declare <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr>, i32 immarg, <2 x i1>, <2 x float>)
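
; NOTE: As the CHECK lines above show, x86 gather addressing only encodes
; scales of 1, 2, 4, or 8: the scale_1, scale_4, and scale_8 cases fold the
; element stride into the vgatherqps memory operand, while scale_3, scale_16,
; and scale_512 must pre-scale the index vector (vpaddq / vpsllq) and use a
; scale of 1.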