; RUN: llc -mtriple=aarch64-linux-gnu -stop-after=instruction-select < %s | FileCheck %s

; Check that the NEON st2/st3/st4, st1x2/st1x3/st1x4, and st2lane/st3lane/st4lane
; intrinsics are selected to the expected store instructions with the correct
; memory operand sizes.

declare void @llvm.aarch64.neon.st2.v4f32.p0(<4 x float>, <4 x float>, ptr)
declare void @llvm.aarch64.neon.st3.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, ptr)
declare void @llvm.aarch64.neon.st4.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, <4 x float>, ptr)

declare void @llvm.aarch64.neon.st1x2.v4f32.p0(<4 x float>, <4 x float>, ptr)
declare void @llvm.aarch64.neon.st1x3.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, ptr)
declare void @llvm.aarch64.neon.st1x4.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, <4 x float>, ptr)

declare void @llvm.aarch64.neon.st2lane.v4f32.p0(<4 x float>, <4 x float>, i64, ptr)
declare void @llvm.aarch64.neon.st3lane.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, i64, ptr)
declare void @llvm.aarch64.neon.st4lane.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, <4 x float>, i64, ptr)

define void @addstx(ptr %res, ptr %a, ptr %b, ptr %c, ptr %d) {
  %al = load <4 x float>, ptr %a
  %bl = load <4 x float>, ptr %b
  %cl = load <4 x float>, ptr %c
  %dl = load <4 x float>, ptr %d

  %ar = fadd <4 x float> %al, %bl
  %br = fadd <4 x float> %bl, %cl
  %cr = fadd <4 x float> %cl, %dl
  %dr = fadd <4 x float> %dl, %al

  tail call void @llvm.aarch64.neon.st2.v4f32.p0(<4 x float> %ar, <4 x float> %br, ptr %res)
; CHECK: ST2Twov4s {{.*}} :: (store (s256) {{.*}})
  tail call void @llvm.aarch64.neon.st3.v4f32.p0(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, ptr %res)
; CHECK: ST3Threev4s {{.*}} :: (store (s384) {{.*}})
  tail call void @llvm.aarch64.neon.st4.v4f32.p0(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, <4 x float> %dr, ptr %res)
; CHECK: ST4Fourv4s {{.*}} :: (store (s512) {{.*}})

  ret void
}

define void @addst1x(ptr %res, ptr %a, ptr %b, ptr %c, ptr %d) {
  %al = load <4 x float>, ptr %a
  %bl = load <4 x float>, ptr %b
  %cl = load <4 x float>, ptr %c
  %dl = load <4 x float>, ptr %d

  %ar = fadd <4 x float> %al, %bl
  %br = fadd <4 x float> %bl, %cl
  %cr = fadd <4 x float> %cl, %dl
  %dr = fadd <4 x float> %dl, %al

  tail call void @llvm.aarch64.neon.st1x2.v4f32.p0(<4 x float> %ar, <4 x float> %br, ptr %res)
; CHECK: ST1Twov4s {{.*}} :: (store (s256) {{.*}})
  tail call void @llvm.aarch64.neon.st1x3.v4f32.p0(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, ptr %res)
; CHECK: ST1Threev4s {{.*}} :: (store (s384) {{.*}})
  tail call void @llvm.aarch64.neon.st1x4.v4f32.p0(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, <4 x float> %dr, ptr %res)
; CHECK: ST1Fourv4s {{.*}} :: (store (s512) {{.*}})

  ret void
}

define void @addstxlane(ptr %res, ptr %a, ptr %b, ptr %c, ptr %d) {
  %al = load <4 x float>, ptr %a
  %bl = load <4 x float>, ptr %b
  %cl = load <4 x float>, ptr %c
  %dl = load <4 x float>, ptr %d

  %ar = fadd <4 x float> %al, %bl
  %br = fadd <4 x float> %bl, %cl
  %cr = fadd <4 x float> %cl, %dl
  %dr = fadd <4 x float> %dl, %al

  tail call void @llvm.aarch64.neon.st2lane.v4f32.p0(<4 x float> %ar, <4 x float> %br, i64 1, ptr %res)
; CHECK: ST2i32 {{.*}} :: (store (s64) {{.*}})
  tail call void @llvm.aarch64.neon.st3lane.v4f32.p0(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, i64 1, ptr %res)
; CHECK: ST3i32 {{.*}} :: (store (s96) {{.*}})
  tail call void @llvm.aarch64.neon.st4lane.v4f32.p0(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, <4 x float> %dr, i64 1, ptr %res)
; CHECK: ST4i32 {{.*}} :: (store (s128) {{.*}})

  ret void
}