; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s

;
; STNT1H, STNT1W, STNT1D: base + 64-bit index
;   e.g.
;     lsl z1.d, z1.d, #1
;     stnt1h { z0.d }, p0, [z1.d, x0]
;

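; In the generated code the scaled byte offsets end up in the vector register
; (z1) and the base pointer stays in the scalar register (x0), so, as a rough
; sketch of the addressing arithmetic (not output checked by this test), each
; active lane i stores to:
;   EA[i] = %base + (%offsets[i] << log2(element size in bytes))
; e.g. for stnt1h (2-byte elements) an index of 5 targets %base + 10.
;
; The i16 and i32 tests below also truncate the nxv2i64 %data to the narrower
; stored element type before calling the intrinsic.
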
define void @sstnt1h_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
; CHECK-LABEL: sstnt1h_index:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z1.d, z1.d, #1
; CHECK-NEXT:    stnt1h { z0.d }, p0, [z1.d, x0]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i16(<vscale x 2 x i16> %data_trunc,
                                                          <vscale x 2 x i1> %pg,
                                                          ptr %base,
                                                          <vscale x 2 x i64> %offsets)
  ret void
}

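; Same pattern for 32-bit elements: %data is truncated to nxv2i32 and the
; 64-bit indices are scaled by 4 (lsl #2) before the stnt1w.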
define void @sstnt1w_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
; CHECK-LABEL: sstnt1w_index:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z1.d, z1.d, #2
; CHECK-NEXT:    stnt1w { z0.d }, p0, [z1.d, x0]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i32(<vscale x 2 x i32> %data_trunc,
                                                          <vscale x 2 x i1> %pg,
                                                          ptr %base,
                                                          <vscale x 2 x i64> %offsets)
  ret void
}

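; 64-bit elements need no truncation; the indices are scaled by 8 (lsl #3).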
define void @sstnt1d_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
; CHECK-LABEL: sstnt1d_index:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z1.d, z1.d, #3
; CHECK-NEXT:    stnt1d { z0.d }, p0, [z1.d, x0]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i64(<vscale x 2 x i64> %data,
                                                          <vscale x 2 x i1> %pg,
                                                          ptr %base,
                                                          <vscale x 2 x i64> %offsets)
  ret void
}

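; Same scaling as the i64 case (lsl #3), but with floating-point (nxv2f64)
; data in z0.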
define void @sstnt1d_index_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
; CHECK-LABEL: sstnt1d_index_double:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z1.d, z1.d, #3
; CHECK-NEXT:    stnt1d { z0.d }, p0, [z1.d, x0]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.scatter.index.nxv2f64(<vscale x 2 x double> %data,
                                                          <vscale x 2 x i1> %pg,
                                                          ptr %base,
                                                          <vscale x 2 x i64> %offsets)
  ret void
}

declare void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
declare void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
declare void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
declare void @llvm.aarch64.sve.stnt1.scatter.index.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)