; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; REQUIRES: aarch64-registered-target
; RUN: opt -S %s -passes=scalarize-masked-mem-intrin -mtriple=aarch64-linux-gnu -mattr=+sve -force-streaming-compatible | FileCheck %s

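; The masked gather should be scalarized: each mask bit is tested via an and/icmp
; on the bitcast mask, and the taken branch loads that lane and inserts it into
; the passthru vector, while untaken lanes keep the passthru element via the phi.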
define <2 x i32> @scalarize_v2i32(<2 x ptr> %p, <2 x i1> %mask, <2 x i32> %passthru) {
; CHECK-LABEL: @scalarize_v2i32(
; CHECK-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
; CHECK-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_LOAD:%.*]], label [[ELSE:%.*]]
; CHECK:       cond.load:
; CHECK-NEXT:    [[PTR0:%.*]] = extractelement <2 x ptr> [[P:%.*]], i64 0
; CHECK-NEXT:    [[LOAD0:%.*]] = load i32, ptr [[PTR0]], align 8
; CHECK-NEXT:    [[RES0:%.*]] = insertelement <2 x i32> [[PASSTHRU:%.*]], i32 [[LOAD0]], i64 0
; CHECK-NEXT:    br label [[ELSE]]
; CHECK:       else:
; CHECK-NEXT:    [[RES_PHI_ELSE:%.*]] = phi <2 x i32> [ [[RES0]], [[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = and i2 [[SCALAR_MASK]], -2
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i2 [[TMP3]], 0
; CHECK-NEXT:    br i1 [[TMP4]], label [[COND_LOAD1:%.*]], label [[ELSE2:%.*]]
; CHECK:       cond.load1:
; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x ptr> [[P]], i64 1
; CHECK-NEXT:    [[LOAD1:%.*]] = load i32, ptr [[PTR1]], align 8
; CHECK-NEXT:    [[RES1:%.*]] = insertelement <2 x i32> [[RES_PHI_ELSE]], i32 [[LOAD1]], i64 1
; CHECK-NEXT:    br label [[ELSE2]]
; CHECK:       else2:
; CHECK-NEXT:    [[RES_PHI_ELSE3:%.*]] = phi <2 x i32> [ [[RES1]], [[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], [[ELSE]] ]
; CHECK-NEXT:    ret <2 x i32> [[RES_PHI_ELSE3]]
;
  %ret = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> %p, i32 8, <2 x i1> %mask, <2 x i32> %passthru)
  ret <2 x i32> %ret
}

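; The masked scatter should likewise be scalarized: each set mask bit guards a
; conditional store of the corresponding value element to its extracted pointer.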
define void @scalarize_v2i64(<2 x ptr> %p, <2 x i1> %mask, <2 x i64> %value) {
; CHECK-LABEL: @scalarize_v2i64(
; CHECK-NEXT:    [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK:%.*]] to i2
; CHECK-NEXT:    [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
; CHECK-NEXT:    br i1 [[TMP2]], label [[COND_STORE:%.*]], label [[ELSE:%.*]]
; CHECK:       cond.store:
; CHECK-NEXT:    [[ELT0:%.*]] = extractelement <2 x i64> [[VALUE:%.*]], i64 0
; CHECK-NEXT:    [[PTR0:%.*]] = extractelement <2 x ptr> [[P:%.*]], i64 0
; CHECK-NEXT:    store i64 [[ELT0]], ptr [[PTR0]], align 8
; CHECK-NEXT:    br label [[ELSE]]
; CHECK:       else:
; CHECK-NEXT:    [[TMP3:%.*]] = and i2 [[SCALAR_MASK]], -2
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i2 [[TMP3]], 0
; CHECK-NEXT:    br i1 [[TMP4]], label [[COND_STORE1:%.*]], label [[ELSE2:%.*]]
; CHECK:       cond.store1:
; CHECK-NEXT:    [[ELT1:%.*]] = extractelement <2 x i64> [[VALUE]], i64 1
; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x ptr> [[P]], i64 1
; CHECK-NEXT:    store i64 [[ELT1]], ptr [[PTR1]], align 8
; CHECK-NEXT:    br label [[ELSE2]]
; CHECK:       else2:
; CHECK-NEXT:    ret void
;
  call void @llvm.masked.scatter.v2i64.v2p0(<2 x i64> %value, <2 x ptr> %p, i32 8, <2 x i1> %mask)
  ret void
}

declare <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i32>)
declare void @llvm.masked.scatter.v2i64.v2p0(<2 x i64>, <2 x ptr>, i32, <2 x i1>)