; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s

; Fixed-size array alloca accessed only through scalable-vector volatile
; store/load. The autogenerated CHECK lines expect InstCombine to leave the
; IR unchanged: the alloca keeps its [16 x i32] type rather than being
; rewritten to the scalable access type.
define void @fixed_array16i32_to_scalable4i32(ptr %out) {
; CHECK-LABEL: @fixed_array16i32_to_scalable4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca [16 x i32], align 16
; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca [16 x i32], align 16
  store volatile <vscale x 4 x i32> zeroinitializer, ptr %tmp, align 16
  %reload = load volatile <vscale x 4 x i32>, ptr %tmp, align 16
  store <vscale x 4 x i32> %reload, ptr %out, align 16
  ret void
}
20
; Scalable alloca accessed through fixed-width <16 x i32> stores/loads.
; CHECK lines expect the alloca to keep its <vscale x 4 x i32> type.
; NOTE(review): unlike the sibling tests, the initial store here is not
; volatile — the CHECK line matches the non-volatile form, so this appears
; intentional; confirm before "fixing".
define void @scalable4i32_to_fixed16i32(ptr %out) {
; CHECK-LABEL: @scalable4i32_to_fixed16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 4 x i32>, align 16
; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 4 x i32>, align 16
  store <16 x i32> zeroinitializer, ptr %tmp, align 16
  %reload = load volatile <16 x i32>, ptr %tmp, align 16
  store <16 x i32> %reload, ptr %out, align 16
  ret void
}
37
; Fixed-width vector alloca accessed through scalable volatile store/load.
; CHECK lines expect the <16 x i32> alloca type to be preserved.
define void @fixed16i32_to_scalable4i32(ptr %out) {
; CHECK-LABEL: @fixed16i32_to_scalable4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <16 x i32>, align 16
; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <16 x i32>, align 16
  store volatile <vscale x 4 x i32> zeroinitializer, ptr %tmp, align 16
  %reload = load volatile <vscale x 4 x i32>, ptr %tmp, align 16
  store <vscale x 4 x i32> %reload, ptr %out, align 16
  ret void
}
54
; Scalable alloca accessed through fixed-width volatile store/load.
; CHECK lines expect the <vscale x 16 x i32> alloca type to be preserved.
define void @scalable16i32_to_fixed16i32(ptr %out) {
; CHECK-LABEL: @scalable16i32_to_fixed16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 16
; CHECK-NEXT:    store volatile <16 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 16 x i32>, align 16
  store volatile <16 x i32> zeroinitializer, ptr %tmp, align 16
  %reload = load volatile <16 x i32>, ptr %tmp, align 16
  store <16 x i32> %reload, ptr %out, align 16
  ret void
}
71
; Scalable alloca accessed through a narrower scalable type (same element
; type, half the element count). CHECK lines expect the IR to be unchanged.
define void @scalable32i32_to_scalable16i32(ptr %out) {
; CHECK-LABEL: @scalable32i32_to_scalable16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i32>, align 16
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i32>, align 16
  store volatile <vscale x 16 x i32> zeroinitializer, ptr %tmp, align 16
  %reload = load volatile <vscale x 16 x i32>, ptr %tmp, align 16
  store <vscale x 16 x i32> %reload, ptr %out, align 16
  ret void
}
88
; Scalable alloca accessed through a scalable type with a different element
; type (i16 alloca, i32 accesses). CHECK lines expect the alloca to keep its
; <vscale x 32 x i16> type.
define void @scalable32i16_to_scalable16i32(ptr %out) {
; CHECK-LABEL: @scalable32i16_to_scalable16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 16
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i16>, align 16
  store volatile <vscale x 16 x i32> zeroinitializer, ptr %tmp, align 16
  %reload = load volatile <vscale x 16 x i32>, ptr %tmp, align 16
  store <vscale x 16 x i32> %reload, ptr %out, align 16
  ret void
}
105
; Same alloca accessed at two different types: once as <vscale x 16 x i32>
; and once at its declared <vscale x 32 x i16> type. CHECK lines expect both
; access paths and the alloca type to survive InstCombine unchanged.
define void @scalable32i16_to_scalable16i32_multiuse(ptr %out, ptr %out2) {
; CHECK-LABEL: @scalable32i16_to_scalable16i32_multiuse(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 16
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT:    [[RELOAD2:%.*]] = load volatile <vscale x 32 x i16>, ptr [[TMP]], align 16
; CHECK-NEXT:    store <vscale x 32 x i16> [[RELOAD2]], ptr [[OUT2:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i16>, align 16
  store volatile <vscale x 16 x i32> zeroinitializer, ptr %tmp, align 16
  %reload = load volatile <vscale x 16 x i32>, ptr %tmp, align 16
  store <vscale x 16 x i32> %reload, ptr %out, align 16
  %reload2 = load volatile <vscale x 32 x i16>, ptr %tmp, align 16
  store <vscale x 32 x i16> %reload2, ptr %out2, align 16
  ret void
}