; RUN: llc %s -o - -mtriple=armv8 -mattr=+crypto,+fuse-aes -enable-misched -disable-post-ra | FileCheck %s
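; This file checks AESE/AESMC and AESD/AESIMC instruction-pair fusion: with the
; +fuse-aes subtarget feature, the machine scheduler should place each
; aesmc.8/aesimc.8 immediately after the aese.8/aesd.8 that defines its input,
; so the pair can be macro-fused on cores that support it.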

declare <16 x i8> @llvm.arm.neon.aese(<16 x i8> %d, <16 x i8> %k)
declare <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %d)
declare <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %d, <16 x i8> %k)
declare <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %d)

define void @aesea(ptr %a0, ptr %b0, ptr %c0, <16 x i8> %d, <16 x i8> %e) {
  %d0 = load <16 x i8>, ptr %a0
  %a1 = getelementptr inbounds <16 x i8>, ptr %a0, i64 1
  %d1 = load <16 x i8>, ptr %a1
  %a2 = getelementptr inbounds <16 x i8>, ptr %a0, i64 2
  %d2 = load <16 x i8>, ptr %a2
  %a3 = getelementptr inbounds <16 x i8>, ptr %a0, i64 3
  %d3 = load <16 x i8>, ptr %a3
  %k0 = load <16 x i8>, ptr %b0
  %e00 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %d0, <16 x i8> %k0)
  %f00 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e00)
  %e01 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %d1, <16 x i8> %k0)
  %f01 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e01)
  %e02 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %d2, <16 x i8> %k0)
  %f02 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e02)
  %e03 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %d3, <16 x i8> %k0)
  %f03 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e03)
  %b1 = getelementptr inbounds <16 x i8>, ptr %b0, i64 1
  %k1 = load <16 x i8>, ptr %b1
  %e10 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f00, <16 x i8> %k1)
  %f10 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e00)
  %e11 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f01, <16 x i8> %k1)
  %f11 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e01)
  %e12 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f02, <16 x i8> %k1)
  %f12 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e02)
  %e13 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f03, <16 x i8> %k1)
  %f13 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e03)
  %b2 = getelementptr inbounds <16 x i8>, ptr %b0, i64 2
  %k2 = load <16 x i8>, ptr %b2
  %e20 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f10, <16 x i8> %k2)
  %f20 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e10)
  %e21 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f11, <16 x i8> %k2)
  %f21 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e11)
  %e22 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f12, <16 x i8> %k2)
  %f22 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e12)
  %e23 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f13, <16 x i8> %k2)
  %f23 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e13)
  %b3 = getelementptr inbounds <16 x i8>, ptr %b0, i64 3
  %k3 = load <16 x i8>, ptr %b3
  %e30 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f20, <16 x i8> %k3)
  %f30 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e20)
  %e31 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f21, <16 x i8> %k3)
  %f31 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e21)
  %e32 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f22, <16 x i8> %k3)
  %f32 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e22)
  %e33 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f23, <16 x i8> %k3)
  %f33 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %e23)
  %g0 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f30, <16 x i8> %d)
  %h0 = xor <16 x i8> %g0, %e
  %g1 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f31, <16 x i8> %d)
  %h1 = xor <16 x i8> %g1, %e
  %g2 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f32, <16 x i8> %d)
  %h2 = xor <16 x i8> %g2, %e
  %g3 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %f33, <16 x i8> %d)
  %h3 = xor <16 x i8> %g3, %e
  store <16 x i8> %h0, ptr %c0
  %c1 = getelementptr inbounds <16 x i8>, ptr %c0, i64 1
  store <16 x i8> %h1, ptr %c1
  %c2 = getelementptr inbounds <16 x i8>, ptr %c0, i64 2
  store <16 x i8> %h2, ptr %c2
  %c3 = getelementptr inbounds <16 x i8>, ptr %c0, i64 3
  store <16 x i8> %h3, ptr %c3
  ret void

; CHECK-LABEL: aesea:
; CHECK: aese.8 [[QA:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QA]]

; CHECK: aese.8 [[QB:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QB]]

; CHECK: aese.8 [[QC:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QC]]
; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}

; CHECK: aese.8 [[QD:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QD]]

; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aese.8 [[QE:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QE]]

; CHECK: aese.8 [[QF:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QF]]

; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aese.8 [[QG:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QG]]

; CHECK: aese.8 [[QH:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QH]]
}

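; Same pattern as @aesea, but using the AES decryption intrinsics
; aesd/aesimc instead of aese/aesmc.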
define void @aesda(ptr %a0, ptr %b0, ptr %c0, <16 x i8> %d, <16 x i8> %e) {
  %d0 = load <16 x i8>, ptr %a0
  %a1 = getelementptr inbounds <16 x i8>, ptr %a0, i64 1
  %d1 = load <16 x i8>, ptr %a1
  %a2 = getelementptr inbounds <16 x i8>, ptr %a0, i64 2
  %d2 = load <16 x i8>, ptr %a2
  %a3 = getelementptr inbounds <16 x i8>, ptr %a0, i64 3
  %d3 = load <16 x i8>, ptr %a3
  %k0 = load <16 x i8>, ptr %b0
  %e00 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %d0, <16 x i8> %k0)
  %f00 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e00)
  %e01 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %d1, <16 x i8> %k0)
  %f01 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e01)
  %e02 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %d2, <16 x i8> %k0)
  %f02 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e02)
  %e03 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %d3, <16 x i8> %k0)
  %f03 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e03)
  %b1 = getelementptr inbounds <16 x i8>, ptr %b0, i64 1
  %k1 = load <16 x i8>, ptr %b1
  %e10 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f00, <16 x i8> %k1)
  %f10 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e00)
  %e11 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f01, <16 x i8> %k1)
  %f11 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e01)
  %e12 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f02, <16 x i8> %k1)
  %f12 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e02)
  %e13 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f03, <16 x i8> %k1)
  %f13 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e03)
  %b2 = getelementptr inbounds <16 x i8>, ptr %b0, i64 2
  %k2 = load <16 x i8>, ptr %b2
  %e20 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f10, <16 x i8> %k2)
  %f20 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e10)
  %e21 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f11, <16 x i8> %k2)
  %f21 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e11)
  %e22 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f12, <16 x i8> %k2)
  %f22 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e12)
  %e23 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f13, <16 x i8> %k2)
  %f23 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e13)
  %b3 = getelementptr inbounds <16 x i8>, ptr %b0, i64 3
  %k3 = load <16 x i8>, ptr %b3
  %e30 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f20, <16 x i8> %k3)
  %f30 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e20)
  %e31 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f21, <16 x i8> %k3)
  %f31 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e21)
  %e32 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f22, <16 x i8> %k3)
  %f32 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e22)
  %e33 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f23, <16 x i8> %k3)
  %f33 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %e23)
  %g0 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f30, <16 x i8> %d)
  %h0 = xor <16 x i8> %g0, %e
  %g1 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f31, <16 x i8> %d)
  %h1 = xor <16 x i8> %g1, %e
  %g2 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f32, <16 x i8> %d)
  %h2 = xor <16 x i8> %g2, %e
  %g3 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %f33, <16 x i8> %d)
  %h3 = xor <16 x i8> %g3, %e
  store <16 x i8> %h0, ptr %c0
  %c1 = getelementptr inbounds <16 x i8>, ptr %c0, i64 1
  store <16 x i8> %h1, ptr %c1
  %c2 = getelementptr inbounds <16 x i8>, ptr %c0, i64 2
  store <16 x i8> %h2, ptr %c2
  %c3 = getelementptr inbounds <16 x i8>, ptr %c0, i64 3
  store <16 x i8> %h3, ptr %c3
  ret void

; CHECK-LABEL: aesda:
; CHECK: aesd.8 [[QA:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QA]]

; CHECK: aesd.8 [[QB:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QB]]

; CHECK: aesd.8 [[QC:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QC]]
; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}

; CHECK: aesd.8 [[QD:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QD]]

; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aesd.8 [[QE:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QE]]

; CHECK: aesd.8 [[QF:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QF]]

; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aesd.8 [[QG:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QG]]

; CHECK: aesd.8 [[QH:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QH]]
}

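; Check that the aese/aesmc pairing is still produced when the intermediate
; values are also stored to stack allocas between the intrinsic calls.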
define void @aes_load_store(ptr %p1, ptr %p2, ptr %p3) {
entry:
  %x1 = alloca <16 x i8>, align 16
  %x2 = alloca <16 x i8>, align 16
  %x3 = alloca <16 x i8>, align 16
  %x4 = alloca <16 x i8>, align 16
  %x5 = alloca <16 x i8>, align 16
  %in1 = load <16 x i8>, ptr %p1, align 16
  store <16 x i8> %in1, ptr %x1, align 16
  %aese1 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %in1, <16 x i8> %in1) #2
  store <16 x i8> %aese1, ptr %x2, align 16
  %in2 = load <16 x i8>, ptr %p2, align 16
  %aesmc1 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %aese1) #2
  store <16 x i8> %aesmc1, ptr %x3, align 16
  %aese2 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %in1, <16 x i8> %in2) #2
  store <16 x i8> %aese2, ptr %x4, align 16
  %aesmc2 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %aese2) #2
  store <16 x i8> %aesmc2, ptr %x5, align 16
  ret void

; CHECK-LABEL: aes_load_store:
; CHECK: aese.8 [[QA:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QA]]

; CHECK: aese.8 [[QB:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QB]]
}