; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown --show-mc-encoding -mattr=+cmpccxadd | FileCheck %s
; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown --show-mc-encoding -mattr=+cmpccxadd,+egpr | FileCheck %s --check-prefix=EGPR

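; The trailing i32 immediate selects the x86 condition code tested by
; CMPccXADD: 0=O, 1=NO, 2=B, 3=NB (AE), 4=Z (E), 5=NZ (NE), 6=BE,
; 7=NBE (A), 8=S, 9=NS, 10=P, 11=NP, 12=L, 13=NL (GE), 14=LE, 15=NLE (G).
; The +egpr RUN line verifies that the same VEX encodings are still emitted
; via EVEX-to-VEX compression.
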
define dso_local i32 @test_cmpbexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpbexadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmpoxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe0,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpbexadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmpoxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe0,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 0)
  ret i32 %0
}

declare i32 @llvm.x86.cmpccxadd32(ptr, i32, i32, i32 immarg)

define dso_local i64 @test_cmpbexadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpbexadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmpoxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe0,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpbexadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmpoxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe0,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 0)
  ret i64 %0
}

declare i64 @llvm.x86.cmpccxadd64(ptr, i64, i64, i32 immarg)

define dso_local i32 @test_cmpbxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpbxadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmpnoxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe1,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpbxadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmpnoxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe1,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 1)
  ret i32 %0
}

define dso_local i64 @test_cmpbxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpbxadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmpnoxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe1,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpbxadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmpnoxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe1,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 1)
  ret i64 %0
}

define dso_local i32 @test_cmplexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmplexadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmpbxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe2,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmplexadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmpbxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe2,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 2)
  ret i32 %0
}

define dso_local i64 @test_cmplexadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmplexadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmpbxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe2,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmplexadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmpbxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe2,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 2)
  ret i64 %0
}

define dso_local i32 @test_cmplxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmplxadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmpaexadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe3,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmplxadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmpaexadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe3,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 3)
  ret i32 %0
}

define dso_local i64 @test_cmplxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmplxadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmpaexadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe3,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmplxadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmpaexadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe3,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 3)
  ret i64 %0
}

define dso_local i32 @test_cmpaxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpaxadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmpexadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe4,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpaxadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmpexadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe4,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 4)
  ret i32 %0
}

define dso_local i64 @test_cmpaxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpaxadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmpexadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe4,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpaxadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmpexadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe4,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 4)
  ret i64 %0
}

define dso_local i32 @test_cmpaexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpaexadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmpnexadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe5,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpaexadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmpnexadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe5,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 5)
  ret i32 %0
}

define dso_local i64 @test_cmpaexadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpaexadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmpnexadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe5,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpaexadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmpnexadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe5,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 5)
  ret i64 %0
}

define dso_local i32 @test_cmpgxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpgxadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmpbexadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe6,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpgxadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmpbexadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe6,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 6)
  ret i32 %0
}

define dso_local i64 @test_cmpgxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpgxadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmpbexadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe6,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpgxadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmpbexadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe6,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 6)
  ret i64 %0
}

define dso_local i32 @test_cmpgexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpgexadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmpaxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe7,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpgexadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmpaxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe7,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 7)
  ret i32 %0
}

define dso_local i64 @test_cmpgexadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpgexadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmpaxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe7,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpgexadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmpaxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe7,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 7)
  ret i64 %0
}

define dso_local i32 @test_cmpnoxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpnoxadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmpsxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe8,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpnoxadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmpsxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe8,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 8)
  ret i32 %0
}

define dso_local i64 @test_cmpnoxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpnoxadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmpsxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe8,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpnoxadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmpsxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe8,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 8)
  ret i64 %0
}

define dso_local i32 @test_cmpnpxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpnpxadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmpnsxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe9,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpnpxadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmpnsxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe9,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 9)
  ret i32 %0
}

define dso_local i64 @test_cmpnpxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpnpxadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmpnsxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe9,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpnpxadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmpnsxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe9,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 9)
  ret i64 %0
}

define dso_local i32 @test_cmpnsxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpnsxadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmppxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xea,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpnsxadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmppxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xea,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 10)
  ret i32 %0
}

define dso_local i64 @test_cmpnsxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpnsxadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmppxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xea,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpnsxadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmppxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xea,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 10)
  ret i64 %0
}

define dso_local i32 @test_cmpnexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpnexadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmpnpxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xeb,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpnexadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmpnpxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xeb,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 11)
  ret i32 %0
}

define dso_local i64 @test_cmpnexadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpnexadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmpnpxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xeb,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpnexadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmpnpxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xeb,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 11)
  ret i64 %0
}

define dso_local i32 @test_cmpoxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpoxadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmplxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xec,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpoxadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmplxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xec,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 12)
  ret i32 %0
}

define dso_local i64 @test_cmpoxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpoxadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmplxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xec,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpoxadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmplxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xec,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 12)
  ret i64 %0
}

define dso_local i32 @test_cmppxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmppxadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmpgexadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xed,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmppxadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmpgexadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xed,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 13)
  ret i32 %0
}

define dso_local i64 @test_cmppxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmppxadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmpgexadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xed,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmppxadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmpgexadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xed,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 13)
  ret i64 %0
}

define dso_local i32 @test_cmpsxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpsxadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmplexadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xee,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpsxadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmplexadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xee,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 14)
  ret i32 %0
}

define dso_local i64 @test_cmpsxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpsxadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmplexadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xee,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpsxadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmplexadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xee,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 14)
  ret i64 %0
}

define dso_local i32 @test_cmpexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
; CHECK-LABEL: test_cmpexadd32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; CHECK-NEXT:    cmpgxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xef,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpexadd32:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
; EGPR-NEXT:    cmpgxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xef,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 15)
  ret i32 %0
}

define dso_local i64 @test_cmpexadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
; CHECK-LABEL: test_cmpexadd64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; CHECK-NEXT:    cmpgxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xef,0x07]
; CHECK-NEXT:    retq # encoding: [0xc3]
;
; EGPR-LABEL: test_cmpexadd64:
; EGPR:       # %bb.0: # %entry
; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
; EGPR-NEXT:    cmpgxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xef,0x07]
; EGPR-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 15)
  ret i64 %0
}