xref: /llvm-project/llvm/test/CodeGen/X86/apx/cmov.ll (revision f30188797453fc9bccb0ba9e8bdb8fd47369dfa7)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
2; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ndd -x86-cmov-converter=false -show-mc-encoding -verify-machineinstrs | FileCheck %s
3
; i8 CMOV under +ndd: x86 has no 8-bit CMOV, so both selects are lowered to
; 32-bit cmovbe. Because each NDD destination coincides with a source
; register, the EVEX encodings are compressed back to the legacy 0F 46 form
; (the "EVEX TO LEGACY Compression" annotations below). Two selects share a
; single icmp so one compare covers both the reg-reg path and the path whose
; true operand comes from memory (loaded via movzbl).
4define i8 @cmov8(i8 %a, i8 %b, i8 %x, ptr %y.ptr) {
5; CHECK-LABEL: cmov8:
6; CHECK:       # %bb.0: # %entry
7; CHECK-NEXT:    cmpb %sil, %dil # encoding: [0x40,0x38,0xf7]
8; CHECK-NEXT:    cmovbel %edx, %edi # EVEX TO LEGACY Compression encoding: [0x0f,0x46,0xfa]
9; CHECK-NEXT:    movzbl (%rcx), %eax # encoding: [0x0f,0xb6,0x01]
10; CHECK-NEXT:    cmovbel %edx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x46,0xc2]
11; CHECK-NEXT:    addb %dil, %al # EVEX TO LEGACY Compression encoding: [0x40,0x00,0xf8]
12; CHECK-NEXT:    retq # encoding: [0xc3]
13entry:
14  %cond = icmp ugt i8 %a, %b
15  %y = load i8, ptr %y.ptr
16  %z2 = select i1 %cond, i8 %a, i8 %x
17  %z1 = select i1 %cond, i8 %y, i8 %x
18  %s1 = add i8 %z1, %z2
19  ret i8 %s1
20}
21
; i16 CMOV under +ndd: the reg-reg select is emitted as cmovbe (condition
; inverted relative to the ugt compare) and compresses to the legacy
; encoding, while the select whose true operand is loaded from memory keeps
; the 3-operand EVEX NDD form — cmovaw folds the load and writes a fresh
; destination (%ax) without clobbering either source.
22define i16 @cmov16(i16 %a, i16 %b, i16 %x, ptr %y.ptr) {
23; CHECK-LABEL: cmov16:
24; CHECK:       # %bb.0: # %entry
25; CHECK-NEXT:    cmpw %si, %di # encoding: [0x66,0x39,0xf7]
26; CHECK-NEXT:    cmovbel %edx, %edi # EVEX TO LEGACY Compression encoding: [0x0f,0x46,0xfa]
27; CHECK-NEXT:    cmovaw (%rcx), %dx, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x47,0x11]
28; CHECK-NEXT:    addw %di, %ax # EVEX TO LEGACY Compression encoding: [0x66,0x01,0xf8]
29; CHECK-NEXT:    retq # encoding: [0xc3]
30entry:
31  %cond = icmp ugt i16 %a, %b
32  %y = load i16, ptr %y.ptr
33  %z2 = select i1 %cond, i16 %a, i16 %x
34  %z1 = select i1 %cond, i16 %y, i16 %x
35  %s1 = add i16 %z1, %z2
36  ret i16 %s1
37}
38
; i32 CMOV under +ndd: the reg-reg select uses the 3-operand EVEX NDD form
; (cmoval %edi, %edx, %eax) writing a new destination, so it cannot be
; compressed; the load-folded select overwrites %edx in place and therefore
; compresses to the legacy cmoval encoding.
39define i32 @cmov32(i32 %a, i32 %b, i32 %x, ptr %y.ptr) {
40; CHECK-LABEL: cmov32:
41; CHECK:       # %bb.0: # %entry
42; CHECK-NEXT:    cmpl %esi, %edi # encoding: [0x39,0xf7]
43; CHECK-NEXT:    cmoval %edi, %edx, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x47,0xd7]
44; CHECK-NEXT:    cmoval (%rcx), %edx # EVEX TO LEGACY Compression encoding: [0x0f,0x47,0x11]
45; CHECK-NEXT:    addl %edx, %eax # EVEX TO LEGACY Compression encoding: [0x01,0xd0]
46; CHECK-NEXT:    retq # encoding: [0xc3]
47entry:
48  %cond = icmp ugt i32 %a, %b
49  %y = load i32, ptr %y.ptr
50  %z2 = select i1 %cond, i32 %a, i32 %x
51  %z1 = select i1 %cond, i32 %y, i32 %x
52  %s1 = add i32 %z1, %z2
53  ret i32 %s1
54}
55
; i64 CMOV under +ndd: mirrors the i32 case with 64-bit operands — the
; reg-reg select keeps the 3-operand EVEX NDD cmovaq (new destination %rax),
; while the load-folded select writes %rdx in place and compresses to the
; legacy REX.W 0F 47 encoding.
56define i64 @cmov64(i64 %a, i64 %b, i64 %x, ptr %y.ptr) {
57; CHECK-LABEL: cmov64:
58; CHECK:       # %bb.0: # %entry
59; CHECK-NEXT:    cmpq %rsi, %rdi # encoding: [0x48,0x39,0xf7]
60; CHECK-NEXT:    cmovaq %rdi, %rdx, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x47,0xd7]
61; CHECK-NEXT:    cmovaq (%rcx), %rdx # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x47,0x11]
62; CHECK-NEXT:    addq %rdx, %rax # EVEX TO LEGACY Compression encoding: [0x48,0x01,0xd0]
63; CHECK-NEXT:    retq # encoding: [0xc3]
64entry:
65  %cond = icmp ugt i64 %a, %b
66  %y = load i64, ptr %y.ptr
67  %z2 = select i1 %cond, i64 %a, i64 %x
68  %z1 = select i1 %cond, i64 %y, i64 %x
69  %s1 = add i64 %z1, %z2
70  ret i64 %s1
71}
72
; i8 select with a memory operand on the non-taken side of an slt-0 test:
; with no 8-bit CMOV available, the load is zero-extended (movzbl) and a
; legacy 32-bit cmovsl replaces it with %esi when %a is negative; the
; implicit upper bits of %eax are then marked killed since only %al is
; returned.
73define i8 @cmov8rm_inv(i8 %a, i8 %x, ptr %y.ptr) {
74; CHECK-LABEL: cmov8rm_inv:
75; CHECK:       # %bb.0: # %entry
76; CHECK-NEXT:    testb %dil, %dil # encoding: [0x40,0x84,0xff]
77; CHECK-NEXT:    movzbl (%rdx), %eax # encoding: [0x0f,0xb6,0x02]
78; CHECK-NEXT:    cmovsl %esi, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x48,0xc6]
79; CHECK-NEXT:    # kill: def $al killed $al killed $eax
80; CHECK-NEXT:    retq # encoding: [0xc3]
81entry:
82  %y = load i8, ptr %y.ptr
83  %cmp = icmp slt i8 %a, 0
84  %cmov = select i1 %cmp, i8 %x, i8 %y
85  ret i8 %cmov
86}
87
; i16 inverted-condition select: the condition is flipped from "sign set"
; to "no sign" so the load of %y can be folded as the true operand of a
; single 3-operand EVEX NDD cmovnsw — no separate load instruction and no
; clobber of %si.
88define i16 @cmov16rm_inv(i16 %a, i16 %x, ptr %y.ptr) {
89; CHECK-LABEL: cmov16rm_inv:
90; CHECK:       # %bb.0: # %entry
91; CHECK-NEXT:    testw %di, %di # encoding: [0x66,0x85,0xff]
92; CHECK-NEXT:    cmovnsw (%rdx), %si, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x49,0x32]
93; CHECK-NEXT:    retq # encoding: [0xc3]
94entry:
95  %y = load i16, ptr %y.ptr
96  %cmp = icmp slt i16 %a, 0
97  %cmov = select i1 %cmp, i16 %x, i16 %y
98  ret i16 %cmov
99}
100
; i32 inverted-condition select: same pattern as the i16 case — the sign
; test is inverted so the memory operand folds into one 3-operand EVEX NDD
; cmovnsl that writes %eax directly.
101define i32 @cmov32rm_inv(i32 %a, i32 %x, ptr %y.ptr) {
102; CHECK-LABEL: cmov32rm_inv:
103; CHECK:       # %bb.0: # %entry
104; CHECK-NEXT:    testl %edi, %edi # encoding: [0x85,0xff]
105; CHECK-NEXT:    cmovnsl (%rdx), %esi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x49,0x32]
106; CHECK-NEXT:    retq # encoding: [0xc3]
107entry:
108  %y = load i32, ptr %y.ptr
109  %cmp = icmp slt i32 %a, 0
110  %cmov = select i1 %cmp, i32 %x, i32 %y
111  ret i32 %cmov
112}
113
; i64 inverted-condition select: the 64-bit variant of the folded-load NDD
; pattern — testq sets SF, and a single EVEX cmovnsq consumes the memory
; operand while writing the fresh destination %rax.
114define i64 @cmov64rm_inv(i64 %a, i64 %x, ptr %y.ptr) {
115; CHECK-LABEL: cmov64rm_inv:
116; CHECK:       # %bb.0: # %entry
117; CHECK-NEXT:    testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
118; CHECK-NEXT:    cmovnsq (%rdx), %rsi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x49,0x32]
119; CHECK-NEXT:    retq # encoding: [0xc3]
120entry:
121  %y = load i64, ptr %y.ptr
122  %cmp = icmp slt i64 %a, 0
123  %cmov = select i1 %cmp, i64 %x, i64 %y
124  ret i64 %cmov
125}
126