; xref: /llvm-project/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll (revision 4ab0f51a7518332b8b7691915b5fdad4c1ed045f)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
3; RUN:   | FileCheck %s -check-prefix=RV32I
4; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zbt -verify-machineinstrs < %s \
5; RUN:   | FileCheck %s -check-prefix=RV32IBT
6; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
7; RUN:   | FileCheck %s -check-prefix=RV64I
8; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zbt -verify-machineinstrs < %s \
9; RUN:   | FileCheck %s -check-prefix=RV64IBT
10
11; Selects of wide values are split into two selects, which can easily cause
12; unnecessary control flow. Here we check some cases where we can currently
13; emit a sequence of selects with shared control flow.
14
; i64 select keyed on an i32 equality compare. On RV32 the wide value lives in
; two GPRs, so the select is split into two i32 selects that share a single
; branch (one beq guards both mv pairs); with Zbt the compare is materialized
; as an xor and fed as the condition of two cmov instructions instead.
define i64 @cmovcc64(i32 signext %a, i64 %b, i64 %c) nounwind {
; RV32I-LABEL: cmovcc64:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    addi a5, zero, 123
; RV32I-NEXT:    beq a0, a5, .LBB0_2
; RV32I-NEXT:  # %bb.1: # %entry
; RV32I-NEXT:    mv a1, a3
; RV32I-NEXT:    mv a2, a4
; RV32I-NEXT:  .LBB0_2: # %entry
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    mv a1, a2
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmovcc64:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    addi a5, zero, 123
; RV32IBT-NEXT:    xor a5, a0, a5
; RV32IBT-NEXT:    cmov a0, a5, a3, a1
; RV32IBT-NEXT:    cmov a1, a5, a4, a2
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmovcc64:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    addi a3, zero, 123
; RV64I-NEXT:    beq a0, a3, .LBB0_2
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    mv a1, a2
; RV64I-NEXT:  .LBB0_2: # %entry
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmovcc64:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    addi a3, zero, 123
; RV64IBT-NEXT:    xor a0, a0, a3
; RV64IBT-NEXT:    cmov a0, a0, a2, a1
; RV64IBT-NEXT:    ret
entry:
  %cmp = icmp eq i32 %a, 123
  %cond = select i1 %cmp, i64 %b, i64 %c
  ret i64 %cond
}
57
; i128 select keyed on an i64 equality compare. On RV32 the i64 condition is
; itself split: xori/or fold both halves into one zero/non-zero flag. The i128
; operands and result are passed indirectly (pointers in a3/a4, result stored
; through a0), so the backend selects between the two source pointers word by
; word; Zbt turns the four per-word choices into four cmovs on the same flag.
define i128 @cmovcc128(i64 signext %a, i128 %b, i128 %c) nounwind {
; RV32I-LABEL: cmovcc128:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    xori a1, a1, 123
; RV32I-NEXT:    or a1, a1, a2
; RV32I-NEXT:    mv a2, a3
; RV32I-NEXT:    beqz a1, .LBB1_2
; RV32I-NEXT:  # %bb.1: # %entry
; RV32I-NEXT:    mv a2, a4
; RV32I-NEXT:  .LBB1_2: # %entry
; RV32I-NEXT:    beqz a1, .LBB1_5
; RV32I-NEXT:  # %bb.3: # %entry
; RV32I-NEXT:    addi a7, a4, 4
; RV32I-NEXT:    bnez a1, .LBB1_6
; RV32I-NEXT:  .LBB1_4:
; RV32I-NEXT:    addi a5, a3, 8
; RV32I-NEXT:    j .LBB1_7
; RV32I-NEXT:  .LBB1_5:
; RV32I-NEXT:    addi a7, a3, 4
; RV32I-NEXT:    beqz a1, .LBB1_4
; RV32I-NEXT:  .LBB1_6: # %entry
; RV32I-NEXT:    addi a5, a4, 8
; RV32I-NEXT:  .LBB1_7: # %entry
; RV32I-NEXT:    lw a6, 0(a2)
; RV32I-NEXT:    lw a7, 0(a7)
; RV32I-NEXT:    lw a2, 0(a5)
; RV32I-NEXT:    beqz a1, .LBB1_9
; RV32I-NEXT:  # %bb.8: # %entry
; RV32I-NEXT:    addi a1, a4, 12
; RV32I-NEXT:    j .LBB1_10
; RV32I-NEXT:  .LBB1_9:
; RV32I-NEXT:    addi a1, a3, 12
; RV32I-NEXT:  .LBB1_10: # %entry
; RV32I-NEXT:    lw a1, 0(a1)
; RV32I-NEXT:    sw a1, 12(a0)
; RV32I-NEXT:    sw a2, 8(a0)
; RV32I-NEXT:    sw a7, 4(a0)
; RV32I-NEXT:    sw a6, 0(a0)
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmovcc128:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    addi a6, a3, 12
; RV32IBT-NEXT:    addi a7, a4, 12
; RV32IBT-NEXT:    addi t0, a3, 8
; RV32IBT-NEXT:    addi t1, a4, 8
; RV32IBT-NEXT:    addi t2, a3, 4
; RV32IBT-NEXT:    addi a5, a4, 4
; RV32IBT-NEXT:    xori a1, a1, 123
; RV32IBT-NEXT:    or a1, a1, a2
; RV32IBT-NEXT:    cmov a2, a1, a4, a3
; RV32IBT-NEXT:    cmov a3, a1, a5, t2
; RV32IBT-NEXT:    cmov a4, a1, t1, t0
; RV32IBT-NEXT:    cmov a1, a1, a7, a6
; RV32IBT-NEXT:    lw a1, 0(a1)
; RV32IBT-NEXT:    lw a4, 0(a4)
; RV32IBT-NEXT:    lw a3, 0(a3)
; RV32IBT-NEXT:    lw a2, 0(a2)
; RV32IBT-NEXT:    sw a1, 12(a0)
; RV32IBT-NEXT:    sw a4, 8(a0)
; RV32IBT-NEXT:    sw a3, 4(a0)
; RV32IBT-NEXT:    sw a2, 0(a0)
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmovcc128:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    addi a5, zero, 123
; RV64I-NEXT:    beq a0, a5, .LBB1_2
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    mv a1, a3
; RV64I-NEXT:    mv a2, a4
; RV64I-NEXT:  .LBB1_2: # %entry
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    mv a1, a2
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmovcc128:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    addi a5, zero, 123
; RV64IBT-NEXT:    xor a5, a0, a5
; RV64IBT-NEXT:    cmov a0, a5, a3, a1
; RV64IBT-NEXT:    cmov a1, a5, a4, a2
; RV64IBT-NEXT:    ret
entry:
  %cmp = icmp eq i64 %a, 123
  %cond = select i1 %cmp, i128 %b, i128 %c
  ret i128 %cond
}
146
; Same as cmovcc64 but the condition arrives directly as an i1 argument, so the
; codegen only needs an andi to mask it to one bit (no compare). The two RV32
; half-selects still share one branch; Zbt uses the masked bit for both cmovs.
define i64 @cmov64(i1 %a, i64 %b, i64 %c) nounwind {
; RV32I-LABEL: cmov64:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    andi a5, a0, 1
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    bnez a5, .LBB2_2
; RV32I-NEXT:  # %bb.1: # %entry
; RV32I-NEXT:    mv a0, a3
; RV32I-NEXT:    mv a2, a4
; RV32I-NEXT:  .LBB2_2: # %entry
; RV32I-NEXT:    mv a1, a2
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmov64:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    andi a5, a0, 1
; RV32IBT-NEXT:    cmov a0, a5, a1, a3
; RV32IBT-NEXT:    cmov a1, a5, a2, a4
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmov64:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    andi a3, a0, 1
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    bnez a3, .LBB2_2
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:  .LBB2_2: # %entry
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmov64:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    andi a0, a0, 1
; RV64IBT-NEXT:    cmov a0, a0, a1, a2
; RV64IBT-NEXT:    ret
entry:
  %cond = select i1 %a, i64 %b, i64 %c
  ret i64 %cond
}
186
; i128 select on a bare i1 condition. As in cmovcc128, RV32 passes the i128
; operands indirectly and selects between the two source pointers per word;
; on RV64 the value is two GPRs and the shape matches the cmov64 RV32 case.
define i128 @cmov128(i1 %a, i128 %b, i128 %c) nounwind {
; RV32I-LABEL: cmov128:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    andi a1, a1, 1
; RV32I-NEXT:    mv a4, a2
; RV32I-NEXT:    bnez a1, .LBB3_2
; RV32I-NEXT:  # %bb.1: # %entry
; RV32I-NEXT:    mv a4, a3
; RV32I-NEXT:  .LBB3_2: # %entry
; RV32I-NEXT:    bnez a1, .LBB3_5
; RV32I-NEXT:  # %bb.3: # %entry
; RV32I-NEXT:    addi a7, a3, 4
; RV32I-NEXT:    beqz a1, .LBB3_6
; RV32I-NEXT:  .LBB3_4:
; RV32I-NEXT:    addi a5, a2, 8
; RV32I-NEXT:    j .LBB3_7
; RV32I-NEXT:  .LBB3_5:
; RV32I-NEXT:    addi a7, a2, 4
; RV32I-NEXT:    bnez a1, .LBB3_4
; RV32I-NEXT:  .LBB3_6: # %entry
; RV32I-NEXT:    addi a5, a3, 8
; RV32I-NEXT:  .LBB3_7: # %entry
; RV32I-NEXT:    lw a6, 0(a4)
; RV32I-NEXT:    lw a7, 0(a7)
; RV32I-NEXT:    lw a4, 0(a5)
; RV32I-NEXT:    bnez a1, .LBB3_9
; RV32I-NEXT:  # %bb.8: # %entry
; RV32I-NEXT:    addi a1, a3, 12
; RV32I-NEXT:    j .LBB3_10
; RV32I-NEXT:  .LBB3_9:
; RV32I-NEXT:    addi a1, a2, 12
; RV32I-NEXT:  .LBB3_10: # %entry
; RV32I-NEXT:    lw a1, 0(a1)
; RV32I-NEXT:    sw a1, 12(a0)
; RV32I-NEXT:    sw a4, 8(a0)
; RV32I-NEXT:    sw a7, 4(a0)
; RV32I-NEXT:    sw a6, 0(a0)
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmov128:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    addi a6, a3, 12
; RV32IBT-NEXT:    addi a7, a2, 12
; RV32IBT-NEXT:    addi t0, a3, 8
; RV32IBT-NEXT:    addi t1, a2, 8
; RV32IBT-NEXT:    addi a4, a3, 4
; RV32IBT-NEXT:    addi a5, a2, 4
; RV32IBT-NEXT:    andi a1, a1, 1
; RV32IBT-NEXT:    cmov a2, a1, a2, a3
; RV32IBT-NEXT:    cmov a3, a1, a5, a4
; RV32IBT-NEXT:    cmov a4, a1, t1, t0
; RV32IBT-NEXT:    cmov a1, a1, a7, a6
; RV32IBT-NEXT:    lw a1, 0(a1)
; RV32IBT-NEXT:    lw a4, 0(a4)
; RV32IBT-NEXT:    lw a3, 0(a3)
; RV32IBT-NEXT:    lw a2, 0(a2)
; RV32IBT-NEXT:    sw a1, 12(a0)
; RV32IBT-NEXT:    sw a4, 8(a0)
; RV32IBT-NEXT:    sw a3, 4(a0)
; RV32IBT-NEXT:    sw a2, 0(a0)
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmov128:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    andi a5, a0, 1
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    bnez a5, .LBB3_2
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    mv a0, a3
; RV64I-NEXT:    mv a2, a4
; RV64I-NEXT:  .LBB3_2: # %entry
; RV64I-NEXT:    mv a1, a2
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmov128:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    andi a5, a0, 1
; RV64IBT-NEXT:    cmov a0, a5, a1, a3
; RV64IBT-NEXT:    cmov a1, a5, a2, a4
; RV64IBT-NEXT:    ret
entry:
  %cond = select i1 %a, i128 %b, i128 %c
  ret i128 %cond
}
271
; Two float selects on the SAME condition: both are folded into a single
; branch diamond (each side of .LBB4_1/.LBB4_2 moves both chosen values into
; ft0/ft1), rather than emitting two separate diamonds. Note Zbt's cmov is an
; integer-register instruction, so the IBT runs produce the same FP branch
; sequence as the base ISA here.
define float @cmovfloat(i1 %a, float %b, float %c, float %d, float %e) nounwind {
; RV32I-LABEL: cmovfloat:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    andi a0, a0, 1
; RV32I-NEXT:    bnez a0, .LBB4_2
; RV32I-NEXT:  # %bb.1: # %entry
; RV32I-NEXT:    fmv.w.x ft0, a4
; RV32I-NEXT:    fmv.w.x ft1, a2
; RV32I-NEXT:    j .LBB4_3
; RV32I-NEXT:  .LBB4_2:
; RV32I-NEXT:    fmv.w.x ft0, a3
; RV32I-NEXT:    fmv.w.x ft1, a1
; RV32I-NEXT:  .LBB4_3: # %entry
; RV32I-NEXT:    fadd.s ft0, ft1, ft0
; RV32I-NEXT:    fmv.x.w a0, ft0
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmovfloat:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    andi a0, a0, 1
; RV32IBT-NEXT:    bnez a0, .LBB4_2
; RV32IBT-NEXT:  # %bb.1: # %entry
; RV32IBT-NEXT:    fmv.w.x ft0, a4
; RV32IBT-NEXT:    fmv.w.x ft1, a2
; RV32IBT-NEXT:    j .LBB4_3
; RV32IBT-NEXT:  .LBB4_2:
; RV32IBT-NEXT:    fmv.w.x ft0, a3
; RV32IBT-NEXT:    fmv.w.x ft1, a1
; RV32IBT-NEXT:  .LBB4_3: # %entry
; RV32IBT-NEXT:    fadd.s ft0, ft1, ft0
; RV32IBT-NEXT:    fmv.x.w a0, ft0
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmovfloat:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    bnez a0, .LBB4_2
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    fmv.w.x ft0, a4
; RV64I-NEXT:    fmv.w.x ft1, a2
; RV64I-NEXT:    j .LBB4_3
; RV64I-NEXT:  .LBB4_2:
; RV64I-NEXT:    fmv.w.x ft0, a3
; RV64I-NEXT:    fmv.w.x ft1, a1
; RV64I-NEXT:  .LBB4_3: # %entry
; RV64I-NEXT:    fadd.s ft0, ft1, ft0
; RV64I-NEXT:    fmv.x.w a0, ft0
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmovfloat:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    andi a0, a0, 1
; RV64IBT-NEXT:    bnez a0, .LBB4_2
; RV64IBT-NEXT:  # %bb.1: # %entry
; RV64IBT-NEXT:    fmv.w.x ft0, a4
; RV64IBT-NEXT:    fmv.w.x ft1, a2
; RV64IBT-NEXT:    j .LBB4_3
; RV64IBT-NEXT:  .LBB4_2:
; RV64IBT-NEXT:    fmv.w.x ft0, a3
; RV64IBT-NEXT:    fmv.w.x ft1, a1
; RV64IBT-NEXT:  .LBB4_3: # %entry
; RV64IBT-NEXT:    fadd.s ft0, ft1, ft0
; RV64IBT-NEXT:    fmv.x.w a0, ft0
; RV64IBT-NEXT:    ret
entry:
  %cond1 = select i1 %a, float %b, float %c
  %cond2 = select i1 %a, float %d, float %e
  %ret = fadd float %cond1, %cond2
  ret float %ret
}
342
; double select on an i1 condition. On RV32 each double arrives in a GPR pair
; and is assembled into an FP register by storing both words to the stack and
; fld-ing them back; the select is then a branch over a second fld. On RV64 a
; single fmv.d.x per side suffices. As in cmovfloat, Zbt makes no difference
; for FP values.
define double @cmovdouble(i1 %a, double %b, double %c) nounwind {
; RV32I-LABEL: cmovdouble:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw a3, 8(sp)
; RV32I-NEXT:    sw a4, 12(sp)
; RV32I-NEXT:    fld ft0, 8(sp)
; RV32I-NEXT:    sw a1, 8(sp)
; RV32I-NEXT:    andi a0, a0, 1
; RV32I-NEXT:    sw a2, 12(sp)
; RV32I-NEXT:    beqz a0, .LBB5_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    fld ft0, 8(sp)
; RV32I-NEXT:  .LBB5_2: # %entry
; RV32I-NEXT:    fsd ft0, 8(sp)
; RV32I-NEXT:    lw a0, 8(sp)
; RV32I-NEXT:    lw a1, 12(sp)
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmovdouble:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    addi sp, sp, -16
; RV32IBT-NEXT:    sw a3, 8(sp)
; RV32IBT-NEXT:    sw a4, 12(sp)
; RV32IBT-NEXT:    fld ft0, 8(sp)
; RV32IBT-NEXT:    sw a1, 8(sp)
; RV32IBT-NEXT:    andi a0, a0, 1
; RV32IBT-NEXT:    sw a2, 12(sp)
; RV32IBT-NEXT:    beqz a0, .LBB5_2
; RV32IBT-NEXT:  # %bb.1:
; RV32IBT-NEXT:    fld ft0, 8(sp)
; RV32IBT-NEXT:  .LBB5_2: # %entry
; RV32IBT-NEXT:    fsd ft0, 8(sp)
; RV32IBT-NEXT:    lw a0, 8(sp)
; RV32IBT-NEXT:    lw a1, 12(sp)
; RV32IBT-NEXT:    addi sp, sp, 16
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmovdouble:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    bnez a0, .LBB5_2
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    fmv.d.x ft0, a2
; RV64I-NEXT:    fmv.x.d a0, ft0
; RV64I-NEXT:    ret
; RV64I-NEXT:  .LBB5_2:
; RV64I-NEXT:    fmv.d.x ft0, a1
; RV64I-NEXT:    fmv.x.d a0, ft0
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmovdouble:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    andi a0, a0, 1
; RV64IBT-NEXT:    bnez a0, .LBB5_2
; RV64IBT-NEXT:  # %bb.1: # %entry
; RV64IBT-NEXT:    fmv.d.x ft0, a2
; RV64IBT-NEXT:    fmv.x.d a0, ft0
; RV64IBT-NEXT:    ret
; RV64IBT-NEXT:  .LBB5_2:
; RV64IBT-NEXT:    fmv.d.x ft0, a1
; RV64IBT-NEXT:    fmv.x.d a0, ft0
; RV64IBT-NEXT:    ret
entry:
  %cond = select i1 %a, double %b, double %c
  ret double %cond
}
411
412; Check that selects with dependencies on previous ones aren't incorrectly
413; optimized.
414
; %cond2 selects between %cond1 (the result of the first select) and %d, so
; the two selects cannot be merged into one shared diamond: the second must
; observe the first's output. The base-ISA runs keep two branches on the same
; compare; Zbt chains two cmovs, feeding the first cmov's result into the
; second.
define i32 @cmovccdep(i32 signext %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-LABEL: cmovccdep:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    addi a4, zero, 123
; RV32I-NEXT:    bne a0, a4, .LBB6_3
; RV32I-NEXT:  # %bb.1: # %entry
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    bne a0, a4, .LBB6_4
; RV32I-NEXT:  .LBB6_2: # %entry
; RV32I-NEXT:    add a0, a1, a2
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB6_3: # %entry
; RV32I-NEXT:    mv a1, a2
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    beq a0, a4, .LBB6_2
; RV32I-NEXT:  .LBB6_4: # %entry
; RV32I-NEXT:    mv a2, a3
; RV32I-NEXT:    add a0, a1, a2
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmovccdep:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    addi a4, zero, 123
; RV32IBT-NEXT:    xor a0, a0, a4
; RV32IBT-NEXT:    cmov a1, a0, a2, a1
; RV32IBT-NEXT:    cmov a0, a0, a3, a1
; RV32IBT-NEXT:    add a0, a1, a0
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmovccdep:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    addi a4, zero, 123
; RV64I-NEXT:    bne a0, a4, .LBB6_3
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    mv a2, a1
; RV64I-NEXT:    bne a0, a4, .LBB6_4
; RV64I-NEXT:  .LBB6_2: # %entry
; RV64I-NEXT:    addw a0, a1, a2
; RV64I-NEXT:    ret
; RV64I-NEXT:  .LBB6_3: # %entry
; RV64I-NEXT:    mv a1, a2
; RV64I-NEXT:    mv a2, a1
; RV64I-NEXT:    beq a0, a4, .LBB6_2
; RV64I-NEXT:  .LBB6_4: # %entry
; RV64I-NEXT:    mv a2, a3
; RV64I-NEXT:    addw a0, a1, a2
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmovccdep:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    addi a4, zero, 123
; RV64IBT-NEXT:    xor a0, a0, a4
; RV64IBT-NEXT:    cmov a1, a0, a2, a1
; RV64IBT-NEXT:    cmov a0, a0, a3, a1
; RV64IBT-NEXT:    addw a0, a1, a0
; RV64IBT-NEXT:    ret
entry:
  %cmp = icmp eq i32 %a, 123
  %cond1 = select i1 %cmp, i32 %b, i32 %c
  %cond2 = select i1 %cmp, i32 %cond1, i32 %d
  %ret = add i32 %cond1, %cond2
  ret i32 %ret
}
478
479; Check that selects with different conditions aren't incorrectly optimized.
480
; Two selects with DIFFERENT i1 conditions (%a and %b): they must not be
; collapsed into one diamond keyed on a single flag. The base-ISA runs branch
; on each masked condition separately; Zbt emits one cmov per condition.
define i32 @cmovdiffcc(i1 %a, i1 %b, i32 %c, i32 %d, i32 %e, i32 %f) nounwind {
; RV32I-LABEL: cmovdiffcc:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    andi a0, a0, 1
; RV32I-NEXT:    andi a1, a1, 1
; RV32I-NEXT:    beqz a0, .LBB7_3
; RV32I-NEXT:  # %bb.1: # %entry
; RV32I-NEXT:    beqz a1, .LBB7_4
; RV32I-NEXT:  .LBB7_2: # %entry
; RV32I-NEXT:    add a0, a2, a4
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB7_3: # %entry
; RV32I-NEXT:    mv a2, a3
; RV32I-NEXT:    bnez a1, .LBB7_2
; RV32I-NEXT:  .LBB7_4: # %entry
; RV32I-NEXT:    mv a4, a5
; RV32I-NEXT:    add a0, a2, a4
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmovdiffcc:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    andi a1, a1, 1
; RV32IBT-NEXT:    andi a0, a0, 1
; RV32IBT-NEXT:    cmov a0, a0, a2, a3
; RV32IBT-NEXT:    cmov a1, a1, a4, a5
; RV32IBT-NEXT:    add a0, a0, a1
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmovdiffcc:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    andi a1, a1, 1
; RV64I-NEXT:    beqz a0, .LBB7_3
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    beqz a1, .LBB7_4
; RV64I-NEXT:  .LBB7_2: # %entry
; RV64I-NEXT:    addw a0, a2, a4
; RV64I-NEXT:    ret
; RV64I-NEXT:  .LBB7_3: # %entry
; RV64I-NEXT:    mv a2, a3
; RV64I-NEXT:    bnez a1, .LBB7_2
; RV64I-NEXT:  .LBB7_4: # %entry
; RV64I-NEXT:    mv a4, a5
; RV64I-NEXT:    addw a0, a2, a4
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmovdiffcc:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    andi a1, a1, 1
; RV64IBT-NEXT:    andi a0, a0, 1
; RV64IBT-NEXT:    cmov a0, a0, a2, a3
; RV64IBT-NEXT:    cmov a1, a1, a4, a5
; RV64IBT-NEXT:    addw a0, a0, a1
; RV64IBT-NEXT:    ret
entry:
  %cond1 = select i1 %a, i32 %c, i32 %d
  %cond2 = select i1 %b, i32 %e, i32 %f
  %ret = add i32 %cond1, %cond2
  ret i32 %ret
}
541