; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zbt -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32IBT
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zbt -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64IBT

; Selects of wide values are split into two selects, which can easily cause
; unnecessary control flow. Here we check some cases where we can currently
; emit a sequence of selects with shared control flow.

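; Select of an i64 based on an i32 comparison: on RV32 both halves of the
; result should share a single branch, and with Zbt the compare feeds two cmovs.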
define i64 @cmovcc64(i32 signext %a, i64 %b, i64 %c) nounwind {
; RV32I-LABEL: cmovcc64:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    addi a5, zero, 123
; RV32I-NEXT:    beq a0, a5, .LBB0_2
; RV32I-NEXT:  # %bb.1: # %entry
; RV32I-NEXT:    mv a1, a3
; RV32I-NEXT:    mv a2, a4
; RV32I-NEXT:  .LBB0_2: # %entry
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    mv a1, a2
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmovcc64:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    addi a5, a0, -123
; RV32IBT-NEXT:    cmov a0, a5, a3, a1
; RV32IBT-NEXT:    cmov a1, a5, a4, a2
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmovcc64:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    addi a3, zero, 123
; RV64I-NEXT:    beq a0, a3, .LBB0_2
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    mv a1, a2
; RV64I-NEXT:  .LBB0_2: # %entry
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmovcc64:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    addi a0, a0, -123
; RV64IBT-NEXT:    cmov a0, a0, a2, a1
; RV64IBT-NEXT:    ret
entry:
  %cmp = icmp eq i32 %a, 123
  %cond = select i1 %cmp, i64 %b, i64 %c
  ret i64 %cond
}

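; Select of an i128 based on an i64 comparison. The wide select is split into
; word-sized pieces that should all reuse the same comparison result.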
define i128 @cmovcc128(i64 signext %a, i128 %b, i128 %c) nounwind {
; RV32I-LABEL: cmovcc128:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    xori a1, a1, 123
; RV32I-NEXT:    or a1, a1, a2
; RV32I-NEXT:    mv a2, a3
; RV32I-NEXT:    beqz a1, .LBB1_2
; RV32I-NEXT:  # %bb.1: # %entry
; RV32I-NEXT:    mv a2, a4
; RV32I-NEXT:  .LBB1_2: # %entry
; RV32I-NEXT:    beqz a1, .LBB1_5
; RV32I-NEXT:  # %bb.3: # %entry
; RV32I-NEXT:    addi a7, a4, 4
; RV32I-NEXT:    bnez a1, .LBB1_6
; RV32I-NEXT:  .LBB1_4:
; RV32I-NEXT:    addi a5, a3, 8
; RV32I-NEXT:    j .LBB1_7
; RV32I-NEXT:  .LBB1_5:
; RV32I-NEXT:    addi a7, a3, 4
; RV32I-NEXT:    beqz a1, .LBB1_4
; RV32I-NEXT:  .LBB1_6: # %entry
; RV32I-NEXT:    addi a5, a4, 8
; RV32I-NEXT:  .LBB1_7: # %entry
; RV32I-NEXT:    lw a6, 0(a2)
; RV32I-NEXT:    lw a7, 0(a7)
; RV32I-NEXT:    lw a2, 0(a5)
; RV32I-NEXT:    beqz a1, .LBB1_9
; RV32I-NEXT:  # %bb.8: # %entry
; RV32I-NEXT:    addi a1, a4, 12
; RV32I-NEXT:    j .LBB1_10
; RV32I-NEXT:  .LBB1_9:
; RV32I-NEXT:    addi a1, a3, 12
; RV32I-NEXT:  .LBB1_10: # %entry
; RV32I-NEXT:    lw a1, 0(a1)
; RV32I-NEXT:    sw a1, 12(a0)
; RV32I-NEXT:    sw a2, 8(a0)
; RV32I-NEXT:    sw a7, 4(a0)
; RV32I-NEXT:    sw a6, 0(a0)
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmovcc128:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    addi a6, a3, 12
; RV32IBT-NEXT:    addi a7, a4, 12
; RV32IBT-NEXT:    addi t0, a3, 8
; RV32IBT-NEXT:    addi t1, a4, 8
; RV32IBT-NEXT:    addi t2, a3, 4
; RV32IBT-NEXT:    addi a5, a4, 4
; RV32IBT-NEXT:    xori a1, a1, 123
; RV32IBT-NEXT:    or a1, a1, a2
; RV32IBT-NEXT:    cmov a2, a1, a4, a3
; RV32IBT-NEXT:    cmov a3, a1, a5, t2
; RV32IBT-NEXT:    cmov a4, a1, t1, t0
; RV32IBT-NEXT:    cmov a1, a1, a7, a6
; RV32IBT-NEXT:    lw a1, 0(a1)
; RV32IBT-NEXT:    lw a4, 0(a4)
; RV32IBT-NEXT:    lw a3, 0(a3)
; RV32IBT-NEXT:    lw a2, 0(a2)
; RV32IBT-NEXT:    sw a1, 12(a0)
; RV32IBT-NEXT:    sw a4, 8(a0)
; RV32IBT-NEXT:    sw a3, 4(a0)
; RV32IBT-NEXT:    sw a2, 0(a0)
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmovcc128:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    addi a5, zero, 123
; RV64I-NEXT:    beq a0, a5, .LBB1_2
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    mv a1, a3
; RV64I-NEXT:    mv a2, a4
; RV64I-NEXT:  .LBB1_2: # %entry
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    mv a1, a2
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmovcc128:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    addi a5, a0, -123
; RV64IBT-NEXT:    cmov a0, a5, a3, a1
; RV64IBT-NEXT:    cmov a1, a5, a4, a2
; RV64IBT-NEXT:    ret
entry:
  %cmp = icmp eq i64 %a, 123
  %cond = select i1 %cmp, i128 %b, i128 %c
  ret i128 %cond
}

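; Select of an i64 where the i1 condition is passed directly as an argument.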
define i64 @cmov64(i1 %a, i64 %b, i64 %c) nounwind {
; RV32I-LABEL: cmov64:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    andi a5, a0, 1
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    bnez a5, .LBB2_2
; RV32I-NEXT:  # %bb.1: # %entry
; RV32I-NEXT:    mv a0, a3
; RV32I-NEXT:    mv a2, a4
; RV32I-NEXT:  .LBB2_2: # %entry
; RV32I-NEXT:    mv a1, a2
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmov64:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    andi a5, a0, 1
; RV32IBT-NEXT:    cmov a0, a5, a1, a3
; RV32IBT-NEXT:    cmov a1, a5, a2, a4
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmov64:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    andi a3, a0, 1
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    bnez a3, .LBB2_2
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:  .LBB2_2: # %entry
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmov64:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    andi a0, a0, 1
; RV64IBT-NEXT:    cmov a0, a0, a1, a2
; RV64IBT-NEXT:    ret
entry:
  %cond = select i1 %a, i64 %b, i64 %c
  ret i64 %cond
}

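; Select of an i128 where the i1 condition is passed directly as an argument.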
define i128 @cmov128(i1 %a, i128 %b, i128 %c) nounwind {
; RV32I-LABEL: cmov128:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    andi a1, a1, 1
; RV32I-NEXT:    mv a4, a2
; RV32I-NEXT:    bnez a1, .LBB3_2
; RV32I-NEXT:  # %bb.1: # %entry
; RV32I-NEXT:    mv a4, a3
; RV32I-NEXT:  .LBB3_2: # %entry
; RV32I-NEXT:    bnez a1, .LBB3_5
; RV32I-NEXT:  # %bb.3: # %entry
; RV32I-NEXT:    addi a7, a3, 4
; RV32I-NEXT:    beqz a1, .LBB3_6
; RV32I-NEXT:  .LBB3_4:
; RV32I-NEXT:    addi a5, a2, 8
; RV32I-NEXT:    j .LBB3_7
; RV32I-NEXT:  .LBB3_5:
; RV32I-NEXT:    addi a7, a2, 4
; RV32I-NEXT:    bnez a1, .LBB3_4
; RV32I-NEXT:  .LBB3_6: # %entry
; RV32I-NEXT:    addi a5, a3, 8
; RV32I-NEXT:  .LBB3_7: # %entry
; RV32I-NEXT:    lw a6, 0(a4)
; RV32I-NEXT:    lw a7, 0(a7)
; RV32I-NEXT:    lw a4, 0(a5)
; RV32I-NEXT:    bnez a1, .LBB3_9
; RV32I-NEXT:  # %bb.8: # %entry
; RV32I-NEXT:    addi a1, a3, 12
; RV32I-NEXT:    j .LBB3_10
; RV32I-NEXT:  .LBB3_9:
; RV32I-NEXT:    addi a1, a2, 12
; RV32I-NEXT:  .LBB3_10: # %entry
; RV32I-NEXT:    lw a1, 0(a1)
; RV32I-NEXT:    sw a1, 12(a0)
; RV32I-NEXT:    sw a4, 8(a0)
; RV32I-NEXT:    sw a7, 4(a0)
; RV32I-NEXT:    sw a6, 0(a0)
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmov128:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    addi a6, a3, 12
; RV32IBT-NEXT:    addi a7, a2, 12
; RV32IBT-NEXT:    addi t0, a3, 8
; RV32IBT-NEXT:    addi t1, a2, 8
; RV32IBT-NEXT:    addi a4, a3, 4
; RV32IBT-NEXT:    addi a5, a2, 4
; RV32IBT-NEXT:    andi a1, a1, 1
; RV32IBT-NEXT:    cmov a2, a1, a2, a3
; RV32IBT-NEXT:    cmov a3, a1, a5, a4
; RV32IBT-NEXT:    cmov a4, a1, t1, t0
; RV32IBT-NEXT:    cmov a1, a1, a7, a6
; RV32IBT-NEXT:    lw a1, 0(a1)
; RV32IBT-NEXT:    lw a4, 0(a4)
; RV32IBT-NEXT:    lw a3, 0(a3)
; RV32IBT-NEXT:    lw a2, 0(a2)
; RV32IBT-NEXT:    sw a1, 12(a0)
; RV32IBT-NEXT:    sw a4, 8(a0)
; RV32IBT-NEXT:    sw a3, 4(a0)
; RV32IBT-NEXT:    sw a2, 0(a0)
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmov128:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    andi a5, a0, 1
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    bnez a5, .LBB3_2
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    mv a0, a3
; RV64I-NEXT:    mv a2, a4
; RV64I-NEXT:  .LBB3_2: # %entry
; RV64I-NEXT:    mv a1, a2
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmov128:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    andi a5, a0, 1
; RV64IBT-NEXT:    cmov a0, a5, a1, a3
; RV64IBT-NEXT:    cmov a1, a5, a2, a4
; RV64IBT-NEXT:    ret
entry:
  %cond = select i1 %a, i128 %b, i128 %c
  ret i128 %cond
}

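; Two float selects that share the same condition feed an fadd; both selects
; should be lowered with a single branch.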
define float @cmovfloat(i1 %a, float %b, float %c, float %d, float %e) nounwind {
; RV32I-LABEL: cmovfloat:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    andi a0, a0, 1
; RV32I-NEXT:    bnez a0, .LBB4_2
; RV32I-NEXT:  # %bb.1: # %entry
; RV32I-NEXT:    fmv.w.x ft0, a4
; RV32I-NEXT:    fmv.w.x ft1, a2
; RV32I-NEXT:    j .LBB4_3
; RV32I-NEXT:  .LBB4_2:
; RV32I-NEXT:    fmv.w.x ft0, a3
; RV32I-NEXT:    fmv.w.x ft1, a1
; RV32I-NEXT:  .LBB4_3: # %entry
; RV32I-NEXT:    fadd.s ft0, ft1, ft0
; RV32I-NEXT:    fmv.x.w a0, ft0
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmovfloat:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    andi a0, a0, 1
; RV32IBT-NEXT:    bnez a0, .LBB4_2
; RV32IBT-NEXT:  # %bb.1: # %entry
; RV32IBT-NEXT:    fmv.w.x ft0, a4
; RV32IBT-NEXT:    fmv.w.x ft1, a2
; RV32IBT-NEXT:    j .LBB4_3
; RV32IBT-NEXT:  .LBB4_2:
; RV32IBT-NEXT:    fmv.w.x ft0, a3
; RV32IBT-NEXT:    fmv.w.x ft1, a1
; RV32IBT-NEXT:  .LBB4_3: # %entry
; RV32IBT-NEXT:    fadd.s ft0, ft1, ft0
; RV32IBT-NEXT:    fmv.x.w a0, ft0
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmovfloat:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    bnez a0, .LBB4_2
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    fmv.w.x ft0, a4
; RV64I-NEXT:    fmv.w.x ft1, a2
; RV64I-NEXT:    j .LBB4_3
; RV64I-NEXT:  .LBB4_2:
; RV64I-NEXT:    fmv.w.x ft0, a3
; RV64I-NEXT:    fmv.w.x ft1, a1
; RV64I-NEXT:  .LBB4_3: # %entry
; RV64I-NEXT:    fadd.s ft0, ft1, ft0
; RV64I-NEXT:    fmv.x.w a0, ft0
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmovfloat:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    andi a0, a0, 1
; RV64IBT-NEXT:    bnez a0, .LBB4_2
; RV64IBT-NEXT:  # %bb.1: # %entry
; RV64IBT-NEXT:    fmv.w.x ft0, a4
; RV64IBT-NEXT:    fmv.w.x ft1, a2
; RV64IBT-NEXT:    j .LBB4_3
; RV64IBT-NEXT:  .LBB4_2:
; RV64IBT-NEXT:    fmv.w.x ft0, a3
; RV64IBT-NEXT:    fmv.w.x ft1, a1
; RV64IBT-NEXT:  .LBB4_3: # %entry
; RV64IBT-NEXT:    fadd.s ft0, ft1, ft0
; RV64IBT-NEXT:    fmv.x.w a0, ft0
; RV64IBT-NEXT:    ret
entry:
  %cond1 = select i1 %a, float %b, float %c
  %cond2 = select i1 %a, float %d, float %e
  %ret = fadd float %cond1, %cond2
  ret float %ret
}

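; Select of a double with an i1 condition.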
define double @cmovdouble(i1 %a, double %b, double %c) nounwind {
; RV32I-LABEL: cmovdouble:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw a3, 8(sp)
; RV32I-NEXT:    sw a4, 12(sp)
; RV32I-NEXT:    fld ft0, 8(sp)
; RV32I-NEXT:    sw a1, 8(sp)
; RV32I-NEXT:    andi a0, a0, 1
; RV32I-NEXT:    sw a2, 12(sp)
; RV32I-NEXT:    beqz a0, .LBB5_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    fld ft0, 8(sp)
; RV32I-NEXT:  .LBB5_2: # %entry
; RV32I-NEXT:    fsd ft0, 8(sp)
; RV32I-NEXT:    lw a0, 8(sp)
; RV32I-NEXT:    lw a1, 12(sp)
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmovdouble:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    addi sp, sp, -16
; RV32IBT-NEXT:    sw a3, 8(sp)
; RV32IBT-NEXT:    sw a4, 12(sp)
; RV32IBT-NEXT:    fld ft0, 8(sp)
; RV32IBT-NEXT:    sw a1, 8(sp)
; RV32IBT-NEXT:    andi a0, a0, 1
; RV32IBT-NEXT:    sw a2, 12(sp)
; RV32IBT-NEXT:    beqz a0, .LBB5_2
; RV32IBT-NEXT:  # %bb.1:
; RV32IBT-NEXT:    fld ft0, 8(sp)
; RV32IBT-NEXT:  .LBB5_2: # %entry
; RV32IBT-NEXT:    fsd ft0, 8(sp)
; RV32IBT-NEXT:    lw a0, 8(sp)
; RV32IBT-NEXT:    lw a1, 12(sp)
; RV32IBT-NEXT:    addi sp, sp, 16
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmovdouble:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    bnez a0, .LBB5_2
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    fmv.d.x ft0, a2
; RV64I-NEXT:    fmv.x.d a0, ft0
; RV64I-NEXT:    ret
; RV64I-NEXT:  .LBB5_2:
; RV64I-NEXT:    fmv.d.x ft0, a1
; RV64I-NEXT:    fmv.x.d a0, ft0
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmovdouble:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    andi a0, a0, 1
; RV64IBT-NEXT:    bnez a0, .LBB5_2
; RV64IBT-NEXT:  # %bb.1: # %entry
; RV64IBT-NEXT:    fmv.d.x ft0, a2
; RV64IBT-NEXT:    fmv.x.d a0, ft0
; RV64IBT-NEXT:    ret
; RV64IBT-NEXT:  .LBB5_2:
; RV64IBT-NEXT:    fmv.d.x ft0, a1
; RV64IBT-NEXT:    fmv.x.d a0, ft0
; RV64IBT-NEXT:    ret
entry:
  %cond = select i1 %a, double %b, double %c
  ret double %cond
}

; Check that selects with dependencies on previous ones aren't incorrectly
; optimized.

define i32 @cmovccdep(i32 signext %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-LABEL: cmovccdep:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    addi a4, zero, 123
; RV32I-NEXT:    bne a0, a4, .LBB6_3
; RV32I-NEXT:  # %bb.1: # %entry
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    bne a0, a4, .LBB6_4
; RV32I-NEXT:  .LBB6_2: # %entry
; RV32I-NEXT:    add a0, a1, a2
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB6_3: # %entry
; RV32I-NEXT:    mv a1, a2
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    beq a0, a4, .LBB6_2
; RV32I-NEXT:  .LBB6_4: # %entry
; RV32I-NEXT:    mv a2, a3
; RV32I-NEXT:    add a0, a1, a2
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmovccdep:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    addi a0, a0, -123
; RV32IBT-NEXT:    cmov a1, a0, a2, a1
; RV32IBT-NEXT:    cmov a0, a0, a3, a1
; RV32IBT-NEXT:    add a0, a1, a0
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmovccdep:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    addi a4, zero, 123
; RV64I-NEXT:    bne a0, a4, .LBB6_3
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    mv a2, a1
; RV64I-NEXT:    bne a0, a4, .LBB6_4
; RV64I-NEXT:  .LBB6_2: # %entry
; RV64I-NEXT:    addw a0, a1, a2
; RV64I-NEXT:    ret
; RV64I-NEXT:  .LBB6_3: # %entry
; RV64I-NEXT:    mv a1, a2
; RV64I-NEXT:    mv a2, a1
; RV64I-NEXT:    beq a0, a4, .LBB6_2
; RV64I-NEXT:  .LBB6_4: # %entry
; RV64I-NEXT:    mv a2, a3
; RV64I-NEXT:    addw a0, a1, a2
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmovccdep:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    addi a0, a0, -123
; RV64IBT-NEXT:    cmov a1, a0, a2, a1
; RV64IBT-NEXT:    cmov a0, a0, a3, a1
; RV64IBT-NEXT:    addw a0, a1, a0
; RV64IBT-NEXT:    ret
entry:
  %cmp = icmp eq i32 %a, 123
  %cond1 = select i1 %cmp, i32 %b, i32 %c
  %cond2 = select i1 %cmp, i32 %cond1, i32 %d
  %ret = add i32 %cond1, %cond2
  ret i32 %ret
}

; Check that selects with different conditions aren't incorrectly optimized.

define i32 @cmovdiffcc(i1 %a, i1 %b, i32 %c, i32 %d, i32 %e, i32 %f) nounwind {
; RV32I-LABEL: cmovdiffcc:
; RV32I:       # %bb.0: # %entry
; RV32I-NEXT:    andi a0, a0, 1
; RV32I-NEXT:    andi a1, a1, 1
; RV32I-NEXT:    beqz a0, .LBB7_3
; RV32I-NEXT:  # %bb.1: # %entry
; RV32I-NEXT:    beqz a1, .LBB7_4
; RV32I-NEXT:  .LBB7_2: # %entry
; RV32I-NEXT:    add a0, a2, a4
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB7_3: # %entry
; RV32I-NEXT:    mv a2, a3
; RV32I-NEXT:    bnez a1, .LBB7_2
; RV32I-NEXT:  .LBB7_4: # %entry
; RV32I-NEXT:    mv a4, a5
; RV32I-NEXT:    add a0, a2, a4
; RV32I-NEXT:    ret
;
; RV32IBT-LABEL: cmovdiffcc:
; RV32IBT:       # %bb.0: # %entry
; RV32IBT-NEXT:    andi a1, a1, 1
; RV32IBT-NEXT:    andi a0, a0, 1
; RV32IBT-NEXT:    cmov a0, a0, a2, a3
; RV32IBT-NEXT:    cmov a1, a1, a4, a5
; RV32IBT-NEXT:    add a0, a0, a1
; RV32IBT-NEXT:    ret
;
; RV64I-LABEL: cmovdiffcc:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    andi a1, a1, 1
; RV64I-NEXT:    beqz a0, .LBB7_3
; RV64I-NEXT:  # %bb.1: # %entry
; RV64I-NEXT:    beqz a1, .LBB7_4
; RV64I-NEXT:  .LBB7_2: # %entry
; RV64I-NEXT:    addw a0, a2, a4
; RV64I-NEXT:    ret
; RV64I-NEXT:  .LBB7_3: # %entry
; RV64I-NEXT:    mv a2, a3
; RV64I-NEXT:    bnez a1, .LBB7_2
; RV64I-NEXT:  .LBB7_4: # %entry
; RV64I-NEXT:    mv a4, a5
; RV64I-NEXT:    addw a0, a2, a4
; RV64I-NEXT:    ret
;
; RV64IBT-LABEL: cmovdiffcc:
; RV64IBT:       # %bb.0: # %entry
; RV64IBT-NEXT:    andi a1, a1, 1
; RV64IBT-NEXT:    andi a0, a0, 1
; RV64IBT-NEXT:    cmov a0, a0, a2, a3
; RV64IBT-NEXT:    cmov a1, a1, a4, a5
; RV64IBT-NEXT:    addw a0, a0, a1
; RV64IBT-NEXT:    ret
entry:
  %cond1 = select i1 %a, i32 %c, i32 %d
  %cond2 = select i1 %b, i32 %e, i32 %f
  %ret = add i32 %cond1, %cond2
  ret i32 %ret
}