; Source: llvm-project/llvm/test/CodeGen/X86/or-lea.ll (revision 47a1704ac94c8aeb1aa7e0fc438ff99d36b632c6)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64,NOBMI
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi | FileCheck %s --check-prefixes=X64,BMI

; InstCombine and DAGCombiner transform an 'add' into an 'or'
; if there are no common bits from the incoming operands.
; LEA instruction selection should be able to see through that
; transform and reduce add/shift/or instruction counts.

define i32 @or_shift1_and1(i32 %x, i32 %y) {
; X86-LABEL: or_shift1_and1:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    andl $1, %ecx
; X86-NEXT:    leal (%ecx,%eax,2), %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_shift1_and1:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    andl $1, %esi
; X64-NEXT:    leal (%rsi,%rdi,2), %eax
; X64-NEXT:    retq
  %shl = shl i32 %x, 1
  %and = and i32 %y, 1
  %or = or i32 %and, %shl
  ret i32 %or
}

define i32 @or_shift1_and1_swapped(i32 %x, i32 %y) {
; X86-LABEL: or_shift1_and1_swapped:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    andl $1, %ecx
; X86-NEXT:    leal (%ecx,%eax,2), %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_shift1_and1_swapped:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    andl $1, %esi
; X64-NEXT:    leal (%rsi,%rdi,2), %eax
; X64-NEXT:    retq
  %shl = shl i32 %x, 1
  %and = and i32 %y, 1
  %or = or i32 %shl, %and
  ret i32 %or
}

define i32 @or_shift2_and1(i32 %x, i32 %y) {
; X86-LABEL: or_shift2_and1:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    andl $1, %ecx
; X86-NEXT:    leal (%ecx,%eax,4), %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_shift2_and1:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    andl $1, %esi
; X64-NEXT:    leal (%rsi,%rdi,4), %eax
; X64-NEXT:    retq
  %shl = shl i32 %x, 2
  %and = and i32 %y, 1
  %or = or i32 %shl, %and
  ret i32 %or
}

define i32 @or_shift3_and1(i32 %x, i32 %y) {
; X86-LABEL: or_shift3_and1:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    andl $1, %ecx
; X86-NEXT:    leal (%ecx,%eax,8), %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_shift3_and1:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    andl $1, %esi
; X64-NEXT:    leal (%rsi,%rdi,8), %eax
; X64-NEXT:    retq
  %shl = shl i32 %x, 3
  %and = and i32 %y, 1
  %or = or i32 %shl, %and
  ret i32 %or
}

define i32 @or_shift3_and7(i32 %x, i32 %y) {
; X86-LABEL: or_shift3_and7:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    andl $7, %ecx
; X86-NEXT:    leal (%ecx,%eax,8), %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_shift3_and7:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    andl $7, %esi
; X64-NEXT:    leal (%rsi,%rdi,8), %eax
; X64-NEXT:    retq
  %shl = shl i32 %x, 3
  %and = and i32 %y, 7
  %or = or i32 %shl, %and
  ret i32 %or
}

; The shift is too big for an LEA.

define i32 @or_shift4_and1(i32 %x, i32 %y) {
; X86-LABEL: or_shift4_and1:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    shll $4, %ecx
; X86-NEXT:    andl $1, %eax
; X86-NEXT:    orl %ecx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_shift4_and1:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    shll $4, %edi
; X64-NEXT:    andl $1, %esi
; X64-NEXT:    leal (%rsi,%rdi), %eax
; X64-NEXT:    retq
  %shl = shl i32 %x, 4
  %and = and i32 %y, 1
  %or = or i32 %shl, %and
  ret i32 %or
}

; The mask is too big for the shift, so the 'or' isn't equivalent to an 'add'.

define i32 @or_shift3_and8(i32 %x, i32 %y) {
; X86-LABEL: or_shift3_and8:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    shll $3, %ecx
; X86-NEXT:    andl $8, %eax
; X86-NEXT:    orl %ecx, %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_shift3_and8:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    leal (,%rdi,8), %eax
; X64-NEXT:    andl $8, %esi
; X64-NEXT:    orl %esi, %eax
; X64-NEXT:    retq
  %shl = shl i32 %x, 3
  %and = and i32 %y, 8
  %or = or i32 %shl, %and
  ret i32 %or
}

; 64-bit operands should work too.

define i64 @or_shift1_and1_64(i64 %x, i64 %y) {
; X86-LABEL: or_shift1_and1_64:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    shldl $1, %ecx, %edx
; X86-NEXT:    andl $1, %eax
; X86-NEXT:    leal (%eax,%ecx,2), %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_shift1_and1_64:
; X64:       # %bb.0:
; X64-NEXT:    andl $1, %esi
; X64-NEXT:    leaq (%rsi,%rdi,2), %rax
; X64-NEXT:    retq
  %shl = shl i64 %x, 1
  %and = and i64 %y, 1
  %or = or i64 %and, %shl
  ret i64 %or
}

; In the following patterns, lhs and rhs of the or instruction have no common bits.

define i32 @or_and_and_rhs_neg_i32(i32 %x, i32 %y, i32 %z) {
; X86-LABEL: or_and_and_rhs_neg_i32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    incl %eax
; X86-NEXT:    retl
;
; NOBMI-LABEL: or_and_and_rhs_neg_i32:
; NOBMI:       # %bb.0: # %entry
; NOBMI-NEXT:    # kill: def $edx killed $edx def $rdx
; NOBMI-NEXT:    xorl %edi, %edx
; NOBMI-NEXT:    andl %esi, %edx
; NOBMI-NEXT:    xorl %edi, %edx
; NOBMI-NEXT:    leal 1(%rdx), %eax
; NOBMI-NEXT:    retq
;
; BMI-LABEL: or_and_and_rhs_neg_i32:
; BMI:       # %bb.0: # %entry
; BMI-NEXT:    # kill: def $edx killed $edx def $rdx
; BMI-NEXT:    andl %esi, %edx
; BMI-NEXT:    andnl %edi, %esi, %eax
; BMI-NEXT:    leal 1(%rdx,%rax), %eax
; BMI-NEXT:    retq
entry:
  %and1 = and i32 %z, %y
  %xor = xor i32 %y, -1
  %and2 = and i32 %x, %xor
  %or = or i32 %and1, %and2
  %inc = add i32 %or, 1
  ret i32 %inc
}

define i32 @or_and_and_lhs_neg_i32(i32 %x, i32 %y, i32 %z) {
; X86-LABEL: or_and_and_lhs_neg_i32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    incl %eax
; X86-NEXT:    retl
;
; NOBMI-LABEL: or_and_and_lhs_neg_i32:
; NOBMI:       # %bb.0: # %entry
; NOBMI-NEXT:    # kill: def $edx killed $edx def $rdx
; NOBMI-NEXT:    xorl %edi, %edx
; NOBMI-NEXT:    andl %esi, %edx
; NOBMI-NEXT:    xorl %edi, %edx
; NOBMI-NEXT:    leal 1(%rdx), %eax
; NOBMI-NEXT:    retq
;
; BMI-LABEL: or_and_and_lhs_neg_i32:
; BMI:       # %bb.0: # %entry
; BMI-NEXT:    # kill: def $edx killed $edx def $rdx
; BMI-NEXT:    andl %esi, %edx
; BMI-NEXT:    andnl %edi, %esi, %eax
; BMI-NEXT:    leal 1(%rdx,%rax), %eax
; BMI-NEXT:    retq
entry:
  %and1 = and i32 %z, %y
  %xor = xor i32 %y, -1
  %and2 = and i32 %xor, %x
  %or = or i32 %and1, %and2
  %inc = add i32 %or, 1
  ret i32 %inc
}

define i32 @or_and_rhs_neg_and_i32(i32 %x, i32 %y, i32 %z) {
; X86-LABEL: or_and_rhs_neg_and_i32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    incl %eax
; X86-NEXT:    retl
;
; NOBMI-LABEL: or_and_rhs_neg_and_i32:
; NOBMI:       # %bb.0: # %entry
; NOBMI-NEXT:    # kill: def $edi killed $edi def $rdi
; NOBMI-NEXT:    xorl %edx, %edi
; NOBMI-NEXT:    andl %esi, %edi
; NOBMI-NEXT:    xorl %edx, %edi
; NOBMI-NEXT:    leal 1(%rdi), %eax
; NOBMI-NEXT:    retq
;
; BMI-LABEL: or_and_rhs_neg_and_i32:
; BMI:       # %bb.0: # %entry
; BMI-NEXT:    # kill: def $edi killed $edi def $rdi
; BMI-NEXT:    andnl %edx, %esi, %eax
; BMI-NEXT:    andl %esi, %edi
; BMI-NEXT:    leal 1(%rax,%rdi), %eax
; BMI-NEXT:    retq
entry:
  %xor = xor i32 %y, -1
  %and1 = and i32 %z, %xor
  %and2 = and i32 %x, %y
  %or = or i32 %and1, %and2
  %inc = add i32 %or, 1
  ret i32 %inc
}

define i32 @or_and_lhs_neg_and_i32(i32 %x, i32 %y, i32 %z) {
; X86-LABEL: or_and_lhs_neg_and_i32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    incl %eax
; X86-NEXT:    retl
;
; NOBMI-LABEL: or_and_lhs_neg_and_i32:
; NOBMI:       # %bb.0: # %entry
; NOBMI-NEXT:    # kill: def $edi killed $edi def $rdi
; NOBMI-NEXT:    xorl %edx, %edi
; NOBMI-NEXT:    andl %esi, %edi
; NOBMI-NEXT:    xorl %edx, %edi
; NOBMI-NEXT:    leal 1(%rdi), %eax
; NOBMI-NEXT:    retq
;
; BMI-LABEL: or_and_lhs_neg_and_i32:
; BMI:       # %bb.0: # %entry
; BMI-NEXT:    # kill: def $edi killed $edi def $rdi
; BMI-NEXT:    andnl %edx, %esi, %eax
; BMI-NEXT:    andl %esi, %edi
; BMI-NEXT:    leal 1(%rax,%rdi), %eax
; BMI-NEXT:    retq
entry:
  %xor = xor i32 %y, -1
  %and1 = and i32 %xor, %z
  %and2 = and i32 %x, %y
  %or = or i32 %and1, %and2
  %inc = add i32 %or, 1
  ret i32 %inc
}

define i64 @or_and_and_rhs_neg_i64(i64 %x, i64 %y, i64 %z) {
; X86-LABEL: or_and_and_rhs_neg_i64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    xorl %eax, %edx
; X86-NEXT:    andl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    xorl %eax, %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    addl $1, %eax
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    retl
;
; NOBMI-LABEL: or_and_and_rhs_neg_i64:
; NOBMI:       # %bb.0: # %entry
; NOBMI-NEXT:    xorq %rdi, %rdx
; NOBMI-NEXT:    andq %rsi, %rdx
; NOBMI-NEXT:    xorq %rdi, %rdx
; NOBMI-NEXT:    leaq 1(%rdx), %rax
; NOBMI-NEXT:    retq
;
; BMI-LABEL: or_and_and_rhs_neg_i64:
; BMI:       # %bb.0: # %entry
; BMI-NEXT:    andq %rsi, %rdx
; BMI-NEXT:    andnq %rdi, %rsi, %rax
; BMI-NEXT:    leaq 1(%rdx,%rax), %rax
; BMI-NEXT:    retq
entry:
  %and1 = and i64 %z, %y
  %xor = xor i64 %y, -1
  %and2 = and i64 %x, %xor
  %or = or i64 %and1, %and2
  %inc = add i64 %or, 1
  ret i64 %inc
}

define i64 @or_and_and_lhs_neg_i64(i64 %x, i64 %y, i64 %z) {
; X86-LABEL: or_and_and_lhs_neg_i64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    xorl %eax, %edx
; X86-NEXT:    andl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    xorl %eax, %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    addl $1, %eax
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    retl
;
; NOBMI-LABEL: or_and_and_lhs_neg_i64:
; NOBMI:       # %bb.0: # %entry
; NOBMI-NEXT:    xorq %rdi, %rdx
; NOBMI-NEXT:    andq %rsi, %rdx
; NOBMI-NEXT:    xorq %rdi, %rdx
; NOBMI-NEXT:    leaq 1(%rdx), %rax
; NOBMI-NEXT:    retq
;
; BMI-LABEL: or_and_and_lhs_neg_i64:
; BMI:       # %bb.0: # %entry
; BMI-NEXT:    andq %rsi, %rdx
; BMI-NEXT:    andnq %rdi, %rsi, %rax
; BMI-NEXT:    leaq 1(%rdx,%rax), %rax
; BMI-NEXT:    retq
entry:
  %and1 = and i64 %z, %y
  %xor = xor i64 %y, -1
  %and2 = and i64 %xor, %x
  %or = or i64 %and1, %and2
  %inc = add i64 %or, 1
  ret i64 %inc
}

define i64 @or_and_rhs_neg_and_i64(i64 %x, i64 %y, i64 %z) {
; X86-LABEL: or_and_rhs_neg_and_i64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    xorl %eax, %edx
; X86-NEXT:    andl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    xorl %eax, %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    addl $1, %eax
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    retl
;
; NOBMI-LABEL: or_and_rhs_neg_and_i64:
; NOBMI:       # %bb.0: # %entry
; NOBMI-NEXT:    xorq %rdx, %rdi
; NOBMI-NEXT:    andq %rsi, %rdi
; NOBMI-NEXT:    xorq %rdx, %rdi
; NOBMI-NEXT:    leaq 1(%rdi), %rax
; NOBMI-NEXT:    retq
;
; BMI-LABEL: or_and_rhs_neg_and_i64:
; BMI:       # %bb.0: # %entry
; BMI-NEXT:    andnq %rdx, %rsi, %rax
; BMI-NEXT:    andq %rsi, %rdi
; BMI-NEXT:    leaq 1(%rax,%rdi), %rax
; BMI-NEXT:    retq
entry:
  %xor = xor i64 %y, -1
  %and1 = and i64 %z, %xor
  %and2 = and i64 %x, %y
  %or = or i64 %and1, %and2
  %inc = add i64 %or, 1
  ret i64 %inc
}

define i64 @or_and_lhs_neg_and_i64(i64 %x, i64 %y, i64 %z) {
; X86-LABEL: or_and_lhs_neg_and_i64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    xorl %eax, %edx
; X86-NEXT:    andl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    xorl %eax, %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %eax
; X86-NEXT:    addl $1, %eax
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    retl
;
; NOBMI-LABEL: or_and_lhs_neg_and_i64:
; NOBMI:       # %bb.0: # %entry
; NOBMI-NEXT:    xorq %rdx, %rdi
; NOBMI-NEXT:    andq %rsi, %rdi
; NOBMI-NEXT:    xorq %rdx, %rdi
; NOBMI-NEXT:    leaq 1(%rdi), %rax
; NOBMI-NEXT:    retq
;
; BMI-LABEL: or_and_lhs_neg_and_i64:
; BMI:       # %bb.0: # %entry
; BMI-NEXT:    andnq %rdx, %rsi, %rax
; BMI-NEXT:    andq %rsi, %rdi
; BMI-NEXT:    leaq 1(%rax,%rdi), %rax
; BMI-NEXT:    retq
entry:
  %xor = xor i64 %y, -1
  %and1 = and i64 %xor, %z
  %and2 = and i64 %x, %y
  %or = or i64 %and1, %and2
  %inc = add i64 %or, 1
  ret i64 %inc
}

define i32 @or_sext1(i32 %x) {
; X86-LABEL: or_sext1:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl $43, {{[0-9]+}}(%esp)
; X86-NEXT:    setl %al
; X86-NEXT:    leal -1(%eax,%eax), %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_sext1:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl $43, %edi
; X64-NEXT:    setl %al
; X64-NEXT:    leal -1(%rax,%rax), %eax
; X64-NEXT:    retq
  %cmp = icmp sgt i32 %x, 42
  %sext = sext i1 %cmp to i32
  %or = or i32 %sext, 1
  ret i32 %or
}

define i64 @or_sext1_64(i64 %x) {
; X86-LABEL: or_sext1_64:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    movl $42, %ecx
; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    setl %al
; X86-NEXT:    movzbl %al, %edx
; X86-NEXT:    negl %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    orl $1, %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_sext1_64:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpq $43, %rdi
; X64-NEXT:    setl %al
; X64-NEXT:    leaq -1(%rax,%rax), %rax
; X64-NEXT:    retq
  %cmp = icmp sgt i64 %x, 42
  %sext = sext i1 %cmp to i64
  %or = or i64 %sext, 1
  ret i64 %or
}

define i32 @or_sext2(i32 %x) {
; X86-LABEL: or_sext2:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl $43, {{[0-9]+}}(%esp)
; X86-NEXT:    setl %al
; X86-NEXT:    leal -1(%eax,%eax,2), %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_sext2:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl $43, %edi
; X64-NEXT:    setl %al
; X64-NEXT:    leal -1(%rax,%rax,2), %eax
; X64-NEXT:    retq
  %cmp = icmp sgt i32 %x, 42
  %sext = sext i1 %cmp to i32
  %or = or i32 %sext, 2
  ret i32 %or
}

define i64 @or_sext2_64(i64 %x) {
; X86-LABEL: or_sext2_64:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    movl $42, %ecx
; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    setl %al
; X86-NEXT:    movzbl %al, %edx
; X86-NEXT:    negl %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    orl $2, %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_sext2_64:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpq $43, %rdi
; X64-NEXT:    setl %al
; X64-NEXT:    leaq -1(%rax,%rax,2), %rax
; X64-NEXT:    retq
  %cmp = icmp sgt i64 %x, 42
  %sext = sext i1 %cmp to i64
  %or = or i64 %sext, 2
  ret i64 %or
}

define i32 @or_sext3(i32 %x) {
; X86-LABEL: or_sext3:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl $43, {{[0-9]+}}(%esp)
; X86-NEXT:    setl %al
; X86-NEXT:    leal -1(,%eax,4), %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_sext3:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl $43, %edi
; X64-NEXT:    setl %al
; X64-NEXT:    leal -1(,%rax,4), %eax
; X64-NEXT:    retq
  %cmp = icmp sgt i32 %x, 42
  %sext = sext i1 %cmp to i32
  %or = or i32 %sext, 3
  ret i32 %or
}

define i64 @or_sext3_64(i64 %x) {
; X86-LABEL: or_sext3_64:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    movl $42, %ecx
; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    setl %al
; X86-NEXT:    movzbl %al, %edx
; X86-NEXT:    negl %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    orl $3, %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_sext3_64:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpq $43, %rdi
; X64-NEXT:    setl %al
; X64-NEXT:    leaq -1(,%rax,4), %rax
; X64-NEXT:    retq
  %cmp = icmp sgt i64 %x, 42
  %sext = sext i1 %cmp to i64
  %or = or i64 %sext, 3
  ret i64 %or
}

define i32 @or_sext4(i32 %x) {
; X86-LABEL: or_sext4:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl $43, {{[0-9]+}}(%esp)
; X86-NEXT:    setl %al
; X86-NEXT:    leal -1(%eax,%eax,4), %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_sext4:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl $43, %edi
; X64-NEXT:    setl %al
; X64-NEXT:    leal -1(%rax,%rax,4), %eax
; X64-NEXT:    retq
  %cmp = icmp sgt i32 %x, 42
  %sext = sext i1 %cmp to i32
  %or = or i32 %sext, 4
  ret i32 %or
}

define i64 @or_sext4_64(i64 %x) {
; X86-LABEL: or_sext4_64:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    movl $42, %ecx
; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    setl %al
; X86-NEXT:    movzbl %al, %edx
; X86-NEXT:    negl %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    orl $4, %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_sext4_64:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpq $43, %rdi
; X64-NEXT:    setl %al
; X64-NEXT:    leaq -1(%rax,%rax,4), %rax
; X64-NEXT:    retq
  %cmp = icmp sgt i64 %x, 42
  %sext = sext i1 %cmp to i64
  %or = or i64 %sext, 4
  ret i64 %or
}

define i32 @or_sext7(i32 %x) {
; X86-LABEL: or_sext7:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl $43, {{[0-9]+}}(%esp)
; X86-NEXT:    setl %al
; X86-NEXT:    leal -1(,%eax,8), %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_sext7:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl $43, %edi
; X64-NEXT:    setl %al
; X64-NEXT:    leal -1(,%rax,8), %eax
; X64-NEXT:    retq
  %cmp = icmp sgt i32 %x, 42
  %sext = sext i1 %cmp to i32
  %or = or i32 %sext, 7
  ret i32 %or
}

define i64 @or_sext7_64(i64 %x) {
; X86-LABEL: or_sext7_64:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    movl $42, %ecx
; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    setl %al
; X86-NEXT:    movzbl %al, %edx
; X86-NEXT:    negl %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    orl $7, %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_sext7_64:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpq $43, %rdi
; X64-NEXT:    setl %al
; X64-NEXT:    leaq -1(,%rax,8), %rax
; X64-NEXT:    retq
  %cmp = icmp sgt i64 %x, 42
  %sext = sext i1 %cmp to i64
  %or = or i64 %sext, 7
  ret i64 %or
}

define i32 @or_sext8(i32 %x) {
; X86-LABEL: or_sext8:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl $43, {{[0-9]+}}(%esp)
; X86-NEXT:    setl %al
; X86-NEXT:    leal -1(%eax,%eax,8), %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_sext8:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl $43, %edi
; X64-NEXT:    setl %al
; X64-NEXT:    leal -1(%rax,%rax,8), %eax
; X64-NEXT:    retq
  %cmp = icmp sgt i32 %x, 42
  %sext = sext i1 %cmp to i32
  %or = or i32 %sext, 8
  ret i32 %or
}

define i64 @or_sext8_64(i64 %x) {
; X86-LABEL: or_sext8_64:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    movl $42, %ecx
; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    setl %al
; X86-NEXT:    movzbl %al, %edx
; X86-NEXT:    negl %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    orl $8, %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_sext8_64:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpq $43, %rdi
; X64-NEXT:    setl %al
; X64-NEXT:    leaq -1(%rax,%rax,8), %rax
; X64-NEXT:    retq
  %cmp = icmp sgt i64 %x, 42
  %sext = sext i1 %cmp to i64
  %or = or i64 %sext, 8
  ret i64 %or
}

define i64 @or_large_constant(i64 %x) {
; X86-LABEL: or_large_constant:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    movl $1, %eax
; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl $0, %eax
; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    setl %al
; X86-NEXT:    movzbl %al, %eax
; X86-NEXT:    negl %eax
; X86-NEXT:    sbbl %edx, %edx
; X86-NEXT:    orl $1, %eax
; X86-NEXT:    orl $128, %edx
; X86-NEXT:    retl
;
; X64-LABEL: or_large_constant:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %ecx, %ecx
; X64-NEXT:    cmpq $2, %rdi
; X64-NEXT:    setge %cl
; X64-NEXT:    negq %rcx
; X64-NEXT:    movabsq $549755813889, %rax # imm = 0x8000000001
; X64-NEXT:    orq %rcx, %rax
; X64-NEXT:    retq
entry:
  %cmp = icmp sgt i64 %x, 1
  %zext = zext i1 %cmp to i64
  %sub = sub i64 0, %zext
  %or = or i64 %sub, 549755813889   ; 0x8000000001
  ret i64 %or
}

define i32 @or_shift1_disjoint(i32 %x, i32 %y) {
; X86-LABEL: or_shift1_disjoint:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl %eax, %eax
; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LABEL: or_shift1_disjoint:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    leal (%rsi,%rdi,2), %eax
; X64-NEXT:    retq
  %shl = shl i32 %x, 1
  %or = or disjoint i32 %y, %shl
  ret i32 %or
}

