xref: /llvm-project/llvm/test/CodeGen/LoongArch/typepromotion-overflow.ll (revision 3850912fee9a14990bc3d72dc2654b03f9e2ab87)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=loongarch32 %s -o - | FileCheck %s --check-prefix=LA32
3; RUN: llc -mtriple=loongarch64 %s -o - | FileCheck %s --check-prefix=LA64
4
5define zeroext i16 @overflow_add(i16 zeroext %a, i16 zeroext %b) {
; (%b + %a) | 1 compared ugt 1024, selecting 2 or 5. The promoted i16 add is
; done in a full-width register and re-narrowed with bstrpick before the sltu;
; the select lowers to the masknez/maskeqz/or idiom on both targets.
; LA32-LABEL: overflow_add:
7; LA32:       # %bb.0:
8; LA32-NEXT:    add.w $a0, $a1, $a0
9; LA32-NEXT:    ori $a0, $a0, 1
10; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
11; LA32-NEXT:    ori $a1, $zero, 1024
12; LA32-NEXT:    sltu $a0, $a1, $a0
13; LA32-NEXT:    ori $a1, $zero, 5
14; LA32-NEXT:    masknez $a1, $a1, $a0
15; LA32-NEXT:    ori $a2, $zero, 2
16; LA32-NEXT:    maskeqz $a0, $a2, $a0
17; LA32-NEXT:    or $a0, $a0, $a1
18; LA32-NEXT:    ret
19;
20; LA64-LABEL: overflow_add:
21; LA64:       # %bb.0:
22; LA64-NEXT:    add.d $a0, $a1, $a0
23; LA64-NEXT:    ori $a0, $a0, 1
24; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
25; LA64-NEXT:    ori $a1, $zero, 1024
26; LA64-NEXT:    sltu $a0, $a1, $a0
27; LA64-NEXT:    ori $a1, $zero, 5
28; LA64-NEXT:    masknez $a1, $a1, $a0
29; LA64-NEXT:    ori $a2, $zero, 2
30; LA64-NEXT:    maskeqz $a0, $a2, $a0
31; LA64-NEXT:    or $a0, $a0, $a1
32; LA64-NEXT:    ret
33  %add = add i16 %b, %a
34  %or = or i16 %add, 1
35  %cmp = icmp ugt i16 %or, 1024
36  %res = select i1 %cmp, i16 2, i16 5
37  ret i16 %res
38}
39
40define zeroext i16 @overflow_sub(i16 zeroext %a, i16 zeroext %b) {
; Same shape as overflow_add but with an i16 subtract (%a - %b, expressed via
; the sub instruction): the promoted result may underflow, so it must be
; re-narrowed with bstrpick before the unsigned compare against 1024.
; LA32-LABEL: overflow_sub:
42; LA32:       # %bb.0:
43; LA32-NEXT:    sub.w $a0, $a0, $a1
44; LA32-NEXT:    ori $a0, $a0, 1
45; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
46; LA32-NEXT:    ori $a1, $zero, 1024
47; LA32-NEXT:    sltu $a0, $a1, $a0
48; LA32-NEXT:    ori $a1, $zero, 5
49; LA32-NEXT:    masknez $a1, $a1, $a0
50; LA32-NEXT:    ori $a2, $zero, 2
51; LA32-NEXT:    maskeqz $a0, $a2, $a0
52; LA32-NEXT:    or $a0, $a0, $a1
53; LA32-NEXT:    ret
54;
55; LA64-LABEL: overflow_sub:
56; LA64:       # %bb.0:
57; LA64-NEXT:    sub.d $a0, $a0, $a1
58; LA64-NEXT:    ori $a0, $a0, 1
59; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
60; LA64-NEXT:    ori $a1, $zero, 1024
61; LA64-NEXT:    sltu $a0, $a1, $a0
62; LA64-NEXT:    ori $a1, $zero, 5
63; LA64-NEXT:    masknez $a1, $a1, $a0
64; LA64-NEXT:    ori $a2, $zero, 2
65; LA64-NEXT:    maskeqz $a0, $a2, $a0
66; LA64-NEXT:    or $a0, $a0, $a1
67; LA64-NEXT:    ret
68  %add = sub i16 %a, %b
69  %or = or i16 %add, 1
70  %cmp = icmp ugt i16 %or, 1024
71  %res = select i1 %cmp, i16 2, i16 5
72  ret i16 %res
73}
74
75define zeroext i16 @overflow_mul(i16 zeroext %a, i16 zeroext %b) {
; i16 multiply variant: the widened mul.w/mul.d result can exceed 16 bits, so
; bstrpick re-narrows it before the ugt-1024 compare and select of 2 vs 5.
; LA32-LABEL: overflow_mul:
77; LA32:       # %bb.0:
78; LA32-NEXT:    mul.w $a0, $a1, $a0
79; LA32-NEXT:    ori $a0, $a0, 1
80; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
81; LA32-NEXT:    ori $a1, $zero, 1024
82; LA32-NEXT:    sltu $a0, $a1, $a0
83; LA32-NEXT:    ori $a1, $zero, 5
84; LA32-NEXT:    masknez $a1, $a1, $a0
85; LA32-NEXT:    ori $a2, $zero, 2
86; LA32-NEXT:    maskeqz $a0, $a2, $a0
87; LA32-NEXT:    or $a0, $a0, $a1
88; LA32-NEXT:    ret
89;
90; LA64-LABEL: overflow_mul:
91; LA64:       # %bb.0:
92; LA64-NEXT:    mul.d $a0, $a1, $a0
93; LA64-NEXT:    ori $a0, $a0, 1
94; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
95; LA64-NEXT:    ori $a1, $zero, 1024
96; LA64-NEXT:    sltu $a0, $a1, $a0
97; LA64-NEXT:    ori $a1, $zero, 5
98; LA64-NEXT:    masknez $a1, $a1, $a0
99; LA64-NEXT:    ori $a2, $zero, 2
100; LA64-NEXT:    maskeqz $a0, $a2, $a0
101; LA64-NEXT:    or $a0, $a0, $a1
102; LA64-NEXT:    ret
103  %add = mul i16 %b, %a
104  %or = or i16 %add, 1
105  %cmp = icmp ugt i16 %or, 1024
106  %res = select i1 %cmp, i16 2, i16 5
107  ret i16 %res
108}
109
110define zeroext i16 @overflow_shl(i16 zeroext %a, i16 zeroext %b) {
; i16 shift-left variant: sll.w/sll.d can shift set bits above bit 15, so the
; result is re-narrowed with bstrpick before the unsigned compare and select.
; LA32-LABEL: overflow_shl:
112; LA32:       # %bb.0:
113; LA32-NEXT:    sll.w $a0, $a0, $a1
114; LA32-NEXT:    ori $a0, $a0, 1
115; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
116; LA32-NEXT:    ori $a1, $zero, 1024
117; LA32-NEXT:    sltu $a0, $a1, $a0
118; LA32-NEXT:    ori $a1, $zero, 5
119; LA32-NEXT:    masknez $a1, $a1, $a0
120; LA32-NEXT:    ori $a2, $zero, 2
121; LA32-NEXT:    maskeqz $a0, $a2, $a0
122; LA32-NEXT:    or $a0, $a0, $a1
123; LA32-NEXT:    ret
124;
125; LA64-LABEL: overflow_shl:
126; LA64:       # %bb.0:
127; LA64-NEXT:    sll.d $a0, $a0, $a1
128; LA64-NEXT:    ori $a0, $a0, 1
129; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
130; LA64-NEXT:    ori $a1, $zero, 1024
131; LA64-NEXT:    sltu $a0, $a1, $a0
132; LA64-NEXT:    ori $a1, $zero, 5
133; LA64-NEXT:    masknez $a1, $a1, $a0
134; LA64-NEXT:    ori $a2, $zero, 2
135; LA64-NEXT:    maskeqz $a0, $a2, $a0
136; LA64-NEXT:    or $a0, $a0, $a1
137; LA64-NEXT:    ret
138  %add = shl i16 %a, %b
139  %or = or i16 %add, 1
140  %cmp = icmp ugt i16 %or, 1024
141  %res = select i1 %cmp, i16 2, i16 5
142  ret i16 %res
143}
144
145define i32 @overflow_add_no_consts(i8 zeroext %a, i8 zeroext %b, i8 zeroext %limit) {
; i8 add compared against a variable limit: the sum is masked back to 8 bits
; with andi, then compared ugt %limit (already zero-extended in $a2) and the
; 8/16 select is materialized with masknez/maskeqz.
; LA32-LABEL: overflow_add_no_consts:
147; LA32:       # %bb.0:
148; LA32-NEXT:    add.w $a0, $a1, $a0
149; LA32-NEXT:    andi $a0, $a0, 255
150; LA32-NEXT:    sltu $a0, $a2, $a0
151; LA32-NEXT:    ori $a1, $zero, 16
152; LA32-NEXT:    masknez $a1, $a1, $a0
153; LA32-NEXT:    ori $a2, $zero, 8
154; LA32-NEXT:    maskeqz $a0, $a2, $a0
155; LA32-NEXT:    or $a0, $a0, $a1
156; LA32-NEXT:    ret
157;
158; LA64-LABEL: overflow_add_no_consts:
159; LA64:       # %bb.0:
160; LA64-NEXT:    add.d $a0, $a1, $a0
161; LA64-NEXT:    andi $a0, $a0, 255
162; LA64-NEXT:    sltu $a0, $a2, $a0
163; LA64-NEXT:    ori $a1, $zero, 16
164; LA64-NEXT:    masknez $a1, $a1, $a0
165; LA64-NEXT:    ori $a2, $zero, 8
166; LA64-NEXT:    maskeqz $a0, $a2, $a0
167; LA64-NEXT:    or $a0, $a0, $a1
168; LA64-NEXT:    ret
169  %add = add i8 %b, %a
170  %cmp = icmp ugt i8 %add, %limit
171  %res = select i1 %cmp, i32 8, i32 16
172  ret i32 %res
173}
174
175define i32 @overflow_add_const_limit(i8 zeroext %a, i8 zeroext %b) {
; i8 add compared ugt -128; as an unsigned 8-bit value -128 is 128, which is
; what the generated code loads (ori ..., 128) after masking the sum to 8 bits.
; LA32-LABEL: overflow_add_const_limit:
177; LA32:       # %bb.0:
178; LA32-NEXT:    add.w $a0, $a1, $a0
179; LA32-NEXT:    andi $a0, $a0, 255
180; LA32-NEXT:    ori $a1, $zero, 128
181; LA32-NEXT:    sltu $a0, $a1, $a0
182; LA32-NEXT:    ori $a1, $zero, 16
183; LA32-NEXT:    masknez $a1, $a1, $a0
184; LA32-NEXT:    ori $a2, $zero, 8
185; LA32-NEXT:    maskeqz $a0, $a2, $a0
186; LA32-NEXT:    or $a0, $a0, $a1
187; LA32-NEXT:    ret
188;
189; LA64-LABEL: overflow_add_const_limit:
190; LA64:       # %bb.0:
191; LA64-NEXT:    add.d $a0, $a1, $a0
192; LA64-NEXT:    andi $a0, $a0, 255
193; LA64-NEXT:    ori $a1, $zero, 128
194; LA64-NEXT:    sltu $a0, $a1, $a0
195; LA64-NEXT:    ori $a1, $zero, 16
196; LA64-NEXT:    masknez $a1, $a1, $a0
197; LA64-NEXT:    ori $a2, $zero, 8
198; LA64-NEXT:    maskeqz $a0, $a2, $a0
199; LA64-NEXT:    or $a0, $a0, $a1
200; LA64-NEXT:    ret
201  %add = add i8 %b, %a
202  %cmp = icmp ugt i8 %add, -128
203  %res = select i1 %cmp, i32 8, i32 16
204  ret i32 %res
205}
206
207define i32 @overflow_add_positive_const_limit(i8 zeroext %a) {
; Signed compare (slt i8 %a, -1) on a zero-extended argument: the input is
; sign-extended with ext.w.b, then slti against -1 feeds the 8/16 select.
; LA32-LABEL: overflow_add_positive_const_limit:
209; LA32:       # %bb.0:
210; LA32-NEXT:    ext.w.b $a0, $a0
211; LA32-NEXT:    slti $a0, $a0, -1
212; LA32-NEXT:    ori $a1, $zero, 16
213; LA32-NEXT:    masknez $a1, $a1, $a0
214; LA32-NEXT:    ori $a2, $zero, 8
215; LA32-NEXT:    maskeqz $a0, $a2, $a0
216; LA32-NEXT:    or $a0, $a0, $a1
217; LA32-NEXT:    ret
218;
219; LA64-LABEL: overflow_add_positive_const_limit:
220; LA64:       # %bb.0:
221; LA64-NEXT:    ext.w.b $a0, $a0
222; LA64-NEXT:    slti $a0, $a0, -1
223; LA64-NEXT:    ori $a1, $zero, 16
224; LA64-NEXT:    masknez $a1, $a1, $a0
225; LA64-NEXT:    ori $a2, $zero, 8
226; LA64-NEXT:    maskeqz $a0, $a2, $a0
227; LA64-NEXT:    or $a0, $a0, $a1
228; LA64-NEXT:    ret
229  %cmp = icmp slt i8 %a, -1
230  %res = select i1 %cmp, i32 8, i32 16
231  ret i32 %res
232}
233
234define i32 @unsafe_add_underflow(i8 zeroext %a) {
; Equality with 1 lowered as (a - 1) < 1 unsigned: addi -1 followed by
; sltui 1 produces the i1, then the usual masknez/maskeqz select of 8 vs 16.
; LA32-LABEL: unsafe_add_underflow:
236; LA32:       # %bb.0:
237; LA32-NEXT:    addi.w $a0, $a0, -1
238; LA32-NEXT:    sltui $a0, $a0, 1
239; LA32-NEXT:    ori $a1, $zero, 16
240; LA32-NEXT:    masknez $a1, $a1, $a0
241; LA32-NEXT:    ori $a2, $zero, 8
242; LA32-NEXT:    maskeqz $a0, $a2, $a0
243; LA32-NEXT:    or $a0, $a0, $a1
244; LA32-NEXT:    ret
245;
246; LA64-LABEL: unsafe_add_underflow:
247; LA64:       # %bb.0:
248; LA64-NEXT:    addi.d $a0, $a0, -1
249; LA64-NEXT:    sltui $a0, $a0, 1
250; LA64-NEXT:    ori $a1, $zero, 16
251; LA64-NEXT:    masknez $a1, $a1, $a0
252; LA64-NEXT:    ori $a2, $zero, 8
253; LA64-NEXT:    maskeqz $a0, $a2, $a0
254; LA64-NEXT:    or $a0, $a0, $a1
255; LA64-NEXT:    ret
256  %cmp = icmp eq i8 %a, 1
257  %res = select i1 %cmp, i32 8, i32 16
258  ret i32 %res
259}
260
261define i32 @safe_add_underflow(i8 zeroext %a) {
; Equality with zero needs no adjustment: sltui $a0, $a0, 1 is the direct
; "is zero" test, followed by the 8/16 select.
; LA32-LABEL: safe_add_underflow:
263; LA32:       # %bb.0:
264; LA32-NEXT:    sltui $a0, $a0, 1
265; LA32-NEXT:    ori $a1, $zero, 16
266; LA32-NEXT:    masknez $a1, $a1, $a0
267; LA32-NEXT:    ori $a2, $zero, 8
268; LA32-NEXT:    maskeqz $a0, $a2, $a0
269; LA32-NEXT:    or $a0, $a0, $a1
270; LA32-NEXT:    ret
271;
272; LA64-LABEL: safe_add_underflow:
273; LA64:       # %bb.0:
274; LA64-NEXT:    sltui $a0, $a0, 1
275; LA64-NEXT:    ori $a1, $zero, 16
276; LA64-NEXT:    masknez $a1, $a1, $a0
277; LA64-NEXT:    ori $a2, $zero, 8
278; LA64-NEXT:    maskeqz $a0, $a2, $a0
279; LA64-NEXT:    or $a0, $a0, $a1
280; LA64-NEXT:    ret
281  %cmp = icmp eq i8 %a, 0
282  %res = select i1 %cmp, i32 8, i32 16
283  ret i32 %res
284}
285
286define i32 @safe_add_underflow_neg(i8 zeroext %a) {
; (a - 2) ult -5: the i8 constant -5 is 251 unsigned, which is exactly the
; sltui immediate emitted after the addi.w/addi.d -2 adjustment.
; LA32-LABEL: safe_add_underflow_neg:
288; LA32:       # %bb.0:
289; LA32-NEXT:    addi.w $a0, $a0, -2
290; LA32-NEXT:    sltui $a0, $a0, 251
291; LA32-NEXT:    ori $a1, $zero, 16
292; LA32-NEXT:    masknez $a1, $a1, $a0
293; LA32-NEXT:    ori $a2, $zero, 8
294; LA32-NEXT:    maskeqz $a0, $a2, $a0
295; LA32-NEXT:    or $a0, $a0, $a1
296; LA32-NEXT:    ret
297;
298; LA64-LABEL: safe_add_underflow_neg:
299; LA64:       # %bb.0:
300; LA64-NEXT:    addi.d $a0, $a0, -2
301; LA64-NEXT:    sltui $a0, $a0, 251
302; LA64-NEXT:    ori $a1, $zero, 16
303; LA64-NEXT:    masknez $a1, $a1, $a0
304; LA64-NEXT:    ori $a2, $zero, 8
305; LA64-NEXT:    maskeqz $a0, $a2, $a0
306; LA64-NEXT:    or $a0, $a0, $a1
307; LA64-NEXT:    ret
308  %add = add i8 %a, -2
309  %cmp = icmp ult i8 %add, -5
310  %res = select i1 %cmp, i32 8, i32 16
311  ret i32 %res
312}
313
314define i32 @overflow_sub_negative_const_limit(i8 zeroext %a) {
; Same IR pattern as overflow_add_positive_const_limit (slt i8 %a, -1);
; verifies the sign-extend + slti lowering is used here too.
; LA32-LABEL: overflow_sub_negative_const_limit:
316; LA32:       # %bb.0:
317; LA32-NEXT:    ext.w.b $a0, $a0
318; LA32-NEXT:    slti $a0, $a0, -1
319; LA32-NEXT:    ori $a1, $zero, 16
320; LA32-NEXT:    masknez $a1, $a1, $a0
321; LA32-NEXT:    ori $a2, $zero, 8
322; LA32-NEXT:    maskeqz $a0, $a2, $a0
323; LA32-NEXT:    or $a0, $a0, $a1
324; LA32-NEXT:    ret
325;
326; LA64-LABEL: overflow_sub_negative_const_limit:
327; LA64:       # %bb.0:
328; LA64-NEXT:    ext.w.b $a0, $a0
329; LA64-NEXT:    slti $a0, $a0, -1
330; LA64-NEXT:    ori $a1, $zero, 16
331; LA64-NEXT:    masknez $a1, $a1, $a0
332; LA64-NEXT:    ori $a2, $zero, 8
333; LA64-NEXT:    maskeqz $a0, $a2, $a0
334; LA64-NEXT:    or $a0, $a0, $a1
335; LA64-NEXT:    ret
336  %cmp = icmp slt i8 %a, -1
337  %res = select i1 %cmp, i32 8, i32 16
338  ret i32 %res
339}
340
341define i32 @sext_sub_underflow(i8 zeroext %a) {
; (a - 6) ugt -6: here the -6 limit is materialized as a sign-extended
; register value (addi -6 from $zero) and compared with sltu, rather than
; being folded into an immediate compare.
; LA32-LABEL: sext_sub_underflow:
343; LA32:       # %bb.0:
344; LA32-NEXT:    addi.w $a0, $a0, -6
345; LA32-NEXT:    addi.w $a1, $zero, -6
346; LA32-NEXT:    sltu $a0, $a1, $a0
347; LA32-NEXT:    ori $a1, $zero, 16
348; LA32-NEXT:    masknez $a1, $a1, $a0
349; LA32-NEXT:    ori $a2, $zero, 8
350; LA32-NEXT:    maskeqz $a0, $a2, $a0
351; LA32-NEXT:    or $a0, $a0, $a1
352; LA32-NEXT:    ret
353;
354; LA64-LABEL: sext_sub_underflow:
355; LA64:       # %bb.0:
356; LA64-NEXT:    addi.d $a0, $a0, -6
357; LA64-NEXT:    addi.w $a1, $zero, -6
358; LA64-NEXT:    sltu $a0, $a1, $a0
359; LA64-NEXT:    ori $a1, $zero, 16
360; LA64-NEXT:    masknez $a1, $a1, $a0
361; LA64-NEXT:    ori $a2, $zero, 8
362; LA64-NEXT:    maskeqz $a0, $a2, $a0
363; LA64-NEXT:    or $a0, $a0, $a1
364; LA64-NEXT:    ret
365  %sub = add i8 %a, -6
366  %cmp = icmp ugt i8 %sub, -6
367  %res = select i1 %cmp, i32 8, i32 16
368  ret i32 %res
369}
370
371define i32 @safe_sub_underflow(i8 zeroext %a) {
; Inverted select arms relative to safe_add_underflow: eq-0 picks 16, else 8,
; so the masknez/maskeqz operand constants are swapped in the output.
; LA32-LABEL: safe_sub_underflow:
373; LA32:       # %bb.0:
374; LA32-NEXT:    sltui $a0, $a0, 1
375; LA32-NEXT:    ori $a1, $zero, 8
376; LA32-NEXT:    masknez $a1, $a1, $a0
377; LA32-NEXT:    ori $a2, $zero, 16
378; LA32-NEXT:    maskeqz $a0, $a2, $a0
379; LA32-NEXT:    or $a0, $a0, $a1
380; LA32-NEXT:    ret
381;
382; LA64-LABEL: safe_sub_underflow:
383; LA64:       # %bb.0:
384; LA64-NEXT:    sltui $a0, $a0, 1
385; LA64-NEXT:    ori $a1, $zero, 8
386; LA64-NEXT:    masknez $a1, $a1, $a0
387; LA64-NEXT:    ori $a2, $zero, 16
388; LA64-NEXT:    maskeqz $a0, $a2, $a0
389; LA64-NEXT:    or $a0, $a0, $a1
390; LA64-NEXT:    ret
391  %cmp.not = icmp eq i8 %a, 0
392  %res = select i1 %cmp.not, i32 16, i32 8
393  ret i32 %res
394}
395
396define i32 @safe_sub_underflow_neg(i8 zeroext %a) {
; (a - 4) ugt -6: the i8 constant -6 is 250 unsigned, loaded with ori and
; compared via sltu after the addi -4 adjustment.
; LA32-LABEL: safe_sub_underflow_neg:
398; LA32:       # %bb.0:
399; LA32-NEXT:    addi.w $a0, $a0, -4
400; LA32-NEXT:    ori $a1, $zero, 250
401; LA32-NEXT:    sltu $a0, $a1, $a0
402; LA32-NEXT:    ori $a1, $zero, 16
403; LA32-NEXT:    masknez $a1, $a1, $a0
404; LA32-NEXT:    ori $a2, $zero, 8
405; LA32-NEXT:    maskeqz $a0, $a2, $a0
406; LA32-NEXT:    or $a0, $a0, $a1
407; LA32-NEXT:    ret
408;
409; LA64-LABEL: safe_sub_underflow_neg:
410; LA64:       # %bb.0:
411; LA64-NEXT:    addi.d $a0, $a0, -4
412; LA64-NEXT:    ori $a1, $zero, 250
413; LA64-NEXT:    sltu $a0, $a1, $a0
414; LA64-NEXT:    ori $a1, $zero, 16
415; LA64-NEXT:    masknez $a1, $a1, $a0
416; LA64-NEXT:    ori $a2, $zero, 8
417; LA64-NEXT:    maskeqz $a0, $a2, $a0
418; LA64-NEXT:    or $a0, $a0, $a1
419; LA64-NEXT:    ret
420  %sub = add i8 %a, -4
421  %cmp = icmp ugt i8 %sub, -6
422  %res = select i1 %cmp, i32 8, i32 16
423  ret i32 %res
424}
425
426define i32 @sext_sub_underflow_neg(i8 zeroext %a) {
; (a - 4) ult -3: lowered with sltui using the sign-extended immediate -3
; directly (contrast with safe_sub_underflow_neg, which loads 250 into a reg).
; LA32-LABEL: sext_sub_underflow_neg:
428; LA32:       # %bb.0:
429; LA32-NEXT:    addi.w $a0, $a0, -4
430; LA32-NEXT:    sltui $a0, $a0, -3
431; LA32-NEXT:    ori $a1, $zero, 16
432; LA32-NEXT:    masknez $a1, $a1, $a0
433; LA32-NEXT:    ori $a2, $zero, 8
434; LA32-NEXT:    maskeqz $a0, $a2, $a0
435; LA32-NEXT:    or $a0, $a0, $a1
436; LA32-NEXT:    ret
437;
438; LA64-LABEL: sext_sub_underflow_neg:
439; LA64:       # %bb.0:
440; LA64-NEXT:    addi.d $a0, $a0, -4
441; LA64-NEXT:    sltui $a0, $a0, -3
442; LA64-NEXT:    ori $a1, $zero, 16
443; LA64-NEXT:    masknez $a1, $a1, $a0
444; LA64-NEXT:    ori $a2, $zero, 8
445; LA64-NEXT:    maskeqz $a0, $a2, $a0
446; LA64-NEXT:    or $a0, $a0, $a1
447; LA64-NEXT:    ret
448  %sub = add i8 %a, -4
449  %cmp = icmp ult i8 %sub, -3
450  %res = select i1 %cmp, i32 8, i32 16
451  ret i32 %res
452}
453
454define i32 @safe_sub_imm_var(ptr nocapture readonly %b) local_unnamed_addr #1 {
; Body was presumably constant-folded away upstream; the test only pins that
; a bare `ret i32 0` lowers to `move $a0, $zero; ret`.
; LA32-LABEL: safe_sub_imm_var:
456; LA32:       # %bb.0: # %entry
457; LA32-NEXT:    move $a0, $zero
458; LA32-NEXT:    ret
459;
460; LA64-LABEL: safe_sub_imm_var:
461; LA64:       # %bb.0: # %entry
462; LA64-NEXT:    move $a0, $zero
463; LA64-NEXT:    ret
464entry:
465  ret i32 0
466}
467
468define i32 @safe_sub_var_imm(ptr nocapture readonly %b) local_unnamed_addr #1 {
; Loads an i8 (zero-extended via ld.bu), adds 8 (emitted as addi -248, the
; sign-extended equivalent mod 256), then zext(ugt -4) is returned directly
; as the sltu result — no select needed.
; LA32-LABEL: safe_sub_var_imm:
470; LA32:       # %bb.0: # %entry
471; LA32-NEXT:    ld.bu $a0, $a0, 0
472; LA32-NEXT:    addi.w $a0, $a0, -248
473; LA32-NEXT:    addi.w $a1, $zero, -4
474; LA32-NEXT:    sltu $a0, $a1, $a0
475; LA32-NEXT:    ret
476;
477; LA64-LABEL: safe_sub_var_imm:
478; LA64:       # %bb.0: # %entry
479; LA64-NEXT:    ld.bu $a0, $a0, 0
480; LA64-NEXT:    addi.d $a0, $a0, -248
481; LA64-NEXT:    addi.w $a1, $zero, -4
482; LA64-NEXT:    sltu $a0, $a1, $a0
483; LA64-NEXT:    ret
484entry:
485  %0 = load i8, ptr %b, align 1
486  %sub = add nsw i8 %0, 8
487  %cmp = icmp ugt i8 %sub, -4
488  %conv4 = zext i1 %cmp to i32
489  ret i32 %conv4
490}
491
492define i32 @safe_add_imm_var(ptr nocapture readnone %b) {
; Body presumably constant-folded upstream; pins that `ret i32 1` lowers to
; `ori $a0, $zero, 1; ret`.
; LA32-LABEL: safe_add_imm_var:
494; LA32:       # %bb.0: # %entry
495; LA32-NEXT:    ori $a0, $zero, 1
496; LA32-NEXT:    ret
497;
498; LA64-LABEL: safe_add_imm_var:
499; LA64:       # %bb.0: # %entry
500; LA64-NEXT:    ori $a0, $zero, 1
501; LA64-NEXT:    ret
502entry:
503  ret i32 1
504}
505
506define i32 @safe_add_var_imm(ptr nocapture readnone %b) {
; Companion to safe_add_imm_var: also reduced to a constant `ret i32 1`.
; LA32-LABEL: safe_add_var_imm:
508; LA32:       # %bb.0: # %entry
509; LA32-NEXT:    ori $a0, $zero, 1
510; LA32-NEXT:    ret
511;
512; LA64-LABEL: safe_add_var_imm:
513; LA64:       # %bb.0: # %entry
514; LA64-NEXT:    ori $a0, $zero, 1
515; LA64-NEXT:    ret
516entry:
517  ret i32 1
518}
519
520define i8 @convert_add_order(i8 zeroext %arg) {
; Chained-compare mask pattern: %arg|1 is tested ult 50 and, after -40,
; ult 20; the nested selects become `2 - (cmp.1)` (yielding 1 or 2) guarded by
; masknez/maskeqz against 255 (i8 -1), then AND'ed with the original %arg.
; LA32-LABEL: convert_add_order:
522; LA32:       # %bb.0:
523; LA32-NEXT:    ori $a1, $a0, 1
524; LA32-NEXT:    sltui $a2, $a1, 50
525; LA32-NEXT:    addi.w $a1, $a1, -40
526; LA32-NEXT:    sltui $a1, $a1, 20
527; LA32-NEXT:    ori $a3, $zero, 2
528; LA32-NEXT:    sub.w $a1, $a3, $a1
529; LA32-NEXT:    ori $a3, $zero, 255
530; LA32-NEXT:    masknez $a3, $a3, $a2
531; LA32-NEXT:    maskeqz $a1, $a1, $a2
532; LA32-NEXT:    or $a1, $a1, $a3
533; LA32-NEXT:    and $a0, $a1, $a0
534; LA32-NEXT:    ret
535;
536; LA64-LABEL: convert_add_order:
537; LA64:       # %bb.0:
538; LA64-NEXT:    ori $a1, $a0, 1
539; LA64-NEXT:    sltui $a2, $a1, 50
540; LA64-NEXT:    addi.d $a1, $a1, -40
541; LA64-NEXT:    sltui $a1, $a1, 20
542; LA64-NEXT:    ori $a3, $zero, 2
543; LA64-NEXT:    sub.d $a1, $a3, $a1
544; LA64-NEXT:    ori $a3, $zero, 255
545; LA64-NEXT:    masknez $a3, $a3, $a2
546; LA64-NEXT:    maskeqz $a1, $a1, $a2
547; LA64-NEXT:    or $a1, $a1, $a3
548; LA64-NEXT:    and $a0, $a1, $a0
549; LA64-NEXT:    ret
550  %shl = or i8 %arg, 1
551  %cmp.0 = icmp ult i8 %shl, 50
552  %sub = add nsw i8 %shl, -40
553  %cmp.1 = icmp ult i8 %sub, 20
554  %mask.sel.v = select i1 %cmp.1, i8 1, i8 2
555  %mask.sel = select i1 %cmp.0, i8 %mask.sel.v, i8 -1
556  %res = and i8 %mask.sel, %arg
557  ret i8 %res
558}
559
560define i8 @underflow_if_sub(i32 %arg, i8 zeroext %arg1) {
; i32 sgt-0 gate AND'ed into %arg, truncated to i8, then +(-11) (emitted as
; addi 245, i.e. -11 mod 256) and compared ult %arg1; result selects the
; adjusted value or 100. LA64 first sign-extends the i32 arg with addi.w 0
; for the 32-bit slt.
; LA32-LABEL: underflow_if_sub:
562; LA32:       # %bb.0:
563; LA32-NEXT:    slt $a2, $zero, $a0
564; LA32-NEXT:    and $a0, $a2, $a0
565; LA32-NEXT:    addi.w $a0, $a0, 245
566; LA32-NEXT:    sltu $a1, $a0, $a1
567; LA32-NEXT:    maskeqz $a0, $a0, $a1
568; LA32-NEXT:    ori $a2, $zero, 100
569; LA32-NEXT:    masknez $a1, $a2, $a1
570; LA32-NEXT:    or $a0, $a0, $a1
571; LA32-NEXT:    ret
572;
573; LA64-LABEL: underflow_if_sub:
574; LA64:       # %bb.0:
575; LA64-NEXT:    addi.w $a2, $a0, 0
576; LA64-NEXT:    slt $a2, $zero, $a2
577; LA64-NEXT:    and $a0, $a2, $a0
578; LA64-NEXT:    addi.d $a0, $a0, 245
579; LA64-NEXT:    sltu $a1, $a0, $a1
580; LA64-NEXT:    maskeqz $a0, $a0, $a1
581; LA64-NEXT:    ori $a2, $zero, 100
582; LA64-NEXT:    masknez $a1, $a2, $a1
583; LA64-NEXT:    or $a0, $a0, $a1
584; LA64-NEXT:    ret
585  %cmp = icmp sgt i32 %arg, 0
586  %conv = zext i1 %cmp to i32
587  %and = and i32 %conv, %arg
588  %trunc = trunc i32 %and to i8
589  %conv1 = add nuw nsw i8 %trunc, -11
590  %cmp.1 = icmp ult i8 %conv1, %arg1
591  %res = select i1 %cmp.1, i8 %conv1, i8 100
592  ret i8 %res
593}
594
595define i8 @underflow_if_sub_signext(i32 %arg, i8 signext %arg1) {
; Same computation as underflow_if_sub, but %arg1 arrives sign-extended, so
; both targets must re-zero-extend it with `andi $a1, $a1, 255` before the
; unsigned compare.
; LA32-LABEL: underflow_if_sub_signext:
597; LA32:       # %bb.0:
598; LA32-NEXT:    andi $a1, $a1, 255
599; LA32-NEXT:    slt $a2, $zero, $a0
600; LA32-NEXT:    and $a0, $a2, $a0
601; LA32-NEXT:    addi.w $a0, $a0, 245
602; LA32-NEXT:    sltu $a1, $a0, $a1
603; LA32-NEXT:    maskeqz $a0, $a0, $a1
604; LA32-NEXT:    ori $a2, $zero, 100
605; LA32-NEXT:    masknez $a1, $a2, $a1
606; LA32-NEXT:    or $a0, $a0, $a1
607; LA32-NEXT:    ret
608;
609; LA64-LABEL: underflow_if_sub_signext:
610; LA64:       # %bb.0:
611; LA64-NEXT:    addi.w $a2, $a0, 0
612; LA64-NEXT:    andi $a1, $a1, 255
613; LA64-NEXT:    slt $a2, $zero, $a2
614; LA64-NEXT:    and $a0, $a2, $a0
615; LA64-NEXT:    addi.d $a0, $a0, 245
616; LA64-NEXT:    sltu $a1, $a0, $a1
617; LA64-NEXT:    maskeqz $a0, $a0, $a1
618; LA64-NEXT:    ori $a2, $zero, 100
619; LA64-NEXT:    masknez $a1, $a2, $a1
620; LA64-NEXT:    or $a0, $a0, $a1
621; LA64-NEXT:    ret
622  %cmp = icmp sgt i32 %arg, 0
623  %conv = zext i1 %cmp to i32
624  %and = and i32 %conv, %arg
625  %trunc = trunc i32 %and to i8
626  %conv1 = add nuw nsw i8 %trunc, -11
627  %cmp.1 = icmp ult i8 %conv1, %arg1
628  %res = select i1 %cmp.1, i8 %conv1, i8 100
629  ret i8 %res
630}
631