; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32,NOZBS32
; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64,NOZBS64
; RUN: llc -mtriple=riscv32 -mattr=+zbb,+zbs -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32,ZBS,ZBS32
; RUN: llc -mtriple=riscv64 -mattr=+zbb,+zbs -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64,ZBS,ZBS64

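; The immediates below have long runs of set bits, so the inverted constant
; fits in a single lui and the and/or/xor is selected as andn/orn/xnor against
; that inverted value instead of materializing the immediate with lui+addi.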
define i32 @and0xabcdefff(i32 %x) {
; CHECK-LABEL: and0xabcdefff:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, 344865
; CHECK-NEXT:    andn a0, a0, a1
; CHECK-NEXT:    ret
  %and = and i32 %x, -1412567041
  ret i32 %and
}

define i32 @orlow13(i32 %x) {
; CHECK-LABEL: orlow13:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, 1048574
; CHECK-NEXT:    orn a0, a0, a1
; CHECK-NEXT:    ret
  %or = or i32 %x, 8191
  ret i32 %or
}

define i64 @orlow24(i64 %x) {
; RV32-LABEL: orlow24:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a2, 1044480
; RV32-NEXT:    orn a0, a0, a2
; RV32-NEXT:    ret
;
; RV64-LABEL: orlow24:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, 1044480
; RV64-NEXT:    orn a0, a0, a1
; RV64-NEXT:    ret
  %or = or i64 %x, 16777215
  ret i64 %or
}

define i32 @xorlow16(i32 %x) {
; CHECK-LABEL: xorlow16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, 1048560
; CHECK-NEXT:    xnor a0, a0, a1
; CHECK-NEXT:    ret
  %xor = xor i32 %x, 65535
  ret i32 %xor
}

define i32 @xorlow31(i32 %x) {
; CHECK-LABEL: xorlow31:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, 524288
; CHECK-NEXT:    xnor a0, a0, a1
; CHECK-NEXT:    ret
  %xor = xor i32 %x, 2147483647
  ret i32 %xor
}

define i32 @oraddlow16(i32 %x) {
; RV32-LABEL: oraddlow16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, 16
; RV32-NEXT:    addi a1, a1, -1
; RV32-NEXT:    or a0, a0, a1
; RV32-NEXT:    add a0, a0, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: oraddlow16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, 16
; RV64-NEXT:    addi a1, a1, -1
; RV64-NEXT:    or a0, a0, a1
; RV64-NEXT:    addw a0, a0, a1
; RV64-NEXT:    ret
  %or = or i32 %x, 65535
  %add = add nsw i32 %or, 65535
  ret i32 %add
}

define i32 @addorlow16(i32 %x) {
; RV32-LABEL: addorlow16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, 16
; RV32-NEXT:    addi a1, a1, -1
; RV32-NEXT:    add a0, a0, a1
; RV32-NEXT:    or a0, a0, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: addorlow16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, 16
; RV64-NEXT:    addiw a1, a1, -1
; RV64-NEXT:    addw a0, a0, a1
; RV64-NEXT:    or a0, a0, a1
; RV64-NEXT:    ret
  %add = add nsw i32 %x, 65535
  %or = or i32 %add, 65535
  ret i32 %or
}

define i32 @andxorlow16(i32 %x) {
; RV32-LABEL: andxorlow16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, 16
; RV32-NEXT:    addi a1, a1, -1
; RV32-NEXT:    andn a0, a1, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: andxorlow16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, 16
; RV64-NEXT:    addiw a1, a1, -1
; RV64-NEXT:    andn a0, a1, a0
; RV64-NEXT:    ret
  %and = and i32 %x, 65535
  %xor = xor i32 %and, 65535
  ret i32 %xor
}

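; The inverted constant 0xffff0000 is loop invariant, so it should be
; materialized by a single lui outside the loop and reused by orn on every
; iteration.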
define void @orarray100(ptr %a) {
; RV32-LABEL: orarray100:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    li a1, 0
; RV32-NEXT:    li a2, 0
; RV32-NEXT:    lui a3, 1048560
; RV32-NEXT:  .LBB8_1: # %for.body
; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
; RV32-NEXT:    slli a4, a1, 2
; RV32-NEXT:    addi a1, a1, 1
; RV32-NEXT:    add a4, a0, a4
; RV32-NEXT:    lw a5, 0(a4)
; RV32-NEXT:    seqz a6, a1
; RV32-NEXT:    add a2, a2, a6
; RV32-NEXT:    xori a6, a1, 100
; RV32-NEXT:    orn a5, a5, a3
; RV32-NEXT:    or a6, a6, a2
; RV32-NEXT:    sw a5, 0(a4)
; RV32-NEXT:    bnez a6, .LBB8_1
; RV32-NEXT:  # %bb.2: # %for.cond.cleanup
; RV32-NEXT:    ret
;
; RV64-LABEL: orarray100:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi a1, a0, 400
; RV64-NEXT:    lui a2, 1048560
; RV64-NEXT:  .LBB8_1: # %for.body
; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
; RV64-NEXT:    lw a3, 0(a0)
; RV64-NEXT:    orn a3, a3, a2
; RV64-NEXT:    sw a3, 0(a0)
; RV64-NEXT:    addi a0, a0, 4
; RV64-NEXT:    bne a0, a1, .LBB8_1
; RV64-NEXT:  # %bb.2: # %for.cond.cleanup
; RV64-NEXT:    ret
entry:
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds nuw i32, ptr %a, i64 %indvars.iv
  %1 = load i32, ptr %arrayidx, align 4
  %or = or i32 %1, 65535
  store i32 %or, ptr %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, 100
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}

define void @orarray3(ptr %a) {
; CHECK-LABEL: orarray3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lw a1, 0(a0)
; CHECK-NEXT:    lw a2, 4(a0)
; CHECK-NEXT:    lw a3, 8(a0)
; CHECK-NEXT:    lui a4, 1048560
; CHECK-NEXT:    orn a1, a1, a4
; CHECK-NEXT:    orn a2, a2, a4
; CHECK-NEXT:    orn a3, a3, a4
; CHECK-NEXT:    sw a1, 0(a0)
; CHECK-NEXT:    sw a2, 4(a0)
; CHECK-NEXT:    sw a3, 8(a0)
; CHECK-NEXT:    ret
  %1 = load i32, ptr %a, align 4
  %or = or i32 %1, 65535
  store i32 %or, ptr %a, align 4
  %arrayidx.1 = getelementptr inbounds nuw i8, ptr %a, i64 4
  %2 = load i32, ptr %arrayidx.1, align 4
  %or.1 = or i32 %2, 65535
  store i32 %or.1, ptr %arrayidx.1, align 4
  %arrayidx.2 = getelementptr inbounds nuw i8, ptr %a, i64 8
  %3 = load i32, ptr %arrayidx.2, align 4
  %or.2 = or i32 %3, 65535
  store i32 %or.2, ptr %arrayidx.2, align 4
  ret void
}

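; The following cases already have short lowerings (zext.h, a slli+srli pair,
; not), so no inverted-constant andn/xnor should be emitted for them.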
define i32 @andlow16(i32 %x) {
; CHECK-LABEL: andlow16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    zext.h a0, a0
; CHECK-NEXT:    ret
  %and = and i32 %x, 65535
  ret i32 %and
}

define i32 @andlow24(i32 %x) {
; RV32-LABEL: andlow24:
; RV32:       # %bb.0:
; RV32-NEXT:    slli a0, a0, 8
; RV32-NEXT:    srli a0, a0, 8
; RV32-NEXT:    ret
;
; RV64-LABEL: andlow24:
; RV64:       # %bb.0:
; RV64-NEXT:    slli a0, a0, 40
; RV64-NEXT:    srli a0, a0, 40
; RV64-NEXT:    ret
  %and = and i32 %x, 16777215
  ret i32 %and
}

define i32 @compl(i32 %x) {
; CHECK-LABEL: compl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    ret
  %not = xor i32 %x, -1
  ret i32 %not
}

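; With Zbs, an immediate of 4095 can be formed as ori with 2047 plus
; bseti/binvi of bit 11; without Zbs the inverted constant is used with
; orn/xnor.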
define i32 @orlow12(i32 %x) {
; NOZBS32-LABEL: orlow12:
; NOZBS32:       # %bb.0:
; NOZBS32-NEXT:    lui a1, 1048575
; NOZBS32-NEXT:    orn a0, a0, a1
; NOZBS32-NEXT:    ret
;
; NOZBS64-LABEL: orlow12:
; NOZBS64:       # %bb.0:
; NOZBS64-NEXT:    lui a1, 1048575
; NOZBS64-NEXT:    orn a0, a0, a1
; NOZBS64-NEXT:    ret
;
; ZBS-LABEL: orlow12:
; ZBS:       # %bb.0:
; ZBS-NEXT:    ori a0, a0, 2047
; ZBS-NEXT:    bseti a0, a0, 11
; ZBS-NEXT:    ret
  %or = or i32 %x, 4095
  ret i32 %or
}

define i32 @xorlow12(i32 %x) {
; NOZBS32-LABEL: xorlow12:
; NOZBS32:       # %bb.0:
; NOZBS32-NEXT:    lui a1, 1048575
; NOZBS32-NEXT:    xnor a0, a0, a1
; NOZBS32-NEXT:    ret
;
; NOZBS64-LABEL: xorlow12:
; NOZBS64:       # %bb.0:
; NOZBS64-NEXT:    lui a1, 1048575
; NOZBS64-NEXT:    xnor a0, a0, a1
; NOZBS64-NEXT:    ret
;
; ZBS-LABEL: xorlow12:
; ZBS:       # %bb.0:
; ZBS-NEXT:    xori a0, a0, 2047
; ZBS-NEXT:    binvi a0, a0, 11
; ZBS-NEXT:    ret
  %xor = xor i32 %x, 4095
  ret i32 %xor
}

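; i64 immediates: RV32 handles the low and high register halves separately,
; while RV64 materializes the inverted constant with lui plus a shift before
; the andn/orn/xnor (or uses ori/bseti when Zbs is available).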
define i64 @andimm64(i64 %x) {
; RV32-LABEL: andimm64:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, 4080
; RV32-NEXT:    andn a0, a0, a1
; RV32-NEXT:    li a1, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: andimm64:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, 983295
; RV64-NEXT:    slli a1, a1, 4
; RV64-NEXT:    andn a0, a0, a1
; RV64-NEXT:    ret
  %and = and i64 %x, 4278255615
  ret i64 %and
}

define i64 @orimm64srli(i64 %x) {
; RV32-LABEL: orimm64srli:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a2, 1040384
; RV32-NEXT:    orn a0, a0, a2
; RV32-NEXT:    lui a2, 917504
; RV32-NEXT:    or a1, a1, a2
; RV32-NEXT:    ret
;
; RV64-LABEL: orimm64srli:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, 983040
; RV64-NEXT:    srli a1, a1, 3
; RV64-NEXT:    orn a0, a0, a1
; RV64-NEXT:    ret
  %or = or i64 %x, -2305843009180139521
  ret i64 %or
}

define i64 @andnofff(i64 %x) {
; RV32-LABEL: andnofff:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a2, 1044480
; RV32-NEXT:    and a1, a1, a2
; RV32-NEXT:    andi a0, a0, 255
; RV32-NEXT:    ret
;
; RV64-LABEL: andnofff:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, 1048560
; RV64-NEXT:    srli a1, a1, 8
; RV64-NEXT:    andn a0, a0, a1
; RV64-NEXT:    ret
  %and = and i64 %x, -72057594037927681
  ret i64 %and
}

define i64 @ornofff(i64 %x) {
; NOZBS32-LABEL: ornofff:
; NOZBS32:       # %bb.0:
; NOZBS32-NEXT:    lui a2, 524288
; NOZBS32-NEXT:    or a1, a1, a2
; NOZBS32-NEXT:    ori a0, a0, 2047
; NOZBS32-NEXT:    ret
;
; NOZBS64-LABEL: ornofff:
; NOZBS64:       # %bb.0:
; NOZBS64-NEXT:    lui a1, 1048575
; NOZBS64-NEXT:    srli a1, a1, 1
; NOZBS64-NEXT:    orn a0, a0, a1
; NOZBS64-NEXT:    ret
;
; ZBS32-LABEL: ornofff:
; ZBS32:       # %bb.0:
; ZBS32-NEXT:    ori a0, a0, 2047
; ZBS32-NEXT:    bseti a1, a1, 31
; ZBS32-NEXT:    ret
;
; ZBS64-LABEL: ornofff:
; ZBS64:       # %bb.0:
; ZBS64-NEXT:    ori a0, a0, 2047
; ZBS64-NEXT:    bseti a0, a0, 63
; ZBS64-NEXT:    ret
  %or = or i64 %x, -9223372036854773761
  ret i64 %or
}

define i64 @xornofff(i64 %x) {
; RV32-LABEL: xornofff:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a2, 983040
; RV32-NEXT:    xor a1, a1, a2
; RV32-NEXT:    xori a0, a0, 255
; RV32-NEXT:    ret
;
; RV64-LABEL: xornofff:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, 1048575
; RV64-NEXT:    srli a1, a1, 4
; RV64-NEXT:    xnor a0, a0, a1
; RV64-NEXT:    ret
  %xor = xor i64 %x, -1152921504606846721
  ret i64 %xor
}