; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s

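; These tests exercise the 'R' and 'cR' inline-asm constraints, which request
; a GPR register pair so that values wider than XLEN (here, i64 on rv32) can
; be passed through inline assembly.

; Pass an i64 through the 'R' pair constraint; '=&R' marks the output pair
; early-clobber, so a fresh pair (a2/a3) is allocated for the result.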
define i64 @test_Pr_wide_scalar_simple(i64 noundef %0) nounwind {
; CHECK-LABEL: test_Pr_wide_scalar_simple:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    # a2 <- a0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    mv a1, a3
; CHECK-NEXT:    ret
entry:
  %1 = call i64 asm sideeffect "/* $0 <- $1 */", "=&R,R"(i64 %0)
  ret i64 %1
}

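; Pack a 32-bit value into both halves of an i64, round-trip it through the
; pair constraint, and OR the two halves back together.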
define i32 @test_Pr_wide_scalar_with_ops(i32 noundef %0) nounwind {
; CHECK-LABEL: test_Pr_wide_scalar_with_ops:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mv a1, a0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    # a2 <- a0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    or a0, a2, a3
; CHECK-NEXT:    ret
entry:
  %1 = zext i32 %0 to i64
  %2 = shl i64 %1, 32
  %3 = or i64 %1, %2
  %4 = call i64 asm sideeffect "/* $0 <- $1 */", "=&R,R"(i64 %3)
  %5 = trunc i64 %4 to i32
  %6 = lshr i64 %4, 32
  %7 = trunc i64 %6 to i32
  %8 = or i32 %5, %7
  ret i32 %8
}

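; Mix a plain 'r' pointer output with a pair output that is tied ('0'/'1') to
; the corresponding inputs, exercising in/out pair operands.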
define i64 @test_Pr_wide_scalar_inout(ptr %0, i64 noundef %1) nounwind {
; CHECK-LABEL: test_Pr_wide_scalar_inout:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    mv a3, a2
; CHECK-NEXT:    sw a0, 12(sp)
; CHECK-NEXT:    mv a2, a1
; CHECK-NEXT:    sw a1, 0(sp)
; CHECK-NEXT:    sw a3, 4(sp)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    # a0; a2
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    sw a0, 12(sp)
; CHECK-NEXT:    sw a2, 0(sp)
; CHECK-NEXT:    sw a3, 4(sp)
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    mv a1, a3
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %2 = alloca ptr, align 4
  %3 = alloca i64, align 8
  store ptr %0, ptr %2, align 4
  store i64 %1, ptr %3, align 8
  %4 = load ptr, ptr %2, align 4
  %5 = load i64, ptr %3, align 8
  %6 = call { ptr, i64 } asm sideeffect "/* $0; $1 */", "=r,=R,0,1"(ptr %4, i64 %5)
  %7 = extractvalue { ptr, i64 } %6, 0
  %8 = extractvalue { ptr, i64 } %6, 1
  store ptr %7, ptr %2, align 4
  store i64 %8, ptr %3, align 8
  %9 = load i64, ptr %3, align 8
  ret i64 %9
}

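; Same as test_Pr_wide_scalar_simple, but with 'cR', the compressed-register
; form of the pair constraint (the 'c' prefix restricts the pair to registers
; encodable in compressed instructions).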
define i64 @test_cR_wide_scalar_simple(i64 noundef %0) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_simple:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    # a2 <- a0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    mv a1, a3
; CHECK-NEXT:    ret
entry:
  %1 = call i64 asm sideeffect "/* $0 <- $1 */", "=&^cR,^cR"(i64 %0)
  ret i64 %1
}

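; 'cR' variant of test_Pr_wide_scalar_with_ops.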
define i32 @test_cR_wide_scalar_with_ops(i32 noundef %0) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_with_ops:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mv a1, a0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    # a2 <- a0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    or a0, a2, a3
; CHECK-NEXT:    ret
entry:
  %1 = zext i32 %0 to i64
  %2 = shl i64 %1, 32
  %3 = or i64 %1, %2
  %4 = call i64 asm sideeffect "/* $0 <- $1 */", "=&^cR,^cR"(i64 %3)
  %5 = trunc i64 %4 to i32
  %6 = lshr i64 %4, 32
  %7 = trunc i64 %6 to i32
  %8 = or i32 %5, %7
  ret i32 %8
}

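; 'cR' variant of test_Pr_wide_scalar_inout, with the pair output tied to the
; pair input.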
define i64 @test_cR_wide_scalar_inout(ptr %0, i64 noundef %1) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_inout:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    mv a3, a2
; CHECK-NEXT:    sw a0, 12(sp)
; CHECK-NEXT:    mv a2, a1
; CHECK-NEXT:    sw a1, 0(sp)
; CHECK-NEXT:    sw a3, 4(sp)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    # a0; a2
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    sw a0, 12(sp)
; CHECK-NEXT:    sw a2, 0(sp)
; CHECK-NEXT:    sw a3, 4(sp)
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    mv a1, a3
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %2 = alloca ptr, align 4
  %3 = alloca i64, align 8
  store ptr %0, ptr %2, align 4
  store i64 %1, ptr %3, align 8
  %4 = load ptr, ptr %2, align 4
  %5 = load i64, ptr %3, align 8
  %6 = call { ptr, i64 } asm sideeffect "/* $0; $1 */", "=r,=^cR,0,1"(ptr %4, i64 %5)
  %7 = extractvalue { ptr, i64 } %6, 0
  %8 = extractvalue { ptr, i64 } %6, 1
  store ptr %7, ptr %2, align 4
  store i64 %8, ptr %3, align 8
  %9 = load i64, ptr %3, align 8
  ret i64 %9
}