; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s
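; Check register-pair allocation for i128 operands with the "R" and "cR"
; inline asm constraints on rv64.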

define i128 @test_R_wide_scalar_simple(i128 noundef %0) nounwind {
; CHECK-LABEL: test_R_wide_scalar_simple:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    # a2 <- a0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    mv a1, a3
; CHECK-NEXT:    ret
entry:
  %1 = call i128 asm sideeffect "/* $0 <- $1 */", "=&R,R"(i128 %0)
  ret i128 %1
}

define i64 @test_R_wide_scalar_with_ops(i64 noundef %0) nounwind {
; CHECK-LABEL: test_R_wide_scalar_with_ops:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mv a1, a0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    # a2 <- a0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    or a0, a2, a3
; CHECK-NEXT:    ret
entry:
  %1 = zext i64 %0 to i128
  %2 = shl i128 %1, 64
  %3 = or i128 %1, %2
  %4 = call i128 asm sideeffect "/* $0 <- $1 */", "=&R,R"(i128 %3)
  %5 = trunc i128 %4 to i64
  %6 = lshr i128 %4, 64
  %7 = trunc i128 %6 to i64
  %8 = or i64 %5, %7
  ret i64 %8
}

define i128 @test_R_wide_scalar_inout(ptr %0, i128 noundef %1) nounwind {
; CHECK-LABEL: test_R_wide_scalar_inout:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -32
; CHECK-NEXT:    mv a3, a2
; CHECK-NEXT:    sd a0, 24(sp)
; CHECK-NEXT:    mv a2, a1
; CHECK-NEXT:    sd a1, 0(sp)
; CHECK-NEXT:    sd a3, 8(sp)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    # a0; a2
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    sd a0, 24(sp)
; CHECK-NEXT:    sd a2, 0(sp)
; CHECK-NEXT:    sd a3, 8(sp)
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    mv a1, a3
; CHECK-NEXT:    addi sp, sp, 32
; CHECK-NEXT:    ret
entry:
  %2 = alloca ptr, align 8
  %3 = alloca i128, align 16
  store ptr %0, ptr %2, align 8
  store i128 %1, ptr %3, align 16
  %4 = load ptr, ptr %2, align 8
  %5 = load i128, ptr %3, align 16
  %6 = call { ptr, i128 } asm sideeffect "/* $0; $1 */", "=r,=R,0,1"(ptr %4, i128 %5)
  %7 = extractvalue { ptr, i128 } %6, 0
  %8 = extractvalue { ptr, i128 } %6, 1
  store ptr %7, ptr %2, align 8
  store i128 %8, ptr %3, align 16
  %9 = load i128, ptr %3, align 16
  ret i128 %9
}

define i128 @test_cR_wide_scalar_simple(i128 noundef %0) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_simple:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    # a2 <- a0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    mv a1, a3
; CHECK-NEXT:    ret
entry:
  %1 = call i128 asm sideeffect "/* $0 <- $1 */", "=&^cR,^cR"(i128 %0)
  ret i128 %1
}

define i64 @test_cR_wide_scalar_with_ops(i64 noundef %0) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_with_ops:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mv a1, a0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    # a2 <- a0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    or a0, a2, a3
; CHECK-NEXT:    ret
entry:
  %1 = zext i64 %0 to i128
  %2 = shl i128 %1, 64
  %3 = or i128 %1, %2
  %4 = call i128 asm sideeffect "/* $0 <- $1 */", "=&^cR,^cR"(i128 %3)
  %5 = trunc i128 %4 to i64
  %6 = lshr i128 %4, 64
  %7 = trunc i128 %6 to i64
  %8 = or i64 %5, %7
  ret i64 %8
}

define i128 @test_cR_wide_scalar_inout(ptr %0, i128 noundef %1) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_inout:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -32
; CHECK-NEXT:    mv a3, a2
; CHECK-NEXT:    sd a0, 24(sp)
; CHECK-NEXT:    mv a2, a1
; CHECK-NEXT:    sd a1, 0(sp)
; CHECK-NEXT:    sd a3, 8(sp)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    # a0; a2
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    sd a0, 24(sp)
; CHECK-NEXT:    sd a2, 0(sp)
; CHECK-NEXT:    sd a3, 8(sp)
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    mv a1, a3
; CHECK-NEXT:    addi sp, sp, 32
; CHECK-NEXT:    ret
entry:
  %2 = alloca ptr, align 8
  %3 = alloca i128, align 16
  store ptr %0, ptr %2, align 8
  store i128 %1, ptr %3, align 16
  %4 = load ptr, ptr %2, align 8
  %5 = load i128, ptr %3, align 16
  %6 = call { ptr, i128 } asm sideeffect "/* $0; $1 */", "=r,=^cR,0,1"(ptr %4, i128 %5)
  %7 = extractvalue { ptr, i128 } %6, 0
  %8 = extractvalue { ptr, i128 } %6, 1
  store ptr %7, ptr %2, align 8
  store i128 %8, ptr %3, align 16
  %9 = load i128, ptr %3, align 16
  ret i128 %9
}