; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64 -mattr=+ls64 -verify-machineinstrs -o - %s | FileCheck %s

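; FEAT_LS64's ld64b/st64b transfer 64 bytes through eight consecutive
; general-purpose registers starting at an even-numbered register, so the i512
; inline-asm operands in these tests are expected to be allocated to such
; register sequences (x2-x9 below, x0-x7 in @multi_output).

; i512 asm output ("=r") consumed by ld64b: the result is expected in x2-x9 and
; is then stored to %output with four stp pairs.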
define void @load(ptr %output, ptr %addr) {
; CHECK-LABEL: load:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    //APP
; CHECK-NEXT:    ld64b x2, [x1]
; CHECK-NEXT:    //NO_APP
; CHECK-NEXT:    stp x8, x9, [x0, #48]
; CHECK-NEXT:    stp x6, x7, [x0, #32]
; CHECK-NEXT:    stp x4, x5, [x0, #16]
; CHECK-NEXT:    stp x2, x3, [x0]
; CHECK-NEXT:    ret
entry:
  %val = call i512 asm sideeffect "ld64b $0,[$1]", "=r,r,~{memory}"(ptr %addr)
  store i512 %val, ptr %output, align 8
  ret void
}

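; i512 asm input ("r") to st64b: the value is loaded from %input into x2-x9 with
; ldp pairs before the asm block.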
define void @store(ptr %input, ptr %addr) {
; CHECK-LABEL: store:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldp x8, x9, [x0, #48]
; CHECK-NEXT:    ldp x6, x7, [x0, #32]
; CHECK-NEXT:    ldp x4, x5, [x0, #16]
; CHECK-NEXT:    ldp x2, x3, [x0]
; CHECK-NEXT:    //APP
; CHECK-NEXT:    st64b x2, [x1]
; CHECK-NEXT:    //NO_APP
; CHECK-NEXT:    ret
entry:
  %val = load i512, ptr %input, align 8
  call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 %val, ptr %addr)
  ret void
}

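; Here the i512 operand is assembled from sign-extended i32 loads. A 64-byte
; stack slot is still reserved, but the values are expected to be loaded
; straight into the consecutive sequence x2-x9 with ldpsw/ldrsw.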
define void @store2(ptr %in, ptr %addr) {
; CHECK-LABEL: store2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sub sp, sp, #64
; CHECK-NEXT:    .cfi_def_cfa_offset 64
; CHECK-NEXT:    ldpsw x2, x3, [x0]
; CHECK-NEXT:    ldrsw x4, [x0, #16]
; CHECK-NEXT:    ldrsw x5, [x0, #64]
; CHECK-NEXT:    ldrsw x6, [x0, #100]
; CHECK-NEXT:    ldrsw x7, [x0, #144]
; CHECK-NEXT:    ldrsw x8, [x0, #196]
; CHECK-NEXT:    ldrsw x9, [x0, #256]
; CHECK-NEXT:    //APP
; CHECK-NEXT:    st64b x2, [x1]
; CHECK-NEXT:    //NO_APP
; CHECK-NEXT:    add sp, sp, #64
; CHECK-NEXT:    ret
entry:
  %0 = load i32, ptr %in, align 4
  %conv = sext i32 %0 to i64
  %arrayidx1 = getelementptr inbounds i32, ptr %in, i64 1
  %1 = load i32, ptr %arrayidx1, align 4
  %conv2 = sext i32 %1 to i64
  %arrayidx4 = getelementptr inbounds i32, ptr %in, i64 4
  %2 = load i32, ptr %arrayidx4, align 4
  %conv5 = sext i32 %2 to i64
  %arrayidx7 = getelementptr inbounds i32, ptr %in, i64 16
  %3 = load i32, ptr %arrayidx7, align 4
  %conv8 = sext i32 %3 to i64
  %arrayidx10 = getelementptr inbounds i32, ptr %in, i64 25
  %4 = load i32, ptr %arrayidx10, align 4
  %conv11 = sext i32 %4 to i64
  %arrayidx13 = getelementptr inbounds i32, ptr %in, i64 36
  %5 = load i32, ptr %arrayidx13, align 4
  %conv14 = sext i32 %5 to i64
  %arrayidx16 = getelementptr inbounds i32, ptr %in, i64 49
  %6 = load i32, ptr %arrayidx16, align 4
  %conv17 = sext i32 %6 to i64
  %arrayidx19 = getelementptr inbounds i32, ptr %in, i64 64
  %7 = load i32, ptr %arrayidx19, align 4
  %conv20 = sext i32 %7 to i64
  %s.sroa.10.0.insert.ext = zext i64 %conv20 to i512
  %s.sroa.10.0.insert.shift = shl nuw i512 %s.sroa.10.0.insert.ext, 448
  %s.sroa.9.0.insert.ext = zext i64 %conv17 to i512
  %s.sroa.9.0.insert.shift = shl nuw nsw i512 %s.sroa.9.0.insert.ext, 384
  %s.sroa.9.0.insert.insert = or i512 %s.sroa.10.0.insert.shift, %s.sroa.9.0.insert.shift
  %s.sroa.8.0.insert.ext = zext i64 %conv14 to i512
  %s.sroa.8.0.insert.shift = shl nuw nsw i512 %s.sroa.8.0.insert.ext, 320
  %s.sroa.8.0.insert.insert = or i512 %s.sroa.9.0.insert.insert, %s.sroa.8.0.insert.shift
  %s.sroa.7.0.insert.ext = zext i64 %conv11 to i512
  %s.sroa.7.0.insert.shift = shl nuw nsw i512 %s.sroa.7.0.insert.ext, 256
  %s.sroa.7.0.insert.insert = or i512 %s.sroa.8.0.insert.insert, %s.sroa.7.0.insert.shift
  %s.sroa.6.0.insert.ext = zext i64 %conv8 to i512
  %s.sroa.6.0.insert.shift = shl nuw nsw i512 %s.sroa.6.0.insert.ext, 192
  %s.sroa.6.0.insert.insert = or i512 %s.sroa.7.0.insert.insert, %s.sroa.6.0.insert.shift
  %s.sroa.5.0.insert.ext = zext i64 %conv5 to i512
  %s.sroa.5.0.insert.shift = shl nuw nsw i512 %s.sroa.5.0.insert.ext, 128
  %s.sroa.4.0.insert.ext = zext i64 %conv2 to i512
  %s.sroa.4.0.insert.shift = shl nuw nsw i512 %s.sroa.4.0.insert.ext, 64
  %s.sroa.4.0.insert.mask = or i512 %s.sroa.6.0.insert.insert, %s.sroa.5.0.insert.shift
  %s.sroa.0.0.insert.ext = zext i64 %conv to i512
  %s.sroa.0.0.insert.mask = or i512 %s.sroa.4.0.insert.mask, %s.sroa.4.0.insert.shift
  %s.sroa.0.0.insert.insert = or i512 %s.sroa.0.0.insert.mask, %s.sroa.0.0.insert.ext
  call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 %s.sroa.0.0.insert.insert, ptr %addr)
  ret void
}

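; Inline asm with two outputs, an i512 and a ptr: the i512 is expected in x0-x7
; and the pointer result in x8, which then serves as the base for the stores.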
define void @multi_output(ptr %addr) {
; CHECK-LABEL: multi_output:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    //APP
; CHECK-NEXT:    ld64b x0, [x0]
; CHECK-NEXT:    mov x8, x0
; CHECK-NEXT:    //NO_APP
; CHECK-NEXT:    stp x6, x7, [x8, #48]
; CHECK-NEXT:    stp x4, x5, [x8, #32]
; CHECK-NEXT:    stp x2, x3, [x8, #16]
; CHECK-NEXT:    stp x0, x1, [x8]
; CHECK-NEXT:    ret
entry:
  %val = call { i512, ptr } asm sideeffect "ld64b $0, [$2]; mov $1, $2", "=r,=r,r,~{memory}"(ptr %addr)
  %val0 = extractvalue { i512, ptr } %val, 0
  %val1 = extractvalue { i512, ptr } %val, 1
  store i512 %val0, ptr %val1, align 8
  ret void
}

; FIXME: This case still crashes in RegsForValue::AddInlineAsmOperands without
; additional changes. I believe this is a bug in target-independent code that
; is worked around in the RISC-V and SystemZ backends, but it should almost
; certainly be fixed there instead.
; define void @tied_constraints(ptr %addr) {
; entry:
;   %in = load i512, ptr %addr, align 8
;   %val = call { i512, ptr } asm sideeffect "nop", "=r,=r,0,1,~{memory}"(i512 %in, ptr %addr)
;   %val0 = extractvalue { i512, ptr } %val, 0
;   %val1 = extractvalue { i512, ptr } %val, 1
;   store i512 %val0, ptr %val1, align 8
;   ret void
; }