# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
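#
# Check that G_INTRINSIC_W_SIDE_EFFECTS calls to @llvm.aarch64.ldxr are
# selected to the exclusive-load instructions LDXRB, LDXRH, LDXRW, and LDXRX,
# with sub-64-bit results widened to 64 bits via SUBREG_TO_REG.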

--- |
  define void @test_load_i8(ptr %addr) { ret void }
  define void @test_load_i16(ptr %addr) { ret void }
  define void @test_load_i32(ptr %addr) { ret void }
  define void @test_load_i64(ptr %addr) { ret void }
...
---
name:            test_load_i8
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_load_i8
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: [[LDXRB:%[0-9]+]]:gpr32 = LDXRB [[COPY]] :: (volatile load (s8) from %ir.addr)
    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDXRB]], %subreg.sub_32
    ; CHECK: $x1 = COPY [[SUBREG_TO_REG]]
    ; CHECK: RET_ReallyLR implicit $x1
    %0:gpr(p0) = COPY $x0
    %1:gpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), %0(p0) :: (volatile load (s8) from %ir.addr)
    $x1 = COPY %1(s64)
    RET_ReallyLR implicit $x1

...
---
name:            test_load_i16
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_load_i16
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: [[LDXRH:%[0-9]+]]:gpr32 = LDXRH [[COPY]] :: (volatile load (s16) from %ir.addr)
    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDXRH]], %subreg.sub_32
    ; CHECK: $x1 = COPY [[SUBREG_TO_REG]]
    ; CHECK: RET_ReallyLR implicit $x1
    %0:gpr(p0) = COPY $x0
    %1:gpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), %0(p0) :: (volatile load (s16) from %ir.addr)
    $x1 = COPY %1(s64)
    RET_ReallyLR implicit $x1

...
---
name:            test_load_i32
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_load_i32
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: [[LDXRW:%[0-9]+]]:gpr32 = LDXRW [[COPY]] :: (volatile load (s32) from %ir.addr)
    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDXRW]], %subreg.sub_32
    ; CHECK: $x1 = COPY [[SUBREG_TO_REG]]
    ; CHECK: RET_ReallyLR implicit $x1
    %0:gpr(p0) = COPY $x0
    %1:gpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), %0(p0) :: (volatile load (s32) from %ir.addr)
    $x1 = COPY %1(s64)
    RET_ReallyLR implicit $x1

...
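# The 64-bit load already produces a gpr64 result, so the selected LDXRX is
# copied to $x1 directly with no SUBREG_TO_REG widening.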
---
name:            test_load_i64
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x0
    ; CHECK-LABEL: name: test_load_i64
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: [[LDXRX:%[0-9]+]]:gpr64 = LDXRX [[COPY]] :: (volatile load (s64) from %ir.addr)
    ; CHECK: $x1 = COPY [[LDXRX]]
    ; CHECK: RET_ReallyLR implicit $x1
    %0:gpr(p0) = COPY $x0
    %1:gpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), %0(p0) :: (volatile load (s64) from %ir.addr)
    $x1 = COPY %1(s64)
    RET_ReallyLR implicit $x1