# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
#
# Tests for the AArch64 pre-legalizer combiner's inlining of G_MEMCPY:
# small constant-length copies are expanded into load/store pairs, while
# non-constant lengths (test_memcpy1) and minsize functions
# (test_memcpy2_const_minsize) keep the G_MEMCPY untouched.
--- |
  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
  target triple = "arm64-apple-darwin"

  ; Length is a runtime value (%len), not a constant.
  define void @test_memcpy1(ptr nocapture %dst, ptr nocapture readonly %src, i64 %len) local_unnamed_addr #0 {
  entry:
    %0 = bitcast ptr %dst to ptr
    %1 = bitcast ptr %src to ptr
    tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 %len, i1 false)
    ret void
  }

  declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg) #1
  declare void @llvm.memcpy.p1.p2.i64(ptr addrspace(1) nocapture writeonly, ptr addrspace(2) nocapture readonly, i64, i1 immarg) #1

  ; Constant 72-byte copy, default optimization attributes (#0).
  define void @test_memcpy2_const(ptr nocapture %dst, ptr nocapture readonly %src) local_unnamed_addr #0 {
  entry:
    %0 = bitcast ptr %dst to ptr
    %1 = bitcast ptr %src to ptr
    tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 72, i1 false)
    ret void
  }

  ; Same 72-byte copy, but the function is marked optsize (#2).
  define void @test_memcpy2_const_optsize(ptr nocapture %dst, ptr nocapture readonly %src) local_unnamed_addr #2 {
  entry:
    %0 = bitcast ptr %dst to ptr
    %1 = bitcast ptr %src to ptr
    tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 72, i1 false)
    ret void
  }

  ; Same 72-byte copy, but the function is marked minsize (#3).
  define void @test_memcpy2_const_minsize(ptr nocapture %dst, ptr nocapture readonly %src) local_unnamed_addr #3 {
  entry:
    %0 = bitcast ptr %dst to ptr
    %1 = bitcast ptr %src to ptr
    tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 72, i1 false)
    ret void
  }

  ; 143 bytes: not a multiple of 16, so the expansion needs an unaligned tail.
  define void @test_memcpy3_const_arrays_unaligned(ptr nocapture %dst, ptr nocapture readonly %src) local_unnamed_addr #0 {
  entry:
    %0 = bitcast ptr %dst to ptr
    %1 = bitcast ptr %src to ptr
    tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 143, i1 false)
    ret void
  }

  ; Copy between two non-default address spaces (1 <- 2).
  define void @test_memcpy_addrspace(ptr addrspace(1) nocapture %dst, ptr addrspace(2) nocapture readonly %src) local_unnamed_addr #0 {
  entry:
    %0 = bitcast ptr addrspace(1) %dst to ptr addrspace(1)
    %1 = bitcast ptr addrspace(2) %src to ptr addrspace(2)
    tail call void @llvm.memcpy.p1.p2.i64(ptr addrspace(1) align 4 %0, ptr addrspace(2) align 4 %1, i64 72, i1 false)
    ret void
  }


  attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2,+zcm,+zcz" "unsafe-fp-math"="false" "use-soft-float"="false" }
  attributes #1 = { argmemonly nounwind }
  attributes #2 = { optsize }
  attributes #3 = { minsize }

...
# The length operand is a COPY of $x2 rather than a G_CONSTANT, so the
# combiner leaves the G_MEMCPY in place (the CHECK lines below still
# contain G_MEMCPY after the pass runs).
---
name: test_memcpy1
alignment: 4
tracksRegLiveness: true
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
machineFunctionInfo: {}
body: |
  bb.1.entry:
    liveins: $x0, $x1, $x2

    ; CHECK-LABEL: name: test_memcpy1
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
    ; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = COPY $x2
    G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
    RET_ReallyLR

...
# Constant 72-byte copy: the combiner unrolls it into four 16-byte (s128)
# load/store pairs plus one trailing 8-byte (s64) pair, as shown by the
# CHECK lines (offsets 0, 16, 32, 48, then s64 at 64).
---
name: test_memcpy2_const
alignment: 4
tracksRegLiveness: true
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
machineFunctionInfo: {}
body: |
  bb.1.entry:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: test_memcpy2_const
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
    ; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
    ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load (s128) from %ir.1 + 16, align 4)
    ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
    ; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
    ; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
    ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
    ; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
    ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
    ; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
    ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
    ; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
    ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
    ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
    ; CHECK: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP6]](p0) :: (load (s64) from %ir.1 + 64, align 4)
    ; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
    ; CHECK: G_STORE [[LOAD4]](s64), [[GEP7]](p0) :: (store (s64) into %ir.0 + 64, align 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 72
    G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
    RET_ReallyLR

...
# Same copy in an optsize function: still inlined, producing the same
# load/store sequence as test_memcpy2_const.
---
name: test_memcpy2_const_optsize
alignment: 4
tracksRegLiveness: true
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
machineFunctionInfo: {}
body: |
  bb.1.entry:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: test_memcpy2_const_optsize
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
    ; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
    ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load (s128) from %ir.1 + 16, align 4)
    ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
    ; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
    ; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
    ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
    ; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
    ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
    ; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
    ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
    ; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
    ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
    ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
    ; CHECK: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP6]](p0) :: (load (s64) from %ir.1 + 64, align 4)
    ; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
    ; CHECK: G_STORE [[LOAD4]](s64), [[GEP7]](p0) :: (store (s64) into %ir.0 + 64, align 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 72
    G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
    RET_ReallyLR

...
# Same copy in a minsize function: NOT inlined — the CHECK lines show the
# G_MEMCPY is kept as-is.
---
name: test_memcpy2_const_minsize
alignment: 4
tracksRegLiveness: true
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
machineFunctionInfo: {}
body: |
  bb.1.entry:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: test_memcpy2_const_minsize
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 72
    ; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 72
    G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
    RET_ReallyLR

...
# 143-byte copy: eight aligned s128 chunks (offsets 0..112), then a final
# s128 at offset 127 that overlaps the previous chunk so the tail is a
# single wide access (align 1, basealign 4 in the CHECK lines).
---
name: test_memcpy3_const_arrays_unaligned
alignment: 4
tracksRegLiveness: true
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
machineFunctionInfo: {}
body: |
  bb.1.entry:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: test_memcpy3_const_arrays_unaligned
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
    ; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
    ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load (s128) from %ir.1 + 16, align 4)
    ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
    ; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
    ; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
    ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
    ; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
    ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
    ; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
    ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
    ; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
    ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
    ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
    ; CHECK: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[GEP6]](p0) :: (load (s128) from %ir.1 + 64, align 4)
    ; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
    ; CHECK: G_STORE [[LOAD4]](s128), [[GEP7]](p0) :: (store (s128) into %ir.0 + 64, align 4)
    ; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
    ; CHECK: [[GEP8:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C4]](s64)
    ; CHECK: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[GEP8]](p0) :: (load (s128) from %ir.1 + 80, align 4)
    ; CHECK: [[GEP9:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
    ; CHECK: G_STORE [[LOAD5]](s128), [[GEP9]](p0) :: (store (s128) into %ir.0 + 80, align 4)
    ; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
    ; CHECK: [[GEP10:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C5]](s64)
    ; CHECK: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[GEP10]](p0) :: (load (s128) from %ir.1 + 96, align 4)
    ; CHECK: [[GEP11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
    ; CHECK: G_STORE [[LOAD6]](s128), [[GEP11]](p0) :: (store (s128) into %ir.0 + 96, align 4)
    ; CHECK: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
    ; CHECK: [[GEP12:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C6]](s64)
    ; CHECK: [[LOAD7:%[0-9]+]]:_(s128) = G_LOAD [[GEP12]](p0) :: (load (s128) from %ir.1 + 112, align 4)
    ; CHECK: [[GEP13:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
    ; CHECK: G_STORE [[LOAD7]](s128), [[GEP13]](p0) :: (store (s128) into %ir.0 + 112, align 4)
    ; CHECK: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
    ; CHECK: [[GEP14:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C7]](s64)
    ; CHECK: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[GEP14]](p0) :: (load (s128) from %ir.1 + 127, align 1, basealign 4)
    ; CHECK: [[GEP15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
    ; CHECK: G_STORE [[LOAD8]](s128), [[GEP15]](p0) :: (store (s128) into %ir.0 + 127, align 1, basealign 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 143
    G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
    RET_ReallyLR

...
# Non-default address spaces: loads stay in addrspace 2 (p2 pointers) and
# stores in addrspace 1 (p1 pointers) throughout the inlined expansion.
---
name: test_memcpy_addrspace
alignment: 4
tracksRegLiveness: true
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
machineFunctionInfo: {}
body: |
  bb.1.entry:
    liveins: $x0, $x1

    ; CHECK-LABEL: name: test_memcpy_addrspace
    ; CHECK: liveins: $x0, $x1
    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p2) = COPY $x1
    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p2) :: (load (s128) from %ir.1, align 4, addrspace 2)
    ; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p1) :: (store (s128) into %ir.0, align 4, addrspace 1)
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C]](s64)
    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p2) :: (load (s128) from %ir.1 + 16, align 4, addrspace 2)
    ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
    ; CHECK: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p1) :: (store (s128) into %ir.0 + 16, align 4, addrspace 1)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
    ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C1]](s64)
    ; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p2) :: (load (s128) from %ir.1 + 32, align 4, addrspace 2)
    ; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
    ; CHECK: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p1) :: (store (s128) into %ir.0 + 32, align 4, addrspace 1)
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
    ; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C2]](s64)
    ; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p2) :: (load (s128) from %ir.1 + 48, align 4, addrspace 2)
    ; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
    ; CHECK: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p1) :: (store (s128) into %ir.0 + 48, align 4, addrspace 1)
    ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
    ; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C3]](s64)
    ; CHECK: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p2) :: (load (s64) from %ir.1 + 64, align 4, addrspace 2)
    ; CHECK: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
    ; CHECK: G_STORE [[LOAD4]](s64), [[PTR_ADD7]](p1) :: (store (s64) into %ir.0 + 64, align 4, addrspace 1)
    ; CHECK: RET_ReallyLR
    %0:_(p1) = COPY $x0
    %1:_(p2) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 72
    G_MEMCPY %0(p1), %1(p2), %2(s64), 1 :: (store (s8) into %ir.0, align 4, addrspace 1), (load (s8) from %ir.1, align 4, addrspace 2)
    RET_ReallyLR


...
align 4, addrspace 1) 314 ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 315 ; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C3]](s64) 316 ; CHECK: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p2) :: (load (s64) from %ir.1 + 64, align 4, addrspace 2) 317 ; CHECK: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) 318 ; CHECK: G_STORE [[LOAD4]](s64), [[PTR_ADD7]](p1) :: (store (s64) into %ir.0 + 64, align 4, addrspace 1) 319 ; CHECK: RET_ReallyLR 320 %0:_(p1) = COPY $x0 321 %1:_(p2) = COPY $x1 322 %2:_(s64) = G_CONSTANT i64 72 323 G_MEMCPY %0(p1), %1(p2), %2(s64), 1 :: (store (s8) into %ir.0, align 4, addrspace 1), (load (s8) from %ir.1, align 4, addrspace 2) 324 RET_ReallyLR 325 326 327... 328