; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=arm64-apple-ios -mattr=+strict-align < %s | FileCheck %s

; Small (16 bytes here) unaligned memcpy() should be a function call if
; strict-alignment is turned on.
define void @t0(ptr %out, ptr %in) {
; CHECK-LABEL: t0:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -8
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    mov w2, #16 ; =0x10
; CHECK-NEXT:    bl _memcpy
; CHECK-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  call void @llvm.memcpy.p0.p0.i64(ptr %out, ptr %in, i64 16, i1 false)
  ret void
}

; Small (16 bytes here) aligned memcpy() should be inlined even if
; strict-alignment is turned on.
define void @t1(ptr align 8 %out, ptr align 8 %in) {
; CHECK-LABEL: t1:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    ldp x9, x8, [x1]
; CHECK-NEXT:    stp x9, x8, [x0]
; CHECK-NEXT:    ret
entry:
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %out, ptr align 8 %in, i64 16, i1 false)
  ret void
}

; Tiny (4 bytes here) unaligned memcpy() should be inlined with byte sized
; loads and stores if strict-alignment is turned on.
define void @t2(ptr %out, ptr %in) {
; CHECK-LABEL: t2:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    ldrb w8, [x1, #3]
; CHECK-NEXT:    ldrb w9, [x1, #2]
; CHECK-NEXT:    ldrb w10, [x1]
; CHECK-NEXT:    ldrb w11, [x1, #1]
; CHECK-NEXT:    strb w8, [x0, #3]
; CHECK-NEXT:    strb w9, [x0, #2]
; CHECK-NEXT:    strb w11, [x0, #1]
; CHECK-NEXT:    strb w10, [x0]
; CHECK-NEXT:    ret
entry:
  call void @llvm.memcpy.p0.p0.i64(ptr %out, ptr %in, i64 4, i1 false)
  ret void
}

declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)