; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=thumbv7-apple-ios3.0.0 -mcpu=swift | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"

; On Swift, an unaligned <2 x double> store needs 4 uops, so it is cheaper
; to keep these stores scalar.
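; For reference, had vectorization been deemed profitable here, the SLP
; vectorizer would emit a single <2 x double> load/store pair instead of the
; two scalar pairs checked below. A sketch of that form (not checked by this
; test):
;   %vec = load <2 x double>, ptr %src, align 8
;   store <2 x double> %vec, ptr %dst, align 8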

define void @expensive_double_store(ptr noalias %dst, ptr noalias %src, i64 %count) {
; CHECK-LABEL: @expensive_double_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[SRC:%.*]], align 8
; CHECK-NEXT:    store double [[TMP0]], ptr [[DST:%.*]], align 8
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds double, ptr [[SRC]], i64 1
; CHECK-NEXT:    [[TMP1:%.*]] = load double, ptr [[ARRAYIDX2]], align 8
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 1
; CHECK-NEXT:    store double [[TMP1]], ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %0 = load double, ptr %src, align 8
  store double %0, ptr %dst, align 8
  %arrayidx2 = getelementptr inbounds double, ptr %src, i64 1
  %1 = load double, ptr %arrayidx2, align 8
  %arrayidx3 = getelementptr inbounds double, ptr %dst, i64 1
  store double %1, ptr %arrayidx3, align 8
  ret void
}