; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=thumbv7-apple-ios3.0.0 -mcpu=swift | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"

; On swift, unaligned <2 x double> stores need 4 uops, so it is cheaper to
; keep these operations scalar (a sketch of the rejected vector form appears
; at the end of this file).

define void @expensive_double_store(ptr noalias %dst, ptr noalias %src, i64 %count) {
; CHECK-LABEL: @expensive_double_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[SRC:%.*]], align 8
; CHECK-NEXT:    store double [[TMP0]], ptr [[DST:%.*]], align 8
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds double, ptr [[SRC]], i64 1
; CHECK-NEXT:    [[TMP1:%.*]] = load double, ptr [[ARRAYIDX2]], align 8
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 1
; CHECK-NEXT:    store double [[TMP1]], ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %0 = load double, ptr %src, align 8
  store double %0, ptr %dst, align 8
  %arrayidx2 = getelementptr inbounds double, ptr %src, i64 1
  %1 = load double, ptr %arrayidx2, align 8
  %arrayidx3 = getelementptr inbounds double, ptr %dst, i64 1
  store double %1, ptr %arrayidx3, align 8
  ret void
}
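; For reference, a minimal sketch of the vector form the cost model rejects
; here -- an assumption of what SLP vectorization of the two adjacent scalar
; load/store pairs would look like, not output produced by this test:
;
;   %vec = load <2 x double>, ptr %src, align 8
;   store <2 x double> %vec, ptr %dst, align 8
;
; The align-8 (i.e. unaligned for a 16-byte vector) store is the expensive
; operation on swift, so the scalar sequence checked above is kept.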