; xref: /llvm-project/llvm/test/Transforms/InstCombine/PowerPC/vsx-unaligned.ll (revision fcfc31fffb9a83416453e60bd0dff2df93c2ee20)
; Verify that we can create unaligned loads and stores from VSX intrinsics.

; RUN: opt < %s -passes=instcombine -S | FileCheck %s

target triple = "powerpc64-unknown-linux-gnu"

; Globals are deliberately under-aligned (align 1) so InstCombine must emit
; unaligned vector loads/stores when folding the VSX intrinsics below.
@vf = common global <4 x float> zeroinitializer, align 1
@res_vf = common global <4 x float> zeroinitializer, align 1
@vd = common global <2 x double> zeroinitializer, align 1
@res_vd = common global <2 x double> zeroinitializer, align 1

; Round-trip <4 x float> and <2 x double> vectors through the VSX
; load/store intrinsics; InstCombine should fold each intrinsic pair into a
; plain unaligned (align 1) vector load and store.
define void @test1() {
entry:
  %t1 = alloca ptr, align 8
  %t2 = alloca ptr, align 8
  store ptr @vf, ptr %t1, align 8
  %0 = load ptr, ptr %t1, align 8
  %1 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(ptr %0)
  store ptr @res_vf, ptr %t1, align 8
  %2 = load ptr, ptr %t1, align 8
  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %1, ptr %2)
  store ptr @vd, ptr %t2, align 8
  %3 = load ptr, ptr %t2, align 8
  %4 = call <2 x double> @llvm.ppc.vsx.lxvd2x(ptr %3)
  store ptr @res_vd, ptr %t2, align 8
  %5 = load ptr, ptr %t2, align 8
  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %4, ptr %5)
  ret void
}

; CHECK-LABEL: @test1
; CHECK: %0 = load <4 x i32>, ptr @vf, align 1
; CHECK: store <4 x i32> %0, ptr @res_vf, align 1
; CHECK: %1 = load <2 x double>, ptr @vd, align 1
; CHECK: store <2 x double> %1, ptr @res_vd, align 1

declare <4 x i32> @llvm.ppc.vsx.lxvw4x(ptr)
declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, ptr)
declare <2 x double> @llvm.ppc.vsx.lxvd2x(ptr)
declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, ptr)