xref: /llvm-project/llvm/test/CodeGen/VE/Vector/vec_load.ll (revision b006b60dc993b2e0ba3e412c80709477241b6be6)
19ebaec46SSimon Moll; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
29ebaec46SSimon Moll; RUN: llc < %s -mtriple=ve-unknown-unknown -mattr=+vpu | FileCheck %s
39ebaec46SSimon Moll
4*b006b60dSNikita Popovdeclare <128 x double> @llvm.masked.load.v128f64.p0(ptr %0, i32 immarg %1, <128 x i1> %2, <128 x double> %3) #0
59ebaec46SSimon Moll
69ebaec46SSimon Moll; TODO: Custom widen by lowering to vvp_load in ReplaceNodeResults
79ebaec46SSimon Moll; Function Attrs: nounwind
8*b006b60dSNikita Popov; define fastcc <128 x double> @vec_mload_v128f64(ptr %P, <128 x i1> %M) {
9*b006b60dSNikita Popov;   %r = call <128 x double> @llvm.masked.load.v128f64.p0(ptr %P, i32 16, <128 x i1> %M, <128 x double> undef)
109ebaec46SSimon Moll;   ret <128 x double> %r
119ebaec46SSimon Moll; }
129ebaec46SSimon Moll
139ebaec46SSimon Moll
14*b006b60dSNikita Popovdeclare <256 x double> @llvm.masked.load.v256f64.p0(ptr %0, i32 immarg %1, <256 x i1> %2, <256 x double> %3) #0
159ebaec46SSimon Moll
169ebaec46SSimon Moll; Function Attrs: nounwind
; Masked load of <256 x double> with an undef passthru. Expected lowering
; (per the CHECK lines below): set VL to 256, build element indices with
; vseq, scale by the 8-byte element size (vmulu.l), add the base pointer
; (vaddu.l), then gather under mask %vm1 with vgt. No merge step is needed
; because the passthru operand is undef.
17*b006b60dSNikita Popovdefine fastcc <256 x double> @vec_mload_v256f64(ptr %P, <256 x i1> %M) {
189ebaec46SSimon Moll; CHECK-LABEL: vec_mload_v256f64:
199ebaec46SSimon Moll; CHECK:       # %bb.0:
209ebaec46SSimon Moll; CHECK-NEXT:    lea %s1, 256
219ebaec46SSimon Moll; CHECK-NEXT:    lvl %s1
229ebaec46SSimon Moll; CHECK-NEXT:    vseq %v0
239ebaec46SSimon Moll; CHECK-NEXT:    vmulu.l %v0, 8, %v0, %vm1
249ebaec46SSimon Moll; CHECK-NEXT:    vaddu.l %v0, %s0, %v0, %vm1
259ebaec46SSimon Moll; CHECK-NEXT:    vgt %v0, %v0, 0, 0, %vm1
269ebaec46SSimon Moll; CHECK-NEXT:    b.l.t (, %s10)
27*b006b60dSNikita Popov  %r = call <256 x double> @llvm.masked.load.v256f64.p0(ptr %P, i32 16, <256 x i1> %M, <256 x double> undef)
289ebaec46SSimon Moll  ret <256 x double> %r
299ebaec46SSimon Moll}
309ebaec46SSimon Moll
319ebaec46SSimon Moll; Function Attrs: nounwind
; Plain (unmasked) <256 x double> load. Expected lowering: a single
; unit-stride vld (stride 8 = element size in bytes) after setting the
; vector length to 256 — no gather sequence is required.
32*b006b60dSNikita Popovdefine fastcc <256 x double> @vec_load_v256f64(ptr %P) {
339ebaec46SSimon Moll; CHECK-LABEL: vec_load_v256f64:
349ebaec46SSimon Moll; CHECK:       # %bb.0:
359ebaec46SSimon Moll; CHECK-NEXT:    lea %s1, 256
369ebaec46SSimon Moll; CHECK-NEXT:    lvl %s1
379ebaec46SSimon Moll; CHECK-NEXT:    vld %v0, 8, %s0
389ebaec46SSimon Moll; CHECK-NEXT:    b.l.t (, %s10)
39*b006b60dSNikita Popov  %r = load <256 x double>, ptr %P, align 4
409ebaec46SSimon Moll  ret <256 x double> %r
419ebaec46SSimon Moll}
429ebaec46SSimon Moll
439ebaec46SSimon Moll; Function Attrs: nounwind
; Masked load of <256 x double> with a non-undef passthru %PT. Same
; vseq/vmulu.l/vaddu.l/vgt gather sequence as the undef-passthru case, but
; with a trailing vmrg that merges the gathered lanes into the passthru
; vector (%v0) under mask %vm1.
44*b006b60dSNikita Popovdefine fastcc <256 x double> @vec_mload_pt_v256f64(ptr %P, <256 x double> %PT, <256 x i1> %M) {
459ebaec46SSimon Moll; CHECK-LABEL: vec_mload_pt_v256f64:
469ebaec46SSimon Moll; CHECK:       # %bb.0:
479ebaec46SSimon Moll; CHECK-NEXT:    lea %s1, 256
489ebaec46SSimon Moll; CHECK-NEXT:    lvl %s1
499ebaec46SSimon Moll; CHECK-NEXT:    vseq %v1
509ebaec46SSimon Moll; CHECK-NEXT:    vmulu.l %v1, 8, %v1, %vm1
519ebaec46SSimon Moll; CHECK-NEXT:    vaddu.l %v1, %s0, %v1, %vm1
529ebaec46SSimon Moll; CHECK-NEXT:    vgt %v1, %v1, 0, 0, %vm1
539ebaec46SSimon Moll; CHECK-NEXT:    vmrg %v0, %v0, %v1, %vm1
549ebaec46SSimon Moll; CHECK-NEXT:    b.l.t (, %s10)
55*b006b60dSNikita Popov  %r = call <256 x double> @llvm.masked.load.v256f64.p0(ptr %P, i32 16, <256 x i1> %M, <256 x double> %PT)
569ebaec46SSimon Moll  ret <256 x double> %r
579ebaec46SSimon Moll}
589ebaec46SSimon Moll
599ebaec46SSimon Moll
60*b006b60dSNikita Popovdeclare <256 x float> @llvm.masked.load.v256f32.p0(ptr %0, i32 immarg %1, <256 x i1> %2, <256 x float> %3) #0
619ebaec46SSimon Moll
629ebaec46SSimon Moll; Function Attrs: nounwind
; Masked load of <256 x float>, undef passthru. Same index-then-gather
; lowering as the f64 case, except indices are scaled by 4 (f32 element
; size) and the gather is vgtu (the 32-bit "upper" gather form).
63*b006b60dSNikita Popovdefine fastcc <256 x float> @vec_mload_v256f32(ptr %P, <256 x i1> %M) {
649ebaec46SSimon Moll; CHECK-LABEL: vec_mload_v256f32:
659ebaec46SSimon Moll; CHECK:       # %bb.0:
669ebaec46SSimon Moll; CHECK-NEXT:    lea %s1, 256
679ebaec46SSimon Moll; CHECK-NEXT:    lvl %s1
689ebaec46SSimon Moll; CHECK-NEXT:    vseq %v0
699ebaec46SSimon Moll; CHECK-NEXT:    vmulu.l %v0, 4, %v0, %vm1
709ebaec46SSimon Moll; CHECK-NEXT:    vaddu.l %v0, %s0, %v0, %vm1
719ebaec46SSimon Moll; CHECK-NEXT:    vgtu %v0, %v0, 0, 0, %vm1
729ebaec46SSimon Moll; CHECK-NEXT:    b.l.t (, %s10)
73*b006b60dSNikita Popov  %r = call <256 x float> @llvm.masked.load.v256f32.p0(ptr %P, i32 16, <256 x i1> %M, <256 x float> undef)
749ebaec46SSimon Moll  ret <256 x float> %r
759ebaec46SSimon Moll}
769ebaec46SSimon Moll
779ebaec46SSimon Moll; Function Attrs: nounwind
; Masked load of <256 x float> with passthru %PT: vgtu gather (4-byte
; scaled indices) followed by a vmrg merging gathered lanes into the
; passthru vector (%v0) under mask %vm1.
78*b006b60dSNikita Popovdefine fastcc <256 x float> @vec_mload_pt_v256f32(ptr %P, <256 x float> %PT, <256 x i1> %M) {
799ebaec46SSimon Moll; CHECK-LABEL: vec_mload_pt_v256f32:
809ebaec46SSimon Moll; CHECK:       # %bb.0:
819ebaec46SSimon Moll; CHECK-NEXT:    lea %s1, 256
829ebaec46SSimon Moll; CHECK-NEXT:    lvl %s1
839ebaec46SSimon Moll; CHECK-NEXT:    vseq %v1
849ebaec46SSimon Moll; CHECK-NEXT:    vmulu.l %v1, 4, %v1, %vm1
859ebaec46SSimon Moll; CHECK-NEXT:    vaddu.l %v1, %s0, %v1, %vm1
869ebaec46SSimon Moll; CHECK-NEXT:    vgtu %v1, %v1, 0, 0, %vm1
879ebaec46SSimon Moll; CHECK-NEXT:    vmrg %v0, %v0, %v1, %vm1
889ebaec46SSimon Moll; CHECK-NEXT:    b.l.t (, %s10)
89*b006b60dSNikita Popov  %r = call <256 x float> @llvm.masked.load.v256f32.p0(ptr %P, i32 16, <256 x i1> %M, <256 x float> %PT)
909ebaec46SSimon Moll  ret <256 x float> %r
919ebaec46SSimon Moll}
929ebaec46SSimon Moll
939ebaec46SSimon Moll
94*b006b60dSNikita Popovdeclare <256 x i32> @llvm.masked.load.v256i32.p0(ptr %0, i32 immarg %1, <256 x i1> %2, <256 x i32> %3) #0
959ebaec46SSimon Moll
969ebaec46SSimon Moll; Function Attrs: nounwind
; Masked load of <256 x i32>, undef passthru. Indices scaled by 4 (i32
; element size); the gather is vgtl.zx (32-bit gather, zero-extended into
; the 64-bit vector element).
97*b006b60dSNikita Popovdefine fastcc <256 x i32> @vec_mload_v256i32(ptr %P, <256 x i1> %M) {
989ebaec46SSimon Moll; CHECK-LABEL: vec_mload_v256i32:
999ebaec46SSimon Moll; CHECK:       # %bb.0:
1009ebaec46SSimon Moll; CHECK-NEXT:    lea %s1, 256
1019ebaec46SSimon Moll; CHECK-NEXT:    lvl %s1
1029ebaec46SSimon Moll; CHECK-NEXT:    vseq %v0
1039ebaec46SSimon Moll; CHECK-NEXT:    vmulu.l %v0, 4, %v0, %vm1
1049ebaec46SSimon Moll; CHECK-NEXT:    vaddu.l %v0, %s0, %v0, %vm1
1059ebaec46SSimon Moll; CHECK-NEXT:    vgtl.zx %v0, %v0, 0, 0, %vm1
1069ebaec46SSimon Moll; CHECK-NEXT:    b.l.t (, %s10)
107*b006b60dSNikita Popov  %r = call <256 x i32> @llvm.masked.load.v256i32.p0(ptr %P, i32 16, <256 x i1> %M, <256 x i32> undef)
1089ebaec46SSimon Moll  ret <256 x i32> %r
1099ebaec46SSimon Moll}
1109ebaec46SSimon Moll
1119ebaec46SSimon Moll; Function Attrs: nounwind
; Masked load of <256 x i32> with passthru %PT: vgtl.zx gather (4-byte
; scaled indices) followed by a vmrg merging gathered lanes into the
; passthru vector (%v0) under mask %vm1.
112*b006b60dSNikita Popovdefine fastcc <256 x i32> @vec_mload_pt_v256i32(ptr %P, <256 x i32> %PT, <256 x i1> %M) {
1139ebaec46SSimon Moll; CHECK-LABEL: vec_mload_pt_v256i32:
1149ebaec46SSimon Moll; CHECK:       # %bb.0:
1159ebaec46SSimon Moll; CHECK-NEXT:    lea %s1, 256
1169ebaec46SSimon Moll; CHECK-NEXT:    lvl %s1
1179ebaec46SSimon Moll; CHECK-NEXT:    vseq %v1
1189ebaec46SSimon Moll; CHECK-NEXT:    vmulu.l %v1, 4, %v1, %vm1
1199ebaec46SSimon Moll; CHECK-NEXT:    vaddu.l %v1, %s0, %v1, %vm1
1209ebaec46SSimon Moll; CHECK-NEXT:    vgtl.zx %v1, %v1, 0, 0, %vm1
1219ebaec46SSimon Moll; CHECK-NEXT:    vmrg %v0, %v0, %v1, %vm1
1229ebaec46SSimon Moll; CHECK-NEXT:    b.l.t (, %s10)
123*b006b60dSNikita Popov  %r = call <256 x i32> @llvm.masked.load.v256i32.p0(ptr %P, i32 16, <256 x i1> %M, <256 x i32> %PT)
1249ebaec46SSimon Moll  ret <256 x i32> %r
1259ebaec46SSimon Moll}
1269ebaec46SSimon Moll
1279ebaec46SSimon Mollattributes #0 = { argmemonly nounwind readonly willreturn }
128