xref: /llvm-project/llvm/test/Transforms/SLPVectorizer/RISCV/gep.ll (revision d0d864f6f4828e6973b81d1544036d101f6ad4e4)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -mtriple=riscv64 -mattr=+v \
; RUN: -riscv-v-slp-max-vf=0 -S | FileCheck %s

; This should not be vectorized, as the cost of computing the offsets nullifies
; the benefits of vectorizing:
;
; copy_with_offset_v2i8:
;         addi    a0, a0, 8
;         vsetivli        zero, 2, e8, mf8, ta, ma
;         vle8.v  v8, (a0)
;         addi    a1, a1, 16
;         vse8.v  v8, (a1)
;         ret
;
; Compared to the scalar version where the offsets can be folded into the
; addressing mode:
;
; copy_with_offset_v2i8:
;         lbu     a2, 8(a0)
;         lbu     a0, 9(a0)
;         sb      a2, 16(a1)
;         sb      a0, 17(a1)
;         ret

define void @copy_with_offset_v2i8(ptr noalias %p, ptr noalias %q) {
; CHECK-LABEL: @copy_with_offset_v2i8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 8
; CHECK-NEXT:    [[X1:%.*]] = load i8, ptr [[P1]], align 1
; CHECK-NEXT:    [[Q1:%.*]] = getelementptr i8, ptr [[Q:%.*]], i32 16
; CHECK-NEXT:    store i8 [[X1]], ptr [[Q1]], align 1
; CHECK-NEXT:    [[P2:%.*]] = getelementptr i8, ptr [[P]], i32 9
; CHECK-NEXT:    [[X2:%.*]] = load i8, ptr [[P2]], align 1
; CHECK-NEXT:    [[Q2:%.*]] = getelementptr i8, ptr [[Q]], i32 17
; CHECK-NEXT:    store i8 [[X2]], ptr [[Q2]], align 1
; CHECK-NEXT:    ret void
;
entry:
  ; Two adjacent i8 copies at constant offsets; the CHECK lines above assert
  ; the SLP vectorizer leaves them scalar (vectorizing would add GEP cost).
  %p1 = getelementptr i8, ptr %p, i32 8
  %x1 = load i8, ptr %p1
  %q1 = getelementptr i8, ptr %q, i32 16
  store i8 %x1, ptr %q1

  %p2 = getelementptr i8, ptr %p, i32 9
  %x2 = load i8, ptr %p2
  %q2 = getelementptr i8, ptr %q, i32 17
  store i8 %x2, ptr %q2

  ret void
}

; This on the other hand, should be vectorized as the vector savings outweigh
; the GEP costs.
define void @copy_with_offset_v4i8(ptr noalias %p, ptr noalias %q) {
; CHECK-LABEL: @copy_with_offset_v4i8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 8
; CHECK-NEXT:    [[Q1:%.*]] = getelementptr i8, ptr [[Q:%.*]], i32 16
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr [[P1]], align 1
; CHECK-NEXT:    store <4 x i8> [[TMP0]], ptr [[Q1]], align 1
; CHECK-NEXT:    ret void
;
entry:
  ; Four contiguous i8 copies; the CHECK lines above assert the SLP vectorizer
  ; collapses them into a single <4 x i8> load/store pair.
  %p1 = getelementptr i8, ptr %p, i32 8
  %x1 = load i8, ptr %p1
  %q1 = getelementptr i8, ptr %q, i32 16
  store i8 %x1, ptr %q1

  %p2 = getelementptr i8, ptr %p, i32 9
  %x2 = load i8, ptr %p2
  %q2 = getelementptr i8, ptr %q, i32 17
  store i8 %x2, ptr %q2

  %p3 = getelementptr i8, ptr %p, i32 10
  %x3 = load i8, ptr %p3
  %q3 = getelementptr i8, ptr %q, i32 18
  store i8 %x3, ptr %q3

  %p4 = getelementptr i8, ptr %p, i32 11
  %x4 = load i8, ptr %p4
  %q4 = getelementptr i8, ptr %q, i32 19
  store i8 %x4, ptr %q4

  ret void
}