; xref: /llvm-project/llvm/test/Transforms/InstCombine/opts-tuples-extract-intrinsic.ll (revision 09afe4155b304c8ab9e90c5699f1c0f10ccd0a7e)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=instcombine < %s | FileCheck %s

; Check that the redundant sequences of extract/insert are eliminated.

; extract.vector(insert.vector(Tuple, Value, Idx), Idx) --> Value
; Insert %v1 at index 48 and extract from the same index with the same
; element count, so the whole pair folds away and %v1 is returned directly.
define <vscale x 16 x i8> @test_extract_insert_same_idx(<vscale x 64 x i8> %v0, <vscale x 16 x i8> %v1) {
; CHECK-LABEL: @test_extract_insert_same_idx(
; CHECK-NEXT:    ret <vscale x 16 x i8> [[V1:%.*]]
;
  %vec.ins = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %v0, <vscale x 16 x i8> %v1, i64 48)
  %vec.ext = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> %vec.ins, i64 48)
  ret <vscale x 16 x i8> %vec.ext
}

; extract.vector(insert.vector(Vector, Value, InsertIndex), ExtractIndex)
;  --> extract.vector(Vector, ExtractIndex)
; The extract reads from index 0 while the insert wrote at index 48 (disjoint
; ranges), so the extract can be rewritten to read from the original %v0.
define <vscale x 16 x i8> @test_extract_insert_dif_idx(<vscale x 64 x i8> %v0, <vscale x 16 x i8> %v1) {
; CHECK-LABEL: @test_extract_insert_dif_idx(
; CHECK-NEXT:    [[VEC_EXT:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[V0:%.*]], i64 0)
; CHECK-NEXT:    ret <vscale x 16 x i8> [[VEC_EXT]]
;
  %vec.ins = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %v0, <vscale x 16 x i8> %v1, i64 48)
  %vec.ext = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> %vec.ins, i64 0)
  ret <vscale x 16 x i8> %vec.ext
}

; Negative test
; The extracted vector-size != inserted vector-size
; The extract (nxv32i8) is wider than the inserted value (nxv16i8), so the
; pair must NOT be folded; both calls are expected to survive InstCombine.
define <vscale x 32 x i8> @neg_test_extract_insert_same_idx_dif_ret_size(<vscale x 64 x i8> %v0, <vscale x 16 x i8> %v1) {
; CHECK-LABEL: @neg_test_extract_insert_same_idx_dif_ret_size(
; CHECK-NEXT:    [[VEC_INS:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], i64 32)
; CHECK-NEXT:    [[VEC_EXT:%.*]] = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[VEC_INS]], i64 32)
; CHECK-NEXT:    ret <vscale x 32 x i8> [[VEC_EXT]]
;
  %vec.ins = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %v0, <vscale x 16 x i8> %v1, i64 32)
  %vec.ext = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> %vec.ins, i64 32)
  ret <vscale x 32 x i8> %vec.ext
}


; Intrinsic declarations used by the tests above.
declare <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8>, <vscale x 16 x i8>, i64)
declare <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8>, i64)
declare <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8>, i64)