; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefix=CHECKLE
; RUN: llc < %s -mtriple=aarch64_be | FileCheck %s --check-prefix=CHECKBE

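; The test lowers a pair of <16 x i8> -> <4 x i8> shuffles feeding an add and a
; zext to <4 x i16>, checking both little-endian and big-endian AArch64; the
; big-endian run expects the additional rev64 instructions around the vector ops.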
define <4 x i16> @test_reconstructshuffle(<16 x i8> %a, <16 x i8> %b) nounwind {
; CHECKLE-LABEL: test_reconstructshuffle:
; CHECKLE:       // %bb.0:
; CHECKLE-NEXT:    umov w8, v0.b[3]
; CHECKLE-NEXT:    umov w9, v0.b[2]
; CHECKLE-NEXT:    fmov s2, w8
; CHECKLE-NEXT:    umov w8, v0.b[1]
; CHECKLE-NEXT:    mov v2.h[1], w9
; CHECKLE-NEXT:    mov v2.h[2], w8
; CHECKLE-NEXT:    umov w8, v0.b[0]
; CHECKLE-NEXT:    ext v0.16b, v1.16b, v1.16b, #8
; CHECKLE-NEXT:    mov v2.h[3], w8
; CHECKLE-NEXT:    zip2 v0.8b, v0.8b, v0.8b
; CHECKLE-NEXT:    add v0.4h, v2.4h, v0.4h
; CHECKLE-NEXT:    bic v0.4h, #255, lsl #8
; CHECKLE-NEXT:    ret
;
; CHECKBE-LABEL: test_reconstructshuffle:
; CHECKBE:       // %bb.0:
; CHECKBE-NEXT:    rev64 v0.16b, v0.16b
; CHECKBE-NEXT:    rev64 v1.16b, v1.16b
; CHECKBE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
; CHECKBE-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
; CHECKBE-NEXT:    umov w8, v0.b[3]
; CHECKBE-NEXT:    umov w9, v0.b[2]
; CHECKBE-NEXT:    fmov s2, w8
; CHECKBE-NEXT:    umov w8, v0.b[1]
; CHECKBE-NEXT:    mov v2.h[1], w9
; CHECKBE-NEXT:    mov v2.h[2], w8
; CHECKBE-NEXT:    umov w8, v0.b[0]
; CHECKBE-NEXT:    ext v0.16b, v1.16b, v1.16b, #8
; CHECKBE-NEXT:    mov v2.h[3], w8
; CHECKBE-NEXT:    zip2 v0.8b, v0.8b, v0.8b
; CHECKBE-NEXT:    add v0.4h, v2.4h, v0.4h
; CHECKBE-NEXT:    bic v0.4h, #255, lsl #8
; CHECKBE-NEXT:    rev64 v0.4h, v0.4h
; CHECKBE-NEXT:    ret
  %tmp1 = shufflevector <16 x i8> %a, <16 x i8> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  %tmp2 = shufflevector <16 x i8> %b, <16 x i8> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
  %tmp3 = add <4 x i8> %tmp1, %tmp2
  %tmp4 = zext <4 x i8> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}
48