; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -check-prefix=VBITS_GE_256
; RUN: llc -aarch64-sve-vector-bits-min=512 < %s | FileCheck %s -check-prefix=VBITS_GE_256

target triple = "aarch64-unknown-linux-gnu"

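; Const stride that can be directly encoded into the index.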
define void @build_vector_7_inc1_v32i8(ptr %a) #0 {
; VBITS_GE_256-LABEL: build_vector_7_inc1_v32i8:
; VBITS_GE_256:       // %bb.0:
; VBITS_GE_256-NEXT:    index z0.b, #7, #1
; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x0]
; VBITS_GE_256-NEXT:    ret
  store <32 x i8> <i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38>, ptr %a, align 1
  ret void
}

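; Const stride of two, starting at zero.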
define void @build_vector_0_inc2_v16i16(ptr %a) #0 {
; VBITS_GE_256-LABEL: build_vector_0_inc2_v16i16:
; VBITS_GE_256:       // %bb.0:
; VBITS_GE_256-NEXT:    index z0.h, #0, #2
; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0]
; VBITS_GE_256-NEXT:    ret
  store <16 x i16> <i16 0, i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30>, ptr %a, align 2
  ret void
}

; Negative const stride.
define void @build_vector_0_dec3_v8i32(ptr %a) #0 {
; VBITS_GE_256-LABEL: build_vector_0_dec3_v8i32:
; VBITS_GE_256:       // %bb.0:
; VBITS_GE_256-NEXT:    index z0.s, #0, #-3
; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0]
; VBITS_GE_256-NEXT:    ret
  store <8 x i32> <i32 0, i32 -3, i32 -6, i32 -9, i32 -12, i32 -15, i32 -18, i32 -21>, ptr %a, align 4
  ret void
}

; Constant stride that's too big to be directly encoded into the index.
define void @build_vector_minus2_dec32_v4i64(ptr %a) #0 {
; VBITS_GE_256-LABEL: build_vector_minus2_dec32_v4i64:
; VBITS_GE_256:       // %bb.0:
; VBITS_GE_256-NEXT:    mov x8, #-32 // =0xffffffffffffffe0
; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
; VBITS_GE_256-NEXT:    index z0.d, #-2, x8
; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0]
; VBITS_GE_256-NEXT:    ret
  store <4 x i64> <i64 -2, i64 -34, i64 -66, i64 -98>, ptr %a, align 8
  ret void
}

; Constant but not a sequence.
define void @build_vector_no_stride_v4i64(ptr %a) #0 {
; VBITS_GE_256-LABEL: build_vector_no_stride_v4i64:
; VBITS_GE_256:       // %bb.0:
; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
; VBITS_GE_256-NEXT:    adrp x8, .LCPI4_0
; VBITS_GE_256-NEXT:    add x8, x8, :lo12:.LCPI4_0
; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x8]
; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0]
; VBITS_GE_256-NEXT:    ret
  store <4 x i64> <i64 0, i64 4, i64 1, i64 8>, ptr %a, align 8
  ret void
}

attributes #0 = { "target-features"="+sve" }