; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

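; Mask (<N x i1>) insertelement is lowered by widening the mask to an i8
; vector (vmerge.vim over a zero splat), inserting the scalar there with
; vmv.s.x/vslideup, and narrowing back to a mask with vmsne.vi.

; Insert an i1 into a <1 x i1> mask at constant index 0.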
define <1 x i1> @insertelt_v1i1(<1 x i1> %x, i1 %elt) nounwind {
; CHECK-LABEL: insertelt_v1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %y = insertelement <1 x i1> %x, i1 %elt, i64 0
  ret <1 x i1> %y
}

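; Insert an i1 into a <1 x i1> mask at a variable index.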
define <1 x i1> @insertelt_idx_v1i1(<1 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind {
; CHECK-LABEL: insertelt_idx_v1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %y = insertelement <1 x i1> %x, i1 %elt, i32 %idx
  ret <1 x i1> %y
}

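; Insert an i1 into a <2 x i1> mask at constant index 1.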
define <2 x i1> @insertelt_v2i1(<2 x i1> %x, i1 %elt) nounwind {
; CHECK-LABEL: insertelt_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 1
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %y = insertelement <2 x i1> %x, i1 %elt, i64 1
  ret <2 x i1> %y
}

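; Insert an i1 into a <2 x i1> mask at a variable index.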
define <2 x i1> @insertelt_idx_v2i1(<2 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind {
; CHECK-LABEL: insertelt_idx_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %y = insertelement <2 x i1> %x, i1 %elt, i32 %idx
  ret <2 x i1> %y
}

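; Insert an i1 into an <8 x i1> mask at constant index 1.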
define <8 x i1> @insertelt_v8i1(<8 x i1> %x, i1 %elt) nounwind {
; CHECK-LABEL: insertelt_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 1
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %y = insertelement <8 x i1> %x, i1 %elt, i64 1
  ret <8 x i1> %y
}

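; Insert an i1 into an <8 x i1> mask at a variable index.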
define <8 x i1> @insertelt_idx_v8i1(<8 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind {
; CHECK-LABEL: insertelt_idx_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %y = insertelement <8 x i1> %x, i1 %elt, i32 %idx
  ret <8 x i1> %y
}

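; Insert an i1 into a <64 x i1> mask at constant index 1 (LMUL > 1).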
define <64 x i1> @insertelt_v64i1(<64 x i1> %x, i1 %elt) nounwind {
; CHECK-LABEL: insertelt_v64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vsetivli zero, 2, e8, m1, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v12, 1
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %y = insertelement <64 x i1> %x, i1 %elt, i64 1
  ret <64 x i1> %y
}

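; Insert an i1 into a <64 x i1> mask at a variable index (LMUL > 1).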
define <64 x i1> @insertelt_idx_v64i1(<64 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind {
; CHECK-LABEL: insertelt_idx_v64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a1
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %y = insertelement <64 x i1> %x, i1 %elt, i32 %idx
  ret <64 x i1> %y
}