; Lines matching refs:slice: SME2 f16/bf16 multi-vector ADD/SUB-to-ZA intrinsic tests.

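; Each test issues its intrinsic twice: once at the base %slice and once at
; %slice + 7, exercising the top of the 3-bit immediate slice-offset range
; (0-7) of the underlying SME2 instructions (presumably the FADD/FSUB, and
; BFADD/BFSUB for bf16, "za.h[..., vgx2/vgx4]" forms).
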
define void @add_f16_vg1x2(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1) #0 {
  call void @llvm.aarch64.sme.add.za16.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.add.za16.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1)
  ret void
}

define void @add_f16_vg1x4(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
                           <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3) #0 {
  call void @llvm.aarch64.sme.add.za16.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
                                                     <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.add.za16.vg1x4.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
                                                     <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3)
  ret void
}

define void @sub_f16_vg1x2(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1) #1 {
  call void @llvm.aarch64.sme.sub.za16.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sub.za16.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1)
  ret void
}

define void @sub_f16_vg1x4(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
                           <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3) #1 {
  call void @llvm.aarch64.sme.sub.za16.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
                                                     <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sub.za16.vg1x4.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
                                                     <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3)
  ret void
}

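; The bf16 variants below exercise the same slice / slice+7 pattern.
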
define void @add_bf16_vg1x2(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1) #2 {
  call void @llvm.aarch64.sme.add.za16.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.add.za16.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1)
  ret void
}

define void @add_bf16_vg1x4(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
                            <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3) #2 {
  call void @llvm.aarch64.sme.add.za16.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
                                                      <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.add.za16.vg1x4.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
                                                      <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3)
  ret void
}

define void @sub_bf16_vg1x2(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1) #2 {
  call void @llvm.aarch64.sme.sub.za16.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sub.za16.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1)
  ret void
}

define void @sub_bf16_vg1x4(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
                            <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3) #2 {
  call void @llvm.aarch64.sme.sub.za16.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
                                                      <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.sub.za16.vg1x4.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
                                                      <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3)
  ret void
}
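
; Attribute groups #0-#2 are referenced above but their bodies were not part of
; this excerpt. The definitions below are an assumption (SME2 tests gated on
; the f16/bf16 ZA add/sub target features), added so the module parses; the
; real test may use different attributes.
attributes #0 = { nounwind "target-features"="+sme2,+sme-f16f16" }
attributes #1 = { nounwind "target-features"="+sme2,+sme-f16f16" }
attributes #2 = { nounwind "target-features"="+sme2,+sme-b16b16" }

; Declarations for the intrinsics, with signatures as implied by the call
; sites above.
declare void @llvm.aarch64.sme.add.za16.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.add.za16.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.sub.za16.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.sub.za16.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.add.za16.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.add.za16.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.sub.za16.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.sub.za16.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)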