; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --filter-out "// kill:.*$" --version 4
; RUN: llc -force-streaming -verify-machineinstrs < %s | FileCheck %s

target triple = "aarch64-linux"

define void @test_fmla_f16_vg2_single(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: test_fmla_f16_vg2_single:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    fmla za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h
; CHECK:    fmla za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h
; CHECK:    ret
  call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b)
  ret void
}

define void @test_fmla_f16_vg4_single(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1,
; CHECK-LABEL: test_fmla_f16_vg4_single:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    fmla za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h
; CHECK:    fmla za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h
; CHECK:    ret
                                      <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b) #0 {
  call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b)
  ret void
}

define void @test_fmls_f16_vg2_single(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: test_fmls_f16_vg2_single:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    fmls za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h
; CHECK:    fmls za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h
; CHECK:    ret
  call void @llvm.aarch64.sme.fmls.single.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.single.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b)
  ret void
}

define void @test_fmls_f16_vg4_single(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1,
; CHECK-LABEL: test_fmls_f16_vg4_single:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    fmls za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h
; CHECK:    fmls za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h
; CHECK:    ret
                                      <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b) #0 {
  call void @llvm.aarch64.sme.fmls.single.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.single.vg1x4.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b)
  ret void
}

define void @test_fmla_f16_vg2_multi(i32 %slice,
; CHECK-LABEL: test_fmla_f16_vg2_multi:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    fmla za.h[w8, 0, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK:    fmla za.h[w8, 7, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK:    ret
                                     <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b0, <vscale x 8 x half> %b1) #0 {
  call void @llvm.aarch64.sme.fmla.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b0, <vscale x 8 x half> %b1)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b0, <vscale x 8 x half> %b1)
  ret void
}

define void @test_fmla_f16_vg4_multi(i32 %slice,
; CHECK-LABEL: test_fmla_f16_vg4_multi:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    fmla za.h[w8, 0, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK:    fmla za.h[w8, 7, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK:    ret
                                     <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3,
                                     <vscale x 8 x half> %b0, <vscale x 8 x half> %b1, <vscale x 8 x half> %b2, <vscale x 8 x half> %b3) #0 {
  call void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b0, <vscale x 8 x half> %b1, <vscale x 8 x half> %b2, <vscale x 8 x half> %b3)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b0, <vscale x 8 x half> %b1, <vscale x 8 x half> %b2, <vscale x 8 x half> %b3)
  ret void
}

define void @test_fmls_f16_vg2_multi(i32 %slice,
; CHECK-LABEL: test_fmls_f16_vg2_multi:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    fmls za.h[w8, 0, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK:    fmls za.h[w8, 7, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK:    ret
                                     <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b0, <vscale x 8 x half> %b1) #0 {
  call void @llvm.aarch64.sme.fmls.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b0, <vscale x 8 x half> %b1)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b0, <vscale x 8 x half> %b1)
  ret void
}

define void @test_fmls_f16_vg4_multi(i32 %slice,
; CHECK-LABEL: test_fmls_f16_vg4_multi:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    fmls za.h[w8, 0, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK:    fmls za.h[w8, 7, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK:    ret
                                     <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3,
                                     <vscale x 8 x half> %b0, <vscale x 8 x half> %b1, <vscale x 8 x half> %b2, <vscale x 8 x half> %b3) #0 {
  call void @llvm.aarch64.sme.fmls.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b0, <vscale x 8 x half> %b1, <vscale x 8 x half> %b2, <vscale x 8 x half> %b3)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.vg1x4.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b0, <vscale x 8 x half> %b1, <vscale x 8 x half> %b2, <vscale x 8 x half> %b3)
  ret void
}

define void @test_fmla_f16_vg2_index(i32 %slice,
; CHECK-LABEL: test_fmla_f16_vg2_index:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    fmla za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK:    fmla za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK:    ret
                                     <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b) #0 {
  call void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b, i32 7)
  ret void
}

define void @test_fmla_f16_vg4_index(i32 %slice,
; CHECK-LABEL: test_fmla_f16_vg4_index:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    fmla za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK:    fmla za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK:    ret
                                     <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b) #0 {
  call void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b, i32 7)
  ret void
}

define void @test_fmls_f16_vg2_index(i32 %slice,
; CHECK-LABEL: test_fmls_f16_vg2_index:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    fmls za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK:    fmls za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK:    ret
                                     <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b) #0 {
  call void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b, i32 7)
  ret void
}

define void @test_fmls_f16_vg4_index(i32 %slice,
; CHECK-LABEL: test_fmls_f16_vg4_index:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    fmls za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK:    fmls za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK:    ret
                                     <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b) #0 {
  call void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b, i32 7)
  ret void
}

define void @test_fmla_bf16_vg2_single(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fmla_bf16_vg2_single:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    bfmla za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h
; CHECK:    bfmla za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h
; CHECK:    ret
  call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b)
  ret void
}

define void @test_fmla_bf16_vg4_single(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1,
; CHECK-LABEL: test_fmla_bf16_vg4_single:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    bfmla za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h
; CHECK:    bfmla za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h
; CHECK:    ret
                                       <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b) #0 {
  call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b)
  ret void
}

define void @test_fmls_bf16_vg2_single(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fmls_bf16_vg2_single:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    bfmls za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h
; CHECK:    bfmls za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h
; CHECK:    ret
  call void @llvm.aarch64.sme.fmls.single.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.single.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b)
  ret void
}

define void @test_fmls_bf16_vg4_single(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1,
; CHECK-LABEL: test_fmls_bf16_vg4_single:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    bfmls za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h
; CHECK:    bfmls za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h
; CHECK:    ret
                                       <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b) #0 {
  call void @llvm.aarch64.sme.fmls.single.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.single.vg1x4.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b)
  ret void
}

define void @test_fmla_bf16_vg2_multi(i32 %slice,
; CHECK-LABEL: test_fmla_bf16_vg2_multi:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    bfmla za.h[w8, 0, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK:    bfmla za.h[w8, 7, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK:    ret
                                      <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1) #0 {
  call void @llvm.aarch64.sme.fmla.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1)
  ret void
}

define void @test_fmla_bf16_vg4_multi(i32 %slice,
; CHECK-LABEL: test_fmla_bf16_vg4_multi:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    bfmla za.h[w8, 0, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK:    bfmla za.h[w8, 7, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK:    ret
                                      <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3,
                                      <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1, <vscale x 8 x bfloat> %b2, <vscale x 8 x bfloat> %b3) #0 {
  call void @llvm.aarch64.sme.fmla.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1, <vscale x 8 x bfloat> %b2, <vscale x 8 x bfloat> %b3)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.vg1x4.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1, <vscale x 8 x bfloat> %b2, <vscale x 8 x bfloat> %b3)
  ret void
}

define void @test_fmls_bf16_vg2_multi(i32 %slice,
; CHECK-LABEL: test_fmls_bf16_vg2_multi:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    bfmls za.h[w8, 0, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK:    bfmls za.h[w8, 7, vgx2], { z0.h, z1.h }, { z2.h, z3.h }
; CHECK:    ret
                                      <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1) #0 {
  call void @llvm.aarch64.sme.fmls.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1)
  ret void
}

define void @test_fmls_bf16_vg4_multi(i32 %slice,
; CHECK-LABEL: test_fmls_bf16_vg4_multi:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    bfmls za.h[w8, 0, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK:    bfmls za.h[w8, 7, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
; CHECK:    ret
                                      <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3,
                                      <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1, <vscale x 8 x bfloat> %b2, <vscale x 8 x bfloat> %b3) #0 {
  call void @llvm.aarch64.sme.fmls.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1, <vscale x 8 x bfloat> %b2, <vscale x 8 x bfloat> %b3)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.vg1x4.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1, <vscale x 8 x bfloat> %b2, <vscale x 8 x bfloat> %b3)
  ret void
}

define void @test_fmla_bf16_vg2_index(i32 %slice,
; CHECK-LABEL: test_fmla_bf16_vg2_index:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    bfmla za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK:    bfmla za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK:    ret
                                      <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b) #0 {
  call void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b, i32 7)
  ret void
}

define void @test_fmla_bf16_vg4_index(i32 %slice,
; CHECK-LABEL: test_fmla_bf16_vg4_index:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    bfmla za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK:    bfmla za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK:    ret
                                      <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b) #0 {
  call void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b, i32 7)
  ret void
}

define void @test_fmls_bf16_vg2_index(i32 %slice,
; CHECK-LABEL: test_fmls_bf16_vg2_index:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    bfmls za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK:    bfmls za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h[7]
; CHECK:    ret
                                      <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b) #0 {
  call void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b, i32 7)
  ret void
}

define void @test_fmls_bf16_vg4_index(i32 %slice,
; CHECK-LABEL: test_fmls_bf16_vg4_index:
; CHECK:       // %bb.0:
; CHECK:    mov w8, w0
; CHECK:    bfmls za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK:    bfmls za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h[7]
; CHECK:    ret
                                      <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b) #0 {
  call void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b, i32 7)
  ret void
}

attributes #0 = { nounwind "target-features"="+sme2p1,+sme-f16f16,+sme-b16b16" }
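
; Intrinsic declarations matching the call sites above, added for completeness.
; Recent IR parsers can synthesize declarations for known intrinsics, so
; upstream copies of this test may omit this block; the signatures below are
; derived directly from the calls in this file.
declare void @llvm.aarch64.sme.fmla.single.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fmla.single.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fmls.single.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fmls.single.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fmla.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fmls.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fmls.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
declare void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
declare void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
declare void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
declare void @llvm.aarch64.sme.fmla.single.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmla.single.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmls.single.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmls.single.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmla.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmla.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmls.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmls.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
declare void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
declare void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
declare void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)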