; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -verify-machineinstrs < %s | FileCheck %s

; MOPA/MOPS

define void @outer_sum_accumulate_s16(<vscale x 8 x i1> %pn, <vscale x 8 x i1> %pm, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: outer_sum_accumulate_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smopa za3.s, p0/m, p1/m, z0.h, z1.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smopa.za32.nxv8i16(i32 3, <vscale x 8 x i1> %pn, <vscale x 8 x i1> %pm, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
  ret void
}

define void @outer_sum_accumulate_u16(<vscale x 8 x i1> %pn, <vscale x 8 x i1> %pm, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: outer_sum_accumulate_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umopa za3.s, p0/m, p1/m, z0.h, z1.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umopa.za32.nxv8i16(i32 3, <vscale x 8 x i1> %pn, <vscale x 8 x i1> %pm, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
  ret void
}

define void @outer_sum_subtract_s16(<vscale x 8 x i1> %pn, <vscale x 8 x i1> %pm, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: outer_sum_subtract_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smops za3.s, p0/m, p1/m, z0.h, z1.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.smops.za32.nxv8i16(i32 3, <vscale x 8 x i1> %pn, <vscale x 8 x i1> %pm, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
  ret void
}

define void @outer_sum_subtract_u16(<vscale x 8 x i1> %pn, <vscale x 8 x i1> %pm, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) {
; CHECK-LABEL: outer_sum_subtract_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umops za3.s, p0/m, p1/m, z0.h, z1.h
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.umops.za32.nxv8i16(i32 3, <vscale x 8 x i1> %pn, <vscale x 8 x i1> %pm, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
  ret void
}

;
; BMOPA/BMOPS
;

define void @bitwise_outer_sum_accumulate_u32(<vscale x 4 x i1> %pn, <vscale x 4 x i1> %pm, <vscale x 4 x i32> %zn, <vscale x 4 x i32> %zm) {
; CHECK-LABEL: bitwise_outer_sum_accumulate_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    bmopa za3.s, p0/m, p1/m, z0.s, z1.s
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.bmopa.za32.nxv4i32(i32 3, <vscale x 4 x i1> %pn, <vscale x 4 x i1> %pm, <vscale x 4 x i32> %zn, <vscale x 4 x i32> %zm)
  ret void
}

define void @bitwise_outer_sum_subtract_u32(<vscale x 4 x i1> %pn, <vscale x 4 x i1> %pm, <vscale x 4 x i32> %zn, <vscale x 4 x i32> %zm) {
; CHECK-LABEL: bitwise_outer_sum_subtract_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    bmops za3.s, p0/m, p1/m, z0.s, z1.s
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.bmops.za32.nxv4i32(i32 3, <vscale x 4 x i1> %pn, <vscale x 4 x i1> %pm, <vscale x 4 x i32> %zn, <vscale x 4 x i32> %zm)
  ret void
}

declare void @llvm.aarch64.sme.smopa.za32.nxv8i16(i32, <vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.umopa.za32.nxv8i16(i32, <vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)

declare void @llvm.aarch64.sme.smops.za32.nxv8i16(i32, <vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare void @llvm.aarch64.sme.umops.za32.nxv8i16(i32, <vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)

declare void @llvm.aarch64.sme.bmopa.za32.nxv4i32(i32, <vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare void @llvm.aarch64.sme.bmops.za32.nxv4i32(i32, <vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)