; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvqmaccdod \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccdod \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK

; The trailing iXLen operand of each intrinsic call is the policy: 2 lowers
; to a "tu, ma" vsetvli (tail undisturbed) and 3 to "ta, ma" (tail agnostic),
; as the generated assertions below show.

declare <vscale x 2 x i32> @llvm.riscv.sf.vqmaccu.2x8x2.nxv2i32.nxv8i8.nxv8i8(
  <vscale x 2 x i32>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vqmaccu_2x8x2_tu_i32m1(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    sf.vqmaccu.2x8x2 v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.sf.vqmaccu.2x8x2.nxv2i32.nxv8i8.nxv8i8(
    <vscale x 2 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vqmaccu_2x8x2_ta_i32m1(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_ta_i32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    sf.vqmaccu.2x8x2 v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.sf.vqmaccu.2x8x2.nxv2i32.nxv8i8.nxv8i8(
    <vscale x 2 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.sf.vqmaccu.2x8x2.nxv4i32.nxv8i8.nxv16i8(
  <vscale x 4 x i32>,
  <vscale x 8 x i8>,
  <vscale x 16 x i8>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vqmaccu_2x8x2_tu_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    sf.vqmaccu.2x8x2 v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.sf.vqmaccu.2x8x2.nxv4i32.nxv8i8.nxv16i8(
    <vscale x 4 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 16 x i8> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vqmaccu_2x8x2_ta_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_ta_i32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    sf.vqmaccu.2x8x2 v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.sf.vqmaccu.2x8x2.nxv4i32.nxv8i8.nxv16i8(
    <vscale x 4 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 16 x i8> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.sf.vqmaccu.2x8x2.nxv8i32.nxv8i8.nxv32i8(
  <vscale x 8 x i32>,
  <vscale x 8 x i8>,
  <vscale x 32 x i8>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vqmaccu_2x8x2_tu_i32m4(<vscale x 8 x i32> %0, <vscale x 8 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    sf.vqmaccu.2x8x2 v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.sf.vqmaccu.2x8x2.nxv8i32.nxv8i8.nxv32i8(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 32 x i8> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vqmaccu_2x8x2_ta_i32m4(<vscale x 8 x i32> %0, <vscale x 8 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_ta_i32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    sf.vqmaccu.2x8x2 v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.sf.vqmaccu.2x8x2.nxv8i32.nxv8i8.nxv32i8(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 32 x i8> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.sf.vqmaccu.2x8x2.nxv16i32.nxv8i8.nxv64i8(
  <vscale x 16 x i32>,
  <vscale x 8 x i8>,
  <vscale x 64 x i8>,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vqmaccu_2x8x2_tu_i32m8(<vscale x 16 x i32> %0, <vscale x 8 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
; CHECK-NEXT:    sf.vqmaccu.2x8x2 v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.sf.vqmaccu.2x8x2.nxv16i32.nxv8i8.nxv64i8(
    <vscale x 16 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 64 x i8> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 16 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vqmaccu_2x8x2_ta_i32m8(<vscale x 16 x i32> %0, <vscale x 8 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_ta_i32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    sf.vqmaccu.2x8x2 v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.sf.vqmaccu.2x8x2.nxv16i32.nxv8i8.nxv64i8(
    <vscale x 16 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 64 x i8> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 16 x i32> %a
}