; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkg \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkg \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK

; Codegen tests for the @llvm.riscv.vghsh intrinsic (Zvkg) on rv32 and rv64,
; covering every supported i32 element count (nxv1i32 through nxv16i32, i.e.
; LMUL mf2 through m8). Each call passes policy operand `iXLen 2` and expects
; a `tu, ma` vsetvli followed by a single vghsh.vv.

declare <vscale x 1 x i32> @llvm.riscv.vghsh.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vghsh_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vghsh_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vghsh.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vghsh.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vghsh.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vghsh_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vghsh_vv_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vghsh.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vghsh.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vghsh.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vghsh_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vghsh_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vghsh.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vghsh.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vghsh.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vghsh_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vghsh_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vghsh.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vghsh.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vghsh.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen,
  iXLen)

; At m8 the third vector operand no longer fits in the argument registers, so
; it is spilled by the caller and reloaded here via vl8re32.v before the op.
define <vscale x 16 x i32> @intrinsic_vghsh_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vghsh_vv_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
; CHECK-NEXT:    vghsh.vv v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vghsh.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 16 x i32> %a
}