; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvknhb \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvknhb \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i32/g' %s | not --crash llc -mtriple=riscv32 -mattr=+v,+zvknha 2>&1 \
; RUN:   | FileCheck --check-prefixes=CHECK-ERROR %s
; RUN: sed 's/iXLen/i64/g' %s | not --crash llc -mtriple=riscv64 -mattr=+v,+zvknha 2>&1 \
; RUN:   | FileCheck --check-prefixes=CHECK-ERROR %s

; Tests for the vsha2cl (SHA-2 compress, low words) intrinsic.  The i64
; element (SEW=64) cases require Zvknhb; with only Zvknha the backend must
; report a fatal error rather than silently emit invalid code.
; CHECK-ERROR: LLVM ERROR: SEW=64 needs Zvknhb to be enabled.

; NOTE(review): the original autogenerated assertions checked for the
; vsha2ch.vv (compress HIGH) mnemonic in every function below, even though
; the intrinsic under test is vsha2cl.  That asserted a wrong-opcode
; lowering; the expected instruction for llvm.riscv.vsha2cl.* is
; vsha2cl.vv, as checked here.  Regenerate with update_llc_test_checks.py
; against a compiler containing the corrected pseudo definitions.

declare <vscale x 4 x i32> @llvm.riscv.vsha2cl.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vsha2cl_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsha2cl.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsha2cl.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vsha2cl_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsha2cl.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vsha2cl_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsha2cl.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vsha2cl_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsha2cl.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vsha2cl_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 8 x i64> %a
}