// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
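// NOTE: -disable-O0-optnone together with the `opt -S -passes=mem2reg` step
// strips the frontend's -O0 allocas/loads/stores, so the CHECK lines below
// can match the intrinsic calls on SSA values directly.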
// NOTE: The purpose of separating these intrinsics from vsmul.c is that the
// eew=64 versions are only enabled when the V extension is specified (not
// for Zve*).

#include <riscv_vector.h>
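// NOTE: per the RVV spec, vsmul is a signed saturating fixed-point multiply,
// roughly clip(roundoff_signed(op1 * op2, SEW - 1)). __RISCV_VXRM_RNU
// (round-to-nearest-up) encodes as 0, which matches the `i64 0` vxrm operand
// in each expected call below.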

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return __riscv_vsmul_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return __riscv_vsmul_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return __riscv_vsmul_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl);
}

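// NOTE: the _m variants below lower to the .mask intrinsics, taking the mask
// as an extra operand plus a trailing policy operand; the `i64 3` is assumed
// here to be TAIL_AGNOSTIC | MASK_AGNOSTIC, the default for the non-policy
// masked intrinsics.
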
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
  return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}