// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +zve32x -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
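
// Note: vmulhu ("vector multiply high unsigned") returns the upper SEW bits
// of the 2*SEW-bit unsigned product of its operands. The overloaded
// __riscv_vmulhu entry point resolves on argument types: vector-vector (vv)
// when op2 is a vector, vector-scalar (vx) when op2 is a scalar, across
// every SEW/LMUL combination exercised below.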
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhu(op1, op2, vl);
}
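
// Masked variants: with a vbool mask as the first argument, the overloaded
// __riscv_vmulhu resolves to the _m form and lowers to the .mask intrinsics.
// The trailing `i64 3` policy operand encodes the tail-agnostic,
// mask-agnostic policy, the default for the plain _m overloads.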
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhu(mask, op1, op2, vl);
}